-- Test edit_campsite
set client_min_messages to warning;
create extension if not exists pgtap;
reset client_min_messages;
begin;
set search_path to camper, public;
select plan(12);
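-- Interface checks: edit_campsite must exist in the camper schema as a
-- volatile SQL function, not security definer, taking (uuid, integer, text,
-- boolean) and returning uuid.  A minimal sketch of a definition that would
-- satisfy these checks (parameter names are assumptions, not the actual
-- implementation) might be:
--
--   create or replace function edit_campsite(_slug uuid, _campsite_type_id integer, _label text, _active boolean) returns uuid as $$
--       update campsite
--       set campsite_type_id = _campsite_type_id
--         , label = _label
--         , active = _active
--       where slug = _slug
--       returning slug;
--   $$ language sql;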
select has_function('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean']);
select function_lang_is('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'sql');
select function_returns('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'uuid');
select isnt_definer('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean']);
select volatility_is('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'volatile');
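-- Privileges: only admin may execute edit_campsite; guest, employee, and
-- authenticator must have no privileges on it at all.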
select function_privs_are('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'guest', array[]::text[]);
select function_privs_are('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'employee', array[]::text[]);
select function_privs_are('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'admin', array['EXECUTE']);
select function_privs_are('camper', 'edit_campsite', array ['uuid', 'integer', 'text', 'boolean'], 'authenticator', array[]::text[]);
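-- Fixture data: wipe the related tables, then create one company, a shared
-- media row, three campsite types, and two campsites.  Everything runs inside
-- the transaction opened above, so the rollback at the end undoes it all.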
set client_min_messages to warning;
truncate campsite cascade;
truncate campsite_type cascade;
truncate media cascade;
truncate media_content cascade;
truncate company cascade;
reset client_min_messages;
insert into company (company_id, business_name, vatin, trade_name, phone, email, web, address, city, province, postal_code, country_code, currency_code, default_lang_tag)
values (1, 'Company 2', 'XX123', '', '555-555-555', 'a@a', '', '', '', '', '', 'ES', 'EUR', 'ca')
;
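-- The bytes literal below is a tiny 1x1 white XPM image; the media row that
-- follows computes sha256 over the same literal, so its content_hash
-- corresponds to this content.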
insert into media_content (media_type, bytes)
values ('image/x-xpixmap', 'static char *s[]={"1 1 1 1","a c #ffffff","a"};')
;
insert into media (media_id, company_id, original_filename, content_hash)
values (3, 1, 'cover2.xpm', sha256('static char *s[]={"1 1 1 1","a c #ffffff","a"};'))
;
insert into campsite_type (campsite_type_id, company_id, media_id, name)
values (11, 1, 3, 'Type A')
, (12, 1, 3, 'Type B')
, (13, 1, 3, 'Type C')
;
insert into campsite (company_id, campsite_type_id, slug, label, active)
values (1, 11, '87452b88-b48f-48d3-bb6c-0296de64164e', 'A1', true)
, (1, 12, '9b6370f7-f941-46f2-bc6e-de455675bd0a', 'B1', false)
;
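-- Behaviour: edit both campsites through edit_campsite, then check that the
-- table holds exactly the updated rows.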
select lives_ok(
$$ select edit_campsite('87452b88-b48f-48d3-bb6c-0296de64164e', 13, 'C1', false) $$,
'Should be able to edit the first campsite.'
);
select lives_ok(
$$ select edit_campsite('9b6370f7-f941-46f2-bc6e-de455675bd0a', 12, 'B2', true) $$,
'Should be able to edit the second campsite.'
);
select bag_eq(
$$ select slug::text, campsite_type_id, label, active from campsite $$,
$$ values ('87452b88-b48f-48d3-bb6c-0296de64164e', 13, 'C1', false)
, ('9b6370f7-f941-46f2-bc6e-de455675bd0a', 12, 'B2', true)
$$,
'Should have updated all campsites.'
);
select *
from finish();
rollback;