-- Test edit_campsite
set client_min_messages to warning;
create extension if not exists pgtap;
reset client_min_messages;

begin;

set search_path to camper, public;

select plan(12);
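
-- edit_campsite must exist with this exact argument list, be a plain SQL
-- function (not a security definer) returning integer, be volatile, and,
-- of the roles checked, be executable by admin only.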
select has_function('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean']);
select function_lang_is('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'sql');
select function_returns('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'integer');
select isnt_definer('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean']);
select volatility_is('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'volatile');
select function_privs_are('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'guest', array[]::text[]);
select function_privs_are('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'employee', array[]::text[]);
select function_privs_are('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'admin', array['EXECUTE']);
select function_privs_are('camper', 'edit_campsite', array ['integer', 'integer', 'text', 'text', 'text', 'boolean'], 'authenticator', array[]::text[]);
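
-- Start from a clean slate: empty every table this test populates.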
set client_min_messages to warning;
truncate campsite cascade;
truncate campsite_type cascade;
truncate media cascade;
truncate media_content cascade;
truncate company cascade;
reset client_min_messages;
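
-- Minimal fixture: a single company that owns the media, campsite types, and
-- campsites used below.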
insert into company (company_id, business_name, vatin, trade_name, phone, email, web, address, city, province, postal_code, rtc_number, tourist_tax, country_code, currency_code, default_lang_tag)
values (1, 'Company 2', 'XX123', '', '555-555-555', 'a@a', '', '', '', '', '', '', 60, 'ES', 'EUR', 'ca')
;
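
-- A one-pixel XPM is enough as media content for this test.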
insert into media_content (media_type, bytes)
values ('image/x-xpixmap', 'static char *s[]={"1 1 1 1","a c #ffffff","a"};')
;
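
-- The media row points at that content through its SHA-256 hash.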
insert into media (media_id, company_id, original_filename, content_hash)
values (3, 1, 'cover2.xpm', sha256('static char *s[]={"1 1 1 1","a c #ffffff","a"};'))
;
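
-- Three campsite types sharing the same cover media.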
insert into campsite_type (campsite_type_id, company_id, media_id, name, dogs_allowed, max_campers)
values (11, 1, 3, 'Type A', false, 5)
, (12, 1, 3, 'Type B', false, 5)
, (13, 1, 3, 'Type C', false, 5)
;
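
-- Two campsites to edit: 21 (type 11, active) and 22 (type 12, inactive).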
insert into campsite (campsite_id, company_id, campsite_type_id, label, info1, info2, active)
values (21, 1, 11, 'A1', '<p>A1.1</p>', '<p>A1.2</p>', true)
, (22, 1, 12, 'B1', '<p>B1.1</p>', '<p>B1.2</p>', false)
;
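
-- edit_campsite's arguments appear to map to campsite_id, campsite_type_id,
-- label, info1, info2, and active; the resulting rows are verified with
-- bag_eq below.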
select lives_ok(
$$ select edit_campsite(21, 13, 'C1', '<p>C1.1</p>', '<p>C1.2</p>', false) $$,
'Should be able to edit the first campsite.'
);

select lives_ok(
$$ select edit_campsite(22, 12, 'B2', '<p>B2.1</p>', '<p>B2.2</p>', true) $$,
'Should be able to edit the second campsite.'
);
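
-- Compare the whole campsite table against the expected rows, ignoring order.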
select bag_eq(
$$ select campsite_id, campsite_type_id, label, info1::text, info2::text, active from campsite $$,
$$ values (21, 13, 'C1', '<p>C1.1</p>', '<p>C1.2</p>', false)
, (22, 12, 'B2', '<p>B2.1</p>', '<p>B2.2</p>', true)
$$,
'Should have updated all campsites.'
);
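
-- Report the test results and roll back every change made above.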
select *
from finish();

rollback;