nhost / hasura-storage

Storage for Hasura built on top of S3

Generate multiple presigned urls from a single request

iamkhalidbashir opened this issue · comments

Right now I have to first fetch the file IDs of the documents in one request,
then make hundreds of additional requests to get presigned URLs for all of those items.

The ideal solution would be to get the presigned URLs in that first request, using some sort of trigger function like the following:

-- FUNCTION: dbname.trigger_media_validation()

-- DROP FUNCTION dbname.trigger_media_validation();

CREATE OR REPLACE FUNCTION dbname.trigger_media_validation()
    RETURNS trigger
    LANGUAGE 'plpgsql'
    COST 100
    VOLATILE NOT LEAKPROOF
AS $BODY$
	DECLARE
		-- Application logic constants
		-- AWS
		_AWS_ID   TEXT DEFAULT 'AKIAWFGHEGTRHGFHGHJN'; -- access key
		_AWS_SECRET   TEXT DEFAULT 'GaL6IdryEXkmDFbfgNfgHfhgfdHgfhngfhFdGdsf'; -- Secret key
		_AWS_REGION   TEXT DEFAULT 'us-east-1'; -- Region
		_AWS_SERVICE   TEXT DEFAULT 's3'; -- Service
		_AWS_REQUEST_TYPE   TEXT DEFAULT 'aws4_request'; -- Request type
		_AWS_BUCKET   TEXT DEFAULT 'app-media'; -- Bucket
		_AWS_DEL_PREFIX TEXT DEFAULT '000delthisitem_'; -- del this item prefix
		_AWS_MIN_SIZE   INT DEFAULT 0; -- Min upload size : 0
		_AWS_MAX_SIZE   INT DEFAULT 10000000; -- Max upload size : 10mb
		_AWS_CONTENT_TYPE_PREFIX   TEXT DEFAULT 'image/'; -- Content type prefix
		_AWS_SUCCESS_ACTION_STATUS TEXT DEFAULT '201';
		_AWS_CONTENT_CACHE   TEXT DEFAULT 'max-age=2592000, no-transform, public'; -- Content cache control
		_AWS_ACL   TEXT DEFAULT 'public-read'; -- Canned ACL
		_AWS_ALGO   TEXT DEFAULT 'AWS4-HMAC-SHA256'; -- Official AWS signing algorithm identifier
		_ALGO   TEXT DEFAULT 'sha256'; -- Hash algorithm used with hmac()
		_EXP INT DEFAULT 3600; -- Expiry in seconds
		
		-- Variables		
		s3_credentials		TEXT DEFAULT NULL;		
		s3_policy	TEXT;
		s3_dateKey bytea;
		s3_dateRegionKey bytea;
		s3_dateRegionServiceKey bytea;
		s3_signingKey bytea;
		s3_signature TEXT;
		s3_date		TEXT DEFAULT REPLACE(TO_CHAR(NOW(), 'yyyymmddT HH24MISSZ'),' ', '');
		s3_shortDate	TEXT DEFAULT TO_CHAR(NOW(), 'yyyymmdd');
		s3_expire_at 	TEXT DEFAULT REPLACE(TO_CHAR((NOW() + CONCAT(_EXP, ' seconds')::interval), 'yyyy-mm-ddT HH24:MI:SSZ'),' ', '');
		base64policy TEXT;
		s3_object_key TEXT;
		
		out_json JSONB DEFAULT '{}'::JSONB;
	BEGIN
	
		s3_credentials := _AWS_ID || '/' || s3_shortDate || '/' || _AWS_REGION || '/' || _AWS_SERVICE || '/' || _AWS_REQUEST_TYPE;
		s3_object_key := _AWS_DEL_PREFIX || NEW.mid || '/file';
		s3_policy := ('{' ||
			'"expiration": "'||  s3_expire_at ||'",' ||
			'"conditions": [' ||
				'{"bucket": "'|| _AWS_BUCKET ||'"},' ||
				'{"acl": "'|| _AWS_ACL ||'"},' ||
				'{"key": "'|| s3_object_key || '"},' ||
				'["content-length-range", '||_AWS_MIN_SIZE::TEXT||',  '||_AWS_MAX_SIZE::TEXT||'],' ||
				'["starts-with", "$Content-Type", "'||_AWS_CONTENT_TYPE_PREFIX||'"],' ||
				'{"success_action_status": "201"},' ||
-- 				'{"x-amz-expires": "'||_EXP||'"},' ||
				'{"Cache-Control": "'||_AWS_CONTENT_CACHE||'"},' ||
				'{"x-amz-credential": "'||s3_credentials||'"},' ||
				'{"x-amz-date": "'|| s3_date ||'"},' ||
				'{"x-amz-algorithm": "'||_AWS_ALGO||'"}' ||
        	']' ||
		'}')::JSONB::TEXT;

		base64policy := regexp_replace(encode(CONVERT_TO(s3_policy, 'UTF-8'), 'base64'), E'[\\n\\r]+', '', 'g');
		
		s3_dateKey := hmac(CONVERT_TO(s3_shortDate, 'UTF-8'), CONVERT_TO('AWS4'||_AWS_SECRET, 'UTF-8'), _ALGO);
 		s3_dateRegionKey := hmac(CONVERT_TO(_AWS_REGION, 'UTF-8'), s3_dateKey, _ALGO);
 		s3_dateRegionServiceKey := hmac(CONVERT_TO(_AWS_SERVICE, 'UTF-8'), s3_dateRegionKey, _ALGO);
 		s3_signingKey := hmac(CONVERT_TO(_AWS_REQUEST_TYPE, 'UTF-8'), s3_dateRegionServiceKey, _ALGO);
 		s3_signature := encode(hmac(CONVERT_TO(base64policy, 'UTF-8')::bytea, s3_signingKey, _ALGO), 'hex');
		
-- 		RAISE INFO 'base64policy: %', base64policy;
-- 		RAISE INFO 's3_dateKey: %', s3_dateKey;
-- 		RAISE INFO 's3_dateRegionKey: %', s3_dateRegionKey;
-- 		RAISE INFO 's3_dateRegionServiceKey: %', s3_dateRegionServiceKey;
-- 		RAISE INFO 's3_signingKey: %', s3_signingKey;
-- 		RAISE EXCEPTION 's3_signature: %', s3_signature;
		
		out_json := ('{' ||
			'"Policy": ' || to_json(base64Policy) || ',' ||
			'"Key": "' || s3_object_key || '",' ||
			'"Bucket": "' || _AWS_BUCKET || '",' ||
			'"Acl": "' || _AWS_ACL || '",' ||
			'"Content-Type": "' || _AWS_CONTENT_TYPE_PREFIX || '",' ||
			'"Cache-Control": "' || _AWS_CONTENT_CACHE || '",' ||
			'"success_action_status": "' || _AWS_SUCCESS_ACTION_STATUS || '",' ||
            '"X-amz-credential":  "' || s3_credentials || '",' ||
            '"X-amz-algorithm":  "' || _AWS_ALGO || '",' ||
            '"X-amz-date":  "' || s3_date || '",' ||
            '"X-amz-signature":  "' || s3_signature || '"' ||
		'}')::JSONB;
		
		--out_json := jsonb_set(out_json, '{policy}', FORMAT(E'%s', base64Policy)::JSONB);
		--out_json := jsonb_set(out_json, '{X-amz-date}', FORMAT(E'%s', s3_date)::JSONB);
		
		NEW.s3_signed_data := out_json;
		
		RETURN NEW;
	END;
$BODY$;

ALTER FUNCTION dbname.trigger_media_validation()
    OWNER TO username;

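For context: the function above depends on hmac() from the pgcrypto extension, and it only fires once a trigger is attached to the media table. A minimal sketch of that wiring, assuming the table is dbname.media with the mid and s3_signed_data columns referenced above (the real table name isn't shown here):

CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE TRIGGER media_presign_upload
    BEFORE INSERT ON dbname.media  -- hypothetical table name
    FOR EACH ROW
    EXECUTE FUNCTION dbname.trigger_media_validation();

(EXECUTE FUNCTION requires PostgreSQL 11+; on older versions use EXECUTE PROCEDURE.)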
The above SQL trigger comes from a standalone app of mine (a different app, not an Nhost app), and it creates pre-signed URLs for uploads rather than downloads, but something similar could be done for downloads (see the sketch further below).

At the very least, I shouldn't have to make hundreds of requests to fetch pre-signed URLs when a single request could return multiple pre-signed URLs.
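For the download direction, the same HMAC chain can presign GET URLs entirely in SQL, so a single query can return URLs for any number of rows. Below is a rough, untested sketch (not something hasura-storage provides today): a set-returning function, presign_get_urls, that applies SigV4 query-string signing to an array of object keys. The credentials, bucket, and region are placeholders, and it assumes object keys contain only URL-safe characters.

CREATE OR REPLACE FUNCTION dbname.presign_get_urls(object_keys TEXT[])
    RETURNS TABLE(object_key TEXT, presigned_url TEXT)
    LANGUAGE 'plpgsql'
    STABLE
AS $BODY$
	DECLARE
		_AWS_ID     TEXT DEFAULT 'AKIA...';  -- access key (placeholder)
		_AWS_SECRET TEXT DEFAULT '...';      -- secret key (placeholder)
		_AWS_REGION TEXT DEFAULT 'us-east-1';
		_AWS_BUCKET TEXT DEFAULT 'app-media';
		_EXP        INT  DEFAULT 3600;       -- URL lifetime in seconds

		_host       TEXT;
		_amz_date   TEXT DEFAULT TO_CHAR(NOW() AT TIME ZONE 'UTC', 'YYYYMMDD"T"HH24MISS"Z"');
		_short_date TEXT DEFAULT TO_CHAR(NOW() AT TIME ZONE 'UTC', 'YYYYMMDD');
		_scope      TEXT;
		_date_key                BYTEA;
		_date_region_key         BYTEA;
		_date_region_service_key BYTEA;
		_signing_key             BYTEA;
		_query      TEXT;
		_canonical  TEXT;
		_to_sign    TEXT;
		_key        TEXT;
	BEGIN
		_host  := _AWS_BUCKET || '.s3.' || _AWS_REGION || '.amazonaws.com';
		_scope := _short_date || '/' || _AWS_REGION || '/s3/aws4_request';

		-- Same signing-key derivation as in the upload policy above
		_date_key                := hmac(CONVERT_TO(_short_date, 'UTF8'), CONVERT_TO('AWS4' || _AWS_SECRET, 'UTF8'), 'sha256');
		_date_region_key         := hmac(CONVERT_TO(_AWS_REGION, 'UTF8'), _date_key, 'sha256');
		_date_region_service_key := hmac(CONVERT_TO('s3', 'UTF8'), _date_region_key, 'sha256');
		_signing_key             := hmac(CONVERT_TO('aws4_request', 'UTF8'), _date_region_service_key, 'sha256');

		FOREACH _key IN ARRAY object_keys LOOP
			-- Canonical query string; parameter names are already in alphabetical order
			_query := 'X-Amz-Algorithm=AWS4-HMAC-SHA256'
				|| '&X-Amz-Credential=' || REPLACE(_AWS_ID || '/' || _scope, '/', '%2F')
				|| '&X-Amz-Date=' || _amz_date
				|| '&X-Amz-Expires=' || _EXP::TEXT
				|| '&X-Amz-SignedHeaders=host';

			-- Canonical request for a presigned GET with an unsigned payload
			_canonical := 'GET' || E'\n'
				|| '/' || _key || E'\n'
				|| _query || E'\n'
				|| 'host:' || _host || E'\n'
				|| E'\n'
				|| 'host' || E'\n'
				|| 'UNSIGNED-PAYLOAD';

			_to_sign := 'AWS4-HMAC-SHA256' || E'\n'
				|| _amz_date || E'\n'
				|| _scope || E'\n'
				|| encode(digest(CONVERT_TO(_canonical, 'UTF8'), 'sha256'), 'hex');

			object_key    := _key;
			presigned_url := 'https://' || _host || '/' || _key || '?' || _query
				|| '&X-Amz-Signature='
				|| encode(hmac(CONVERT_TO(_to_sign, 'UTF8'), _signing_key, 'sha256'), 'hex');
			RETURN NEXT;
		END LOOP;
	END;
$BODY$;

Exposing something like this as a Hasura SQL function or computed field would let one GraphQL query return every download URL at once, e.g. SELECT * FROM dbname.presign_get_urls(ARRAY['000delthisitem_123/file']);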

The ideal solution would be to get the presigned URLs in that first request, using some sort of trigger function

I am afraid I don't understand: which first request?

This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.