Merge pull request #25 from CareerPlug/CP-11289

CP-11289 - Add CloudFront signed URLs for secured document storage
pull/544/head
Bernardo Anderson 2 months ago committed by GitHub
commit d5e21021d6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -0,0 +1,199 @@
# =============================================================================
# DOCUSEAL S3 CONFIGURATION TEMPLATE
# =============================================================================
# Copy this file to .env (dropping the .template extension) and customize
# the values for your environment.
#
# SECURITY NOTE: Never commit actual credentials to version control!
# Use environment-specific .env files and add them to .gitignore.
# =============================================================================
# =============================================================================
# AWS CREDENTIALS
# =============================================================================
# Required: AWS access key ID for programmatic access
# Get this from AWS IAM console -> Users -> Security credentials
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
# Required: AWS secret access key for programmatic access
# Keep this confidential and never share or commit to version control
AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
# Optional: AWS session token for temporary credentials
# Only required when using temporary credentials (e.g., with AWS STS)
# AWS_SESSION_TOKEN=AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE=
# =============================================================================
# AWS S3 CONFIGURATION
# =============================================================================
# Required: AWS region where your S3 bucket is located
# Examples: us-east-1, us-west-2, eu-west-1, ap-southeast-1
AWS_REGION=us-east-1
# Required: S3 bucket name for storing attachments
# Must be globally unique and follow S3 bucket naming rules
# Recommended format: your-company-docuseal-attachments-env
S3_ATTACHMENTS_BUCKET=your-company-docuseal-attachments-production
# =============================================================================
# S3 ACCESS CONTROL
# =============================================================================
# Optional: Whether files should be publicly accessible via direct URLs
# Set to 'true' for public access, 'false' for private access
# Private files require presigned URLs for access (more secure)
# Default: false (recommended for production)
ACTIVE_STORAGE_PUBLIC=false
# Optional: Expiration time for presigned URLs (in minutes)
# Only used when ACTIVE_STORAGE_PUBLIC=false
# Default: 240 minutes (4 hours)
PRESIGNED_URLS_EXPIRE_MINUTES=240
# =============================================================================
# S3 SECURITY OPTIONS
# =============================================================================
# Optional: Server-side encryption for uploaded files
# Options:
# - AES256 (S3-managed encryption)
# - aws:kms (KMS-managed encryption with AWS KMS key)
# - aws:kms:dsse (KMS-managed encryption with double server-side encryption)
# Uncomment the desired option below
# S3_SERVER_SIDE_ENCRYPTION=AES256
# S3_SERVER_SIDE_ENCRYPTION=aws:kms
# Optional: AWS KMS Key ID for KMS-managed encryption
# Only required when using aws:kms encryption with a specific KMS key
# S3_KMS_KEY_ID=arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
# Optional: Force path-style URLs instead of virtual-hosted-style URLs
# Set to 'true' for S3-compatible services (MinIO, DigitalOcean Spaces, etc.)
# or if you encounter DNS resolution issues
# Default: false
# S3_FORCE_PATH_STYLE=false
# =============================================================================
# S3 ENDPOINT CONFIGURATION (FOR S3-COMPATIBLE SERVICES)
# =============================================================================
# Optional: Custom S3 endpoint URL
# Only required for S3-compatible services (MinIO, DigitalOcean Spaces, etc.)
# S3_ENDPOINT=https://nyc3.digitaloceanspaces.com
# =============================================================================
# ADVANCED S3 OPTIONS
# =============================================================================
# Optional: S3 storage class for uploaded files
# Options: STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
# GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
# Default: STANDARD
# S3_STORAGE_CLASS=STANDARD
# Optional: Cache control header for uploaded files
# Affects browser caching behavior for publicly accessible files
# Default: 'public, max-age=31536000' (1 year)
# S3_CACHE_CONTROL=public, max-age=31536000
# Optional: Content disposition for uploaded files
# Controls how browsers handle file downloads
# S3_CONTENT_DISPOSITION=attachment
# =============================================================================
# AWS IAM ROLE CONFIGURATION (ALTERNATIVE TO ACCESS KEYS)
# =============================================================================
# Optional: Use IAM role instead of access keys (recommended for EC2/ECS)
# When using IAM roles, you don't need to set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# The role must have appropriate S3 permissions
# Optional: AWS profile name for credential configuration
# Uses credentials from ~/.aws/credentials file
# AWS_PROFILE=default
# Optional: AWS credentials file path
# Default: ~/.aws/credentials
# AWS_SHARED_CREDENTIALS_FILE=/path/to/credentials
# Optional: AWS config file path
# Default: ~/.aws/config
# AWS_CONFIG_FILE=/path/to/config
# =============================================================================
# MONITORING AND DEBUGGING
# =============================================================================
# Optional: Enable AWS SDK logging
# Set to 'true' for debug output, 'false' to disable
# Default: false
# AWS_SDK_LOGGING=false
# Optional: AWS SDK log level
# Options: DEBUG, INFO, WARN, ERROR, FATAL
# Default: INFO
# AWS_SDK_LOG_LEVEL=INFO
# =============================================================================
# CLOUDFRONT SIGNED URLs (SECURED STORAGE)
# =============================================================================
# Optional: CloudFront distribution URL for secure document access
# Required for secured storage with signed URLs (production deployments)
# Format: https://your-cloudfront-domain.cloudfront.net
# SECURITY NOTE: Set via cpdocuseal deployment config, not committed to repo
# CF_URL=https://example.cloudfront.net
# Optional: CloudFront key pair ID for signing URLs
# Required when using CloudFront signed URLs for document access
# SECURITY NOTE: Set via cpdocuseal deployment config, not committed to repo
# CF_KEY_PAIR_ID=K1234567890ABC
# Optional: CloudFront private key secret path in AWS Secrets Manager
# The initializer will load the private key from this secret location
# Format: environment/cloudfront/private_key
# SECURITY NOTE: Set via cpdocuseal deployment config, not committed to repo
# CF_KEY_SECRET=production/cloudfront/private_key
# Optional: Secured storage bucket name (shared with ATS for compliance)
# Required when using secured CloudFront storage
# SECURITY NOTE: Set via cpdocuseal deployment config, not committed to repo
# SECURED_STORAGE_BUCKET=your-company-compliance-documents
# Optional: Secured storage region
# Default: us-east-1
# SECURED_STORAGE_REGION=us-east-1
# Optional: Disable secured storage in development
# Set to 'true' to use local disk storage instead of secured S3/CloudFront
# Only applies in development environment
# DOCUSEAL_DISABLE_SECURED_STORAGE=true
# =============================================================================
# EXAMPLE CONFIGURATIONS
# =============================================================================
#
# DEVELOPMENT (Local Disk Storage):
# Comment out all S3 variables above
# The application will use local disk storage automatically
#
# STAGING (Basic S3):
# AWS_ACCESS_KEY_ID=your_staging_access_key
# AWS_SECRET_ACCESS_KEY=your_staging_secret_key
# AWS_REGION=us-east-1
# S3_ATTACHMENTS_BUCKET=your-company-docuseal-staging
# ACTIVE_STORAGE_PUBLIC=true
#
# PRODUCTION (Secure S3):
# AWS_ACCESS_KEY_ID=your_production_access_key
# AWS_SECRET_ACCESS_KEY=your_production_secret_key
# AWS_REGION=us-east-1
# S3_ATTACHMENTS_BUCKET=your-company-docuseal-production
# ACTIVE_STORAGE_PUBLIC=false
# PRESIGNED_URLS_EXPIRE_MINUTES=60
# S3_SERVER_SIDE_ENCRYPTION=AES256
# S3_STORAGE_CLASS=STANDARD_IA
#
# MINIO (Self-hosted S3-compatible):
# AWS_ACCESS_KEY_ID=minioadmin
# AWS_SECRET_ACCESS_KEY=minioadmin
# AWS_REGION=us-east-1
# S3_ENDPOINT=http://localhost:9000
# S3_FORCE_PATH_STYLE=true
# S3_ATTACHMENTS_BUCKET=docuseal-minio
# ACTIVE_STORAGE_PUBLIC=true
#
# =============================================================================

@ -5,6 +5,7 @@ source 'https://rubygems.org'
ruby '3.4.2'
gem 'arabic-letter-connector', require: 'arabic-letter-connector/logic'
gem 'aws-sdk-cloudfront', require: false
gem 'aws-sdk-s3', require: false
gem 'aws-sdk-secretsmanager', require: false
gem 'azure-storage-blob', require: false

@ -83,6 +83,9 @@ GEM
ast (2.4.2)
aws-eventstream (1.3.0)
aws-partitions (1.1027.0)
aws-sdk-cloudfront (1.108.0)
aws-sdk-core (~> 3, >= 3.210.0)
aws-sigv4 (~> 1.5)
aws-sdk-core (3.214.0)
aws-eventstream (~> 1, >= 1.3.0)
aws-partitions (~> 1, >= 1.992.0)
@ -597,6 +600,7 @@ DEPENDENCIES
airbrake
annotaterb
arabic-letter-connector
aws-sdk-cloudfront
aws-sdk-s3
aws-sdk-secretsmanager
azure-storage-blob

@ -57,11 +57,16 @@ class SubmissionsDownloadController < ApplicationController
key: AccountConfig::DOCUMENT_FILENAME_FORMAT_KEY)&.value
Submitters.select_attachments_for_download(submitter).map do |attachment|
ActiveStorage::Blob.proxy_url(
attachment.blob,
expires_at: FILES_TTL.from_now.to_i,
filename: Submitters.build_document_filename(submitter, attachment.blob, filename_format)
)
# Use signed URLs for secured storage
if uses_secured_storage?(attachment)
DocumentSecurityService.signed_url_for(attachment, expires_in: FILES_TTL)
else
ActiveStorage::Blob.proxy_url(
attachment.blob,
expires_at: FILES_TTL.from_now.to_i,
filename: Submitters.build_document_filename(submitter, attachment.blob, filename_format)
)
end
end
end
@ -75,10 +80,19 @@ class SubmissionsDownloadController < ApplicationController
filename_format = AccountConfig.find_or_initialize_by(account_id: submitter.account_id,
key: AccountConfig::DOCUMENT_FILENAME_FORMAT_KEY)&.value
ActiveStorage::Blob.proxy_url(
attachment.blob,
expires_at: FILES_TTL.from_now.to_i,
filename: Submitters.build_document_filename(submitter, attachment.blob, filename_format)
)
# Use signed URLs for secured storage
if uses_secured_storage?(attachment)
DocumentSecurityService.signed_url_for(attachment, expires_in: FILES_TTL)
else
ActiveStorage::Blob.proxy_url(
attachment.blob,
expires_at: FILES_TTL.from_now.to_i,
filename: Submitters.build_document_filename(submitter, attachment.blob, filename_format)
)
end
end
# True when the attachment's blob is stored on the secured S3 service
# (completed documents served via CloudFront signed URLs).
def uses_secured_storage?(attachment)
  service = attachment.blob.service_name
  service == 'aws_s3_secured'
end
end

@ -32,9 +32,20 @@ class SubmitFormDownloadController < ApplicationController
end
urls = attachments.map do |attachment|
ActiveStorage::Blob.proxy_url(attachment.blob, expires_at: FILES_TTL.from_now.to_i)
# Use signed URLs for secured storage
if uses_secured_storage?(attachment)
DocumentSecurityService.signed_url_for(attachment, expires_in: FILES_TTL)
else
ActiveStorage::Blob.proxy_url(attachment.blob, expires_at: FILES_TTL.from_now.to_i)
end
end
render json: urls
end
private
# Whether +attachment+ lives in the secured S3 service and therefore
# needs a CloudFront signed URL instead of a proxy URL.
def uses_secured_storage?(attachment)
  attachment.blob.service_name.eql?('aws_s3_secured')
end
end

@ -58,10 +58,21 @@ class ProcessSubmitterCompletionJob
submitter.documents.filter_map do |attachment|
next if attachment.metadata['sha256'].blank?
CompletedDocument.find_or_create_by!(sha256: attachment.metadata['sha256'], submitter_id: submitter.id)
# Determine storage location based on service used
storage_location = determine_storage_location_for_attachment(attachment)
CompletedDocument.find_or_create_by!(sha256: attachment.metadata['sha256'], submitter_id: submitter.id) do |doc|
doc.storage_location = storage_location
end
end
end
# Map an attachment's Active Storage service to the CompletedDocument
# storage_location value: 'secured' for the shared secured bucket,
# 'legacy' for any other service.
def determine_storage_location_for_attachment(attachment)
  if attachment.blob.service_name == 'aws_s3_secured'
    'secured'
  else
    'legacy'
  end
end
def enqueue_completed_webhooks(submitter, is_all_completed: false)
WebhookUrls.for_account_id(submitter.account_id, %w[form.completed submission.completed]).each do |webhook|
if webhook.events.include?('form.completed')

@ -4,19 +4,46 @@
#
# Table name: completed_documents
#
# id :bigint not null, primary key
# sha256 :string not null
# created_at :datetime not null
# updated_at :datetime not null
# submitter_id :bigint not null
# id :bigint not null, primary key
# sha256 :string not null
# storage_location :string default("secured")
# created_at :datetime not null
# updated_at :datetime not null
# submitter_id :bigint not null
#
# Indexes
#
# index_completed_documents_on_sha256 (sha256)
# index_completed_documents_on_submitter_id (submitter_id)
# index_completed_documents_on_sha256 (sha256)
# index_completed_documents_on_storage_location (storage_location)
# index_completed_documents_on_submitter_id (submitter_id)
#
# Records documents that finished the signing flow, including WHERE the
# final file lives so reads can pick the matching Active Storage service.
class CompletedDocument < ApplicationRecord
belongs_to :submitter, optional: true
has_one :completed_submitter, primary_key: :submitter_id, inverse_of: :completed_documents, dependent: :destroy
# String-backed enum; `suffix: true` also generates the Rails predicates
# `legacy_storage_location?` / `secured_storage_location?`.
enum :storage_location, {
legacy: 'legacy', # Fallback for development/testing
secured: 'secured' # Default secured storage (shared with ATS)
}, suffix: true
# Check if document uses secured storage (default for new documents)
def uses_secured_storage?
storage_location == 'secured'
end
# Get appropriate Active Storage service name
# ('aws_s3_secured' for secured rows, otherwise the app-wide default).
def storage_service_name
uses_secured_storage? ? 'aws_s3_secured' : Rails.application.config.active_storage.service
end
# Generate signed URL for secured documents (same system as ATS)
# @param attachment [ActiveStorage::Attachment] The attachment to generate URL for
# @param expires_in [ActiveSupport::Duration] How long the URL should be valid
# @return [String] Signed CloudFront URL or regular URL for legacy storage
# NOTE(review): `expires_in` is ignored on the legacy path — `attachment.url`
# uses the storage service's default expiry. Confirm callers accept that.
def signed_url_for(attachment, expires_in: 1.hour)
return attachment.url unless uses_secured_storage?
DocumentSecurityService.signed_url_for(attachment, expires_in: expires_in)
end
end

@ -13,7 +13,7 @@
# Indexes
#
# index_document_generation_events_on_submitter_id (submitter_id)
# index_document_generation_events_on_submitter_id_and_event_name (submitter_id,event_name) UNIQUE WHERE ((event_name)::text = ANY ((ARRAY['start'::character varying, 'complete'::character varying])::text[]))
# index_document_generation_events_on_submitter_id_and_event_name (submitter_id,event_name) UNIQUE WHERE ((event_name)::text = ANY (ARRAY[('start'::character varying)::text, ('complete'::character varying)::text]))
#
# Foreign Keys
#

@ -20,7 +20,7 @@
#
# index_email_events_on_account_id_and_event_datetime (account_id,event_datetime)
# index_email_events_on_email (email)
# index_email_events_on_email_event_types (email) WHERE ((event_type)::text = ANY ((ARRAY['bounce'::character varying, 'soft_bounce'::character varying, 'complaint'::character varying, 'soft_complaint'::character varying])::text[]))
# index_email_events_on_email_event_types (email) WHERE ((event_type)::text = ANY (ARRAY[('bounce'::character varying)::text, ('soft_bounce'::character varying)::text, ('complaint'::character varying)::text, ('soft_complaint'::character varying)::text]))
# index_email_events_on_emailable (emailable_type,emailable_id)
# index_email_events_on_message_id (message_id)
#

@ -0,0 +1,61 @@
# frozen_string_literal: true
# Service for handling secure document access with CloudFront signed URLs
# Reuses same infrastructure and key pairs as ATS
# Service for handling secure document access with CloudFront signed URLs.
# Reuses the same infrastructure and key pairs as ATS.
class DocumentSecurityService
  class << self
    # Build a time-limited CloudFront signed URL for +attachment+.
    #
    # @param attachment [ActiveStorage::Attachment] attachment to expose
    # @param expires_in [ActiveSupport::Duration] validity window
    # @return [String] signed CloudFront URL, or the plain attachment URL
    #   when CloudFront is not configured or signing raises
    def signed_url_for(attachment, expires_in: 1.hour)
      return attachment.url unless cloudfront_configured?

      resource_url = build_cloudfront_url(attachment)
      cloudfront_signer.signed_url(resource_url, expires: expires_in.from_now.to_i)
    rescue StandardError => e
      # Best-effort fallback: log and serve the unsigned URL rather than fail.
      Rails.logger.error("Failed to generate signed URL: #{e.message}")
      attachment.url
    end

    private

    # All three pieces of CloudFront configuration must be present to sign.
    def cloudfront_configured?
      [cloudfront_base_url, cloudfront_key_pair_id, cloudfront_private_key].all?(&:present?)
    end

    # Memoized signer built from the key pair id and private key.
    def cloudfront_signer
      @cloudfront_signer ||= Aws::CloudFront::UrlSigner.new(
        key_pair_id: cloudfront_key_pair_id,
        private_key: cloudfront_private_key
      )
    end

    # Map the blob's S3 key onto the CloudFront distribution, ensuring the
    # 'docuseal/' prefix used to namespace DocuSeal objects in the shared bucket.
    def build_cloudfront_url(attachment)
      key = attachment.blob.key
      key = "docuseal/#{key}" unless key.start_with?('docuseal/')
      "#{cloudfront_base_url}/#{key}"
    end

    def cloudfront_base_url
      @cloudfront_base_url ||= ENV.fetch('CF_URL', nil)
    end

    def cloudfront_key_pair_id
      @cloudfront_key_pair_id ||= ENV.fetch('CF_KEY_PAIR_ID', nil)
    end

    # Loaded into ENV at boot by the Secrets Manager initializer.
    def cloudfront_private_key
      @cloudfront_private_key ||= ENV.fetch('SECURE_ATTACHMENT_PRIVATE_KEY', nil)
    end
  end
end

@ -0,0 +1,18 @@
# frozen_string_literal: true
require 'aws-sdk-secretsmanager'
# Load CloudFront private key from AWS Secrets Manager (same as ATS)
# Configuration loaded from environment variables (set in cpdocuseal deployment)
# Runs once at boot; DocumentSecurityService later reads the key from ENV.
key_secret = ENV.fetch('CF_KEY_SECRET', nil)
if key_secret.present?
begin
# Client resolves credentials from the standard AWS chain (IAM role/env).
client = Aws::SecretsManager::Client.new
response = client.get_secret_value(secret_id: key_secret)
# NOTE(review): mutates process ENV at boot so the key is visible to the
# signing service without persisting it to disk — confirm no child
# processes should be shielded from inheriting this value.
ENV['SECURE_ATTACHMENT_PRIVATE_KEY'] = response.secret_string
Rails.logger.info('Successfully loaded CloudFront private key from Secrets Manager')
rescue StandardError => e
# Deliberate best-effort: boot continues; signed URLs fall back to
# unsigned attachment URLs until the key can be loaded.
Rails.logger.error("Failed to load CloudFront private key: #{e.message}")
end
end

@ -13,6 +13,16 @@ aws_s3:
upload:
cache_control: 'public, max-age=31536000'
# Secured storage service for completed documents (reuses ATS infrastructure)
# Uses IAM role for authentication in staging/production (no keys needed)
# Set SECURED_STORAGE_BUCKET and SECURED_STORAGE_REGION in Secrets Manager
aws_s3_secured:
service: S3
bucket: <%= ENV['SECURED_STORAGE_BUCKET'] %>
# Default region matches the shared ATS compliance bucket.
region: <%= ENV['SECURED_STORAGE_REGION'] || 'us-east-1' %>
# Objects stay private; access goes through CloudFront signed URLs.
public: false
# NOTE(review): path-style addressing is normally only needed for
# S3-compatible endpoints (MinIO etc.) — confirm it is intended for AWS S3.
force_path_style: true
test:
service: Disk
root: <%= Rails.root.join("tmp/storage") %>

@ -0,0 +1,6 @@
# Adds a storage_location discriminator to completed_documents so reads can
# route between the legacy service and the secured S3/CloudFront service.
class AddStorageLocationToCompletedDocuments < ActiveRecord::Migration[8.0]
def change
# NOTE(review): `default: 'secured'` also applies to PRE-EXISTING rows,
# which were written before secured storage existed and likely live in
# legacy storage — confirm a backfill to 'legacy' is not needed.
add_column :completed_documents, :storage_location, :string, default: 'secured'
add_index :completed_documents, :storage_location
end
end

@ -624,8 +624,23 @@ module Submissions
end
end
# Determine storage service for secured storage
service_name = determine_storage_service
# Create blob with appropriate service
blob = if service_name == 'aws_s3_secured'
# For secured storage, create blob with custom key including docuseal prefix
create_secured_blob(io.tap(&:rewind), "#{name}.pdf", service_name)
else
# For regular storage, use standard creation
ActiveStorage::Blob.create_and_upload!(
io: io.tap(&:rewind),
filename: "#{name}.pdf"
)
end
ActiveStorage::Attachment.new(
blob: ActiveStorage::Blob.create_and_upload!(io: io.tap(&:rewind), filename: "#{name}.pdf"),
blob: blob,
metadata: { original_uuid: uuid,
analyzed: true,
sha256: Base64.urlsafe_encode64(Digest::SHA256.digest(io.string)) },
@ -830,6 +845,43 @@ module Submissions
end
end
# Pick the Active Storage service for newly generated documents.
#
# Secured CloudFront-backed storage is used when its configuration is
# present; otherwise (or when explicitly disabled in development) the
# application's default service is used.
#
# @return [String, Symbol] Active Storage service name
def determine_storage_service
  # Explicit opt-out for local development.
  if Rails.env.development? && ENV['DOCUSEAL_DISABLE_SECURED_STORAGE'].present?
    return Rails.application.config.active_storage.service
  end

  # Require the full secured-storage configuration before routing uploads
  # to aws_s3_secured: the CloudFront URL and key pair id for signing
  # reads, AND the bucket the service writes to. Checking only
  # CF_URL/CF_KEY_PAIR_ID could send uploads to a service whose bucket
  # (SECURED_STORAGE_BUCKET in storage.yml) is unset.
  if ENV['CF_URL'].present? && ENV['CF_KEY_PAIR_ID'].present? && ENV['SECURED_STORAGE_BUCKET'].present?
    'aws_s3_secured'
  else
    Rails.application.config.active_storage.service
  end
end
# Create and upload a blob to the secured S3 service under a
# 'docuseal/'-prefixed key so DocuSeal documents are namespaced inside
# the bucket shared with ATS.
#
# @param io [StringIO, IO] document contents, positioned at the start
# @param filename [String] filename to record on the blob (e.g. "doc.pdf")
# @param service_name [String] Active Storage service to upload through
# @return [ActiveStorage::Blob] the uploaded blob
def create_secured_blob(io, filename, service_name)
  # Unique, namespaced object key; the interpolation was garbled in the
  # committed version ("#(unknown)") and must embed the filename.
  key = "docuseal/#{SecureRandom.uuid}/#{filename}"
  blob = ActiveStorage::Blob.create_before_direct_upload!(
    filename: filename,
    byte_size: io.size,
    checksum: Digest::MD5.base64digest(io.read),
    content_type: 'application/pdf',
    service_name: service_name
  )
  # Replace the auto-generated key with the prefixed one. update_column
  # skips validations/callbacks, which is acceptable here because nothing
  # has been uploaded under the old key yet.
  blob.update_column(:key, key)
  # The checksum read above consumed `io`; rewind before uploading.
  io.rewind
  blob.upload(io)
  blob
end
# Shorthand accessor for the Rails route URL helpers.
def h = Rails.application.routes.url_helpers

Loading…
Cancel
Save