diff --git a/api/src/services/assets.ts b/api/src/services/assets.ts
index 8a7d1de6cc2cd..0ab2bb2f33970 100644
--- a/api/src/services/assets.ts
+++ b/api/src/services/assets.ts
@@ -79,12 +79,26 @@ export class AssetsService {
 		const systemPublicKeys = Object.values(publicSettings || {});
 
+		console.log('ENV:ASSETS_TRANSFORM_IMAGE_MAX_DIMENSION', env['ASSETS_TRANSFORM_IMAGE_MAX_DIMENSION']);
+		console.log('ENV:ASSETS_TRANSFORM_MAX_CONCURRENT', env['ASSETS_TRANSFORM_MAX_CONCURRENT']);
+		console.log('ENV:ASSETS_TRANSFORM_TIMEOUT', env['ASSETS_TRANSFORM_TIMEOUT']);
+
 		/**
 		 * This is a little annoying. Postgres will error out if you're trying to search in `where`
 		 * with a wrong type. In case of directus_files where id is a uuid, we'll have to verify the
 		 * validity of the uuid ahead of time.
 		 */
-		if (!isValidUuid(id)) throw new ForbiddenError();
+		if (!isValidUuid(id)) {
+			console.warn(`[ForbiddenError:InvalidUUID] Invalid UUID provided`, {
+				id,
+				accountability: this.accountability,
+				action: 'read',
+				collection: 'directus_files',
+				timestamp: new Date().toISOString(),
+			});
+
+			throw new ForbiddenError({ reason: 'Invalid UUID provided' });
+		}
 
 		if (systemPublicKeys.includes(id) === false && this.accountability) {
 			await validateAccess(
@@ -102,7 +116,17 @@ export class AssetsService {
 		const exists = await storage.location(file.storage).exists(file.filename_disk);
 
-		if (!exists) throw new ForbiddenError();
+		if (!exists) {
+			console.warn(`[ForbiddenError:FileMissing] File missing in storage`, {
+				id,
+				filename: file.filename_disk,
+				storage: file.storage,
+				accountability: this.accountability,
+				timestamp: new Date().toISOString(),
+			});
+
+			throw new ForbiddenError({ reason: 'File does not exist in storage' });
+		}
 
 		if (range) {
 			const missingRangeLimits = range.start === undefined && range.end === undefined;
@@ -111,6 +135,14 @@ export class AssetsService {
 			const endUnderflow = range.end !== undefined && range.end <= 0;
 
 			if (missingRangeLimits || endBeforeStart || startOverflow || endUnderflow) {
+				console.warn(`[RangeNotSatisfiableError] Invalid range`, {
+					range,
+					fileId: file.id,
+					filename: file.filename_disk,
+					storage: file.storage,
+					accountability: this.accountability,
+				});
+
 				throw new RangeNotSatisfiableError({ range });
 			}
@@ -151,6 +183,13 @@ export class AssetsService {
 		if (type && transforms.length > 0 && SUPPORTED_IMAGE_TRANSFORM_FORMATS.includes(type)) {
 			const maybeNewFormat = TransformationUtils.maybeExtractFormat(transforms);
 
+			console.warn(`[AssetTransform] Preparing transformation`, {
+				fileId: file.id,
+				originalName: file.filename_disk,
+				format: maybeNewFormat,
+				transforms,
+			});
+
 			const assetFilename =
 				path.basename(file.filename_disk, path.extname(file.filename_disk)) +
 				getAssetSuffix(transforms) +
@@ -163,6 +202,12 @@ export class AssetsService {
 			}
 
 			if (exists) {
+				console.warn(`[AssetTransform] Found cached transformed asset`, {
+					fileId: file.id,
+					storage: file.storage,
+					filename: assetFilename,
+				});
+
 				const assetStream = () => storage.location(file.storage).read(assetFilename, { range });
 
 				return {
@@ -172,6 +217,12 @@ export class AssetsService {
 				};
 			}
 
+			console.warn(`[AssetTransform] No cached version found, generating new transformation`, {
+				fileId: file.id,
+				storage: file.storage,
+				transforms,
+			});
+
 			// Check image size before transforming. Processing an image that's too large for the
 			// system memory will kill the API. Sharp technically checks for this too in it's
 			// limitInputPixels, but we should have that check applied before starting the read streams
@@ -183,6 +234,13 @@ export class AssetsService {
 				width > (env['ASSETS_TRANSFORM_IMAGE_MAX_DIMENSION'] as number) ||
 				height > (env['ASSETS_TRANSFORM_IMAGE_MAX_DIMENSION'] as number)
 			) {
+				console.warn(`[AssetTransform] Image too large or dimensions unavailable`, {
+					fileId: file.id,
+					width,
+					height,
+					storage: file.storage,
+				});
+
 				logger.warn(`Image is too large to be transformed, or image size couldn't be determined.`);
 				throw new IllegalAssetTransformationError({ invalidTransformations: ['width', 'height'] });
 			}
@@ -190,6 +248,12 @@ export class AssetsService {
 			const { queue, process } = sharp.counters();
 
 			if (queue + process > (env['ASSETS_TRANSFORM_MAX_CONCURRENT'] as number)) {
+				console.warn(`[AssetTransform] Transformation queue full, server too busy`, {
+					active: process,
+					pending: queue,
+					fileId: file.id,
+				});
+
 				throw new ServiceUnavailableError({
 					service: 'files',
 					reason: 'Server too busy',
@@ -210,37 +274,82 @@ export class AssetsService {
 				}
 			} catch (error) {
 				if (error instanceof Error && error.message.startsWith('Expected')) {
+					console.warn(`[AssetTransform] Error:`, {
+						fileId: file.id,
+						error: error.message,
+					});
+
 					throw new InvalidQueryError({ reason: error.message });
 				}
 
+				console.warn(`[AssetTransform] Error:`, {
+					fileId: file.id,
+					error: error instanceof Error ? error.message : String(error),
+				});
+
 				throw error;
 			}
 
 			const readStream = await storage.location(file.storage).read(file.filename_disk, { range, version });
 
 			readStream.on('error', (e: Error) => {
+				console.warn(`[AssetTransform] Error:`, {
+					fileId: file.id,
+					filename: file.filename_disk,
+					error: e.message,
+				});
+
 				logger.error(e, `Couldn't transform file ${file.id}`);
 				readStream.unpipe(transformer);
 			});
 
 			try {
 				await storage.location(file.storage).write(assetFilename, readStream.pipe(transformer), type);
+
+				console.warn(`[AssetTransform] Successfully wrote transformed asset`, {
+					fileId: file.id,
+					output: assetFilename,
+					storage: file.storage,
+				});
 			} catch (error) {
+				console.warn(`[AssetTransform] Error:`, {
+					fileId: file.id,
+					output: assetFilename,
+					storage: file.storage,
+					error: (error as Error).message,
+				});
+
 				try {
 					await storage.location(file.storage).delete(assetFilename);
 				} catch {
+					console.warn(`[AssetTransform] Cleanup failed after write error`, { fileId: file.id });
 					// Ignored to prevent original error from being overwritten
 				}
 
 				if ((error as Error)?.message?.includes('timeout')) {
+					console.warn(`[AssetTransform] Error:`, { fileId: file.id, error: (error as Error).message });
+
 					throw new ServiceUnavailableError({ service: 'assets', reason: `Transformation timed out` });
 				} else {
+					console.warn(`[AssetTransform] Error:`, {
+						fileId: file.id,
+						output: assetFilename,
+						storage: file.storage,
+						error: (error as Error).message,
+					});
+
 					throw error;
 				}
 			}
 
 			const assetStream = () => storage.location(file.storage).read(assetFilename, { range, version });
 
+			console.warn(`[AssetTransform] Returning transformed asset stream`, {
+				fileId: file.id,
+				filename: assetFilename,
+				storage: file.storage,
+			});
+
 			return {
 				stream: deferStream ? assetStream : await assetStream(),
 				stat: await storage.location(file.storage).stat(assetFilename),
diff --git a/packages/storage-driver-s3/src/index.ts b/packages/storage-driver-s3/src/index.ts
index c1de565b9d717..853a69044d348 100644
--- a/packages/storage-driver-s3/src/index.ts
+++ b/packages/storage-driver-s3/src/index.ts
@@ -69,6 +69,8 @@ export class DriverS3 implements TusDriver {
 	public maxMultipartParts = 10_000 as const;
 	public minPartSize = 5_242_880 as const; // 5MiB
 	public maxUploadSize = 5_497_558_138_880 as const; // 5TiB
+	private activeRequests = 0;
+	private queuedRequests = 0;
 
 	constructor(config: DriverS3Config) {
 		this.config = config;
@@ -158,6 +160,21 @@ export class DriverS3 implements TusDriver {
 	}
 
 	async stat(filepath: string) {
+		this.queuedRequests++;
+		// Log queue state
+		if (this.queuedRequests > 1 || this.activeRequests > 10) {
+			console.log('[S3 REQUEST QUEUE]', JSON.stringify({
+				queued: this.queuedRequests,
+				active: this.activeRequests,
+				total: this.queuedRequests + this.activeRequests,
+				maxSockets: this.config.maxSockets ?? 500,
+				timestamp: new Date().toISOString()
+			}));
+		}
+
+		this.queuedRequests--;
+		this.activeRequests++;
+
 		const { ContentLength, LastModified } = await this.client.send(
 			new HeadObjectCommand({
 				Key: this.fullPath(filepath),
@@ -165,6 +182,8 @@ export class DriverS3 implements TusDriver {
 			}),
 		);
 
+		this.activeRequests--;
+
 		return {
 			size: ContentLength as number,
 			modified: LastModified as Date,
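---

Reviewer note on the stat() instrumentation above: `activeRequests` is only
decremented after `client.send()` resolves, so a rejected HEAD request skips
the decrement and the logged "active" figure drifts upward after the first S3
error. Below is a minimal leak-safe sketch of the same bookkeeping. It is not
the driver itself (the `StatProbe` class and its `bucket` field are
illustrative stand-ins), but it uses only the @aws-sdk/client-s3 calls the
driver already imports:

    import { HeadObjectCommand, S3Client } from '@aws-sdk/client-s3';

    // Illustrative stand-in for DriverS3.stat() carrying the same counters
    // as the patch above.
    class StatProbe {
      private activeRequests = 0;
      private queuedRequests = 0;

      constructor(
        private client: S3Client,
        private bucket: string,
      ) {}

      async stat(key: string) {
        this.queuedRequests++;

        // Same thresholds as the patch: log once calls start stacking up.
        if (this.queuedRequests > 1 || this.activeRequests > 10) {
          console.log('[S3 REQUEST QUEUE]', JSON.stringify({
            queued: this.queuedRequests,
            active: this.activeRequests,
            timestamp: new Date().toISOString(),
          }));
        }

        this.queuedRequests--;
        this.activeRequests++;

        try {
          const { ContentLength, LastModified } = await this.client.send(
            new HeadObjectCommand({ Bucket: this.bucket, Key: key }),
          );

          return { size: ContentLength as number, modified: LastModified as Date };
        } finally {
          // Runs on rejection as well as success, so a failed HEAD request
          // cannot leave `activeRequests` permanently inflated.
          this.activeRequests--;
        }
      }
    }

The queued counter needs no such guard here because nothing between its
increment and decrement can throw; only the awaited send() inside the try can.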