diff --git a/netlify.toml b/netlify.toml
index dccd624..332110c 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -58,6 +58,17 @@
   to = "/.netlify/functions/mcp-context"
   status = 200
 
+# Chunked blob upload (init, chunk, complete, status)
+[[redirects]]
+  from = "/api/blob-upload/*"
+  to = "/.netlify/functions/blob-upload"
+  status = 200
+
+[[redirects]]
+  from = "/api/blob-upload"
+  to = "/.netlify/functions/blob-upload"
+  status = 200
+
 # Swagger UI
 [[redirects]]
   from = "/docs"
@@ -68,6 +79,6 @@
   for = "/api/*"
   [headers.values]
     Access-Control-Allow-Origin = "*"
-    Access-Control-Allow-Methods = "GET, PUT, OPTIONS"
+    Access-Control-Allow-Methods = "GET, PUT, POST, OPTIONS"
     Access-Control-Allow-Headers = "Content-Type, Authorization"
     Content-Type = "application/json"
diff --git a/netlify/functions/blob-upload.js b/netlify/functions/blob-upload.js
new file mode 100644
index 0000000..43e4b8b
--- /dev/null
+++ b/netlify/functions/blob-upload.js
@@ -0,0 +1,297 @@
+const { json, error, options, blobUrl, blobHeaders } = require("./helpers");
+
+const UPLOAD_STORE = "uploads";
+const ONCALL_STORE = "oncall";
+
+// Max chunk size: 4MB (safe margin under Netlify's 6MB request limit after base64 overhead)
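+// (base64 encoding inflates the body by 4/3, so a 4 MiB chunk arrives as
+// roughly 5.6 MB of encoded payload, still under the 6 MB cap)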
integer", 400); + } + + if (!event.body) { + return error("Request body (chunk data) is required", 400); + } + + // Fetch manifest to validate + const manifestKey = encodeURIComponent(`${uploadId}/manifest`); + const manifestResp = await fetch(blobUrl(UPLOAD_STORE, manifestKey), { + headers: blobHeaders(), + }); + if (!manifestResp.ok) { + return error(`Upload session not found: ${uploadId}`, 404); + } + + const manifest = await manifestResp.json(); + if (manifest.status !== "in_progress") { + return error(`Upload already ${manifest.status}`, 409); + } + if (index >= manifest.totalChunks) { + return error(`Chunk index ${index} exceeds totalChunks ${manifest.totalChunks}`, 400); + } + + // Store the chunk as raw binary + const chunkData = event.isBase64Encoded ? Buffer.from(event.body, "base64") : Buffer.from(event.body); + + if (chunkData.length > MAX_CHUNK_BYTES) { + return error(`Chunk exceeds max size of ${MAX_CHUNK_BYTES} bytes`, 413); + } + + const chunkKey = encodeURIComponent(`${uploadId}/chunk-${String(index).padStart(5, "0")}`); + const chunkResp = await fetch(blobUrl(UPLOAD_STORE, chunkKey), { + method: "PUT", + headers: { ...blobHeaders(), "Content-Type": "application/octet-stream" }, + body: chunkData, + }); + + if (!chunkResp.ok) { + return error(`Failed to store chunk: ${chunkResp.status}`, 500); + } + + // Update manifest with received chunk + if (!manifest.chunksReceived.includes(index)) { + manifest.chunksReceived.push(index); + manifest.chunksReceived.sort((a, b) => a - b); + } + await fetch(blobUrl(UPLOAD_STORE, manifestKey), { + method: "PUT", + headers: { ...blobHeaders(), "Content-Type": "application/json" }, + body: JSON.stringify(manifest), + }); + + return json({ + uploadId, + chunkIndex: index, + chunkSize: chunkData.length, + chunksReceived: manifest.chunksReceived.length, + totalChunks: manifest.totalChunks, + complete: manifest.chunksReceived.length === manifest.totalChunks, + }); +} + +/** + * POST /api/blob-upload/complete?uploadId=X + * Reassembles all chunks into the final blob in the oncall store. + */ +async function handleComplete(event) { + const uploadId = event.queryStringParameters?.uploadId; + if (!uploadId) { + return error("Required query param: uploadId", 400); + } + + // Fetch manifest + const manifestKey = encodeURIComponent(`${uploadId}/manifest`); + const manifestResp = await fetch(blobUrl(UPLOAD_STORE, manifestKey), { + headers: blobHeaders(), + }); + if (!manifestResp.ok) { + return error(`Upload session not found: ${uploadId}`, 404); + } + + const manifest = await manifestResp.json(); + if (manifest.status === "completed") { + return error("Upload already completed", 409); + } + + // Check all chunks are present + if (manifest.chunksReceived.length !== manifest.totalChunks) { + const missing = []; + for (let i = 0; i < manifest.totalChunks; i++) { + if (!manifest.chunksReceived.includes(i)) missing.push(i); + } + return error(`Missing chunks: [${missing.join(", ")}]. 
+  // Update manifest with received chunk
+  if (!manifest.chunksReceived.includes(index)) {
+    manifest.chunksReceived.push(index);
+    manifest.chunksReceived.sort((a, b) => a - b);
+  }
+  await fetch(blobUrl(UPLOAD_STORE, manifestKey), {
+    method: "PUT",
+    headers: { ...blobHeaders(), "Content-Type": "application/json" },
+    body: JSON.stringify(manifest),
+  });
+
+  return json({
+    uploadId,
+    chunkIndex: index,
+    chunkSize: chunkData.length,
+    chunksReceived: manifest.chunksReceived.length,
+    totalChunks: manifest.totalChunks,
+    complete: manifest.chunksReceived.length === manifest.totalChunks,
+  });
+}
+
+/**
+ * POST /api/blob-upload/complete?uploadId=X
+ * Reassembles all chunks into the final blob in the oncall store.
+ */
+async function handleComplete(event) {
+  const uploadId = event.queryStringParameters?.uploadId;
+  if (!uploadId) {
+    return error("Required query param: uploadId", 400);
+  }
+
+  // Fetch manifest
+  const manifestKey = encodeURIComponent(`${uploadId}/manifest`);
+  const manifestResp = await fetch(blobUrl(UPLOAD_STORE, manifestKey), {
+    headers: blobHeaders(),
+  });
+  if (!manifestResp.ok) {
+    return error(`Upload session not found: ${uploadId}`, 404);
+  }
+
+  const manifest = await manifestResp.json();
+  if (manifest.status === "completed") {
+    return error("Upload already completed", 409);
+  }
+
+  // Check all chunks are present
+  if (manifest.chunksReceived.length !== manifest.totalChunks) {
+    const missing = [];
+    for (let i = 0; i < manifest.totalChunks; i++) {
+      if (!manifest.chunksReceived.includes(i)) missing.push(i);
+    }
+    return error(`Missing chunks: [${missing.join(", ")}]. Received ${manifest.chunksReceived.length}/${manifest.totalChunks}`, 400);
+  }
+
+  // Read and concatenate all chunks in order
+  const chunks = [];
+  for (let i = 0; i < manifest.totalChunks; i++) {
+    const chunkKey = encodeURIComponent(`${uploadId}/chunk-${String(i).padStart(5, "0")}`);
+    const resp = await fetch(blobUrl(UPLOAD_STORE, chunkKey), {
+      headers: blobHeaders(),
+    });
+    if (!resp.ok) {
+      return error(`Failed to read chunk ${i}: ${resp.status}`, 500);
+    }
+    chunks.push(Buffer.from(await resp.arrayBuffer()));
+  }
+
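+  // Note: the file is assembled entirely in memory. At the 500-chunk limit
+  // with 4 MB chunks this can approach ~2 GB, which must fit within the
+  // function's memory allocation.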
raw.split("/blob-upload").pop().split("?")[0] : ""; + + try { + if (event.httpMethod === "POST" && (suffix === "/init" || suffix === "")) { + return await handleInit(event); + } + if (event.httpMethod === "PUT" && suffix === "/chunk") { + return await handleChunk(event); + } + if (event.httpMethod === "POST" && suffix === "/complete") { + return await handleComplete(event); + } + if (event.httpMethod === "GET" && suffix === "/status") { + return await handleStatus(event); + } + return error("Not found. Use /init, /chunk, /complete, or /status", 404); + } catch (e) { + return error(`Upload failed: ${e.message}`); + } +}; diff --git a/netlify/functions/helpers.js b/netlify/functions/helpers.js index 4d53d23..c000012 100644 --- a/netlify/functions/helpers.js +++ b/netlify/functions/helpers.js @@ -3,7 +3,7 @@ const STORE_NAME = "order-book"; const CORS_HEADERS = { "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Methods": "GET, PUT, OPTIONS", + "Access-Control-Allow-Methods": "GET, PUT, POST, OPTIONS", "Access-Control-Allow-Headers": "Content-Type, Authorization", }; diff --git a/public/swagger/openapi.yaml b/public/swagger/openapi.yaml index 449a75b..ee7bf2f 100644 --- a/public/swagger/openapi.yaml +++ b/public/swagger/openapi.yaml @@ -219,7 +219,189 @@ paths: schema: $ref: "#/components/schemas/Error" + /blob-upload/init: + post: + summary: Initialize chunked zip upload + description: | + Starts a new chunked upload session for a zip file. Returns an `uploadId` + to use with subsequent chunk uploads. Recommended chunk size is 4MB to stay + safely within Netlify's 6MB request limit. + operationId: initBlobUpload + tags: [Blob Upload] + security: + - BearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [service, date, filename, totalChunks] + properties: + service: + type: string + description: Target service name + example: allocation-engine-2.0 + date: + type: string + pattern: "^\\d{4}-\\d{2}-\\d{2}$" + description: Date in YYYY-MM-DD format + example: "2026-03-16" + filename: + type: string + description: Original filename (must end in .zip) + example: screenshots-batch.zip + totalChunks: + type: integer + minimum: 1 + maximum: 500 + description: Number of chunks the file will be split into + example: 5 + totalBytes: + type: integer + description: Total file size in bytes (optional, for tracking) + example: 20971520 + responses: + "201": + description: Upload session created + content: + application/json: + schema: + $ref: "#/components/schemas/BlobUploadInitResponse" + "400": + description: Missing or invalid parameters + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /blob-upload/chunk: + put: + summary: Upload a single chunk + description: | + Uploads one chunk of a file. Send the raw binary data as the request body. + Each chunk must be under 4MB. Chunks can be uploaded in any order and + concurrently for faster uploads. 
+  /blob-upload/chunk:
+    put:
+      summary: Upload a single chunk
+      description: |
+        Uploads one chunk of a file. Send the raw binary data as the request body.
+        Each chunk must be under 4MB. Chunks may be uploaded in any order, but
+        should be sent one at a time: the server updates the session manifest on
+        every chunk with a non-atomic read-modify-write, so concurrent uploads
+        can race and lose progress.
+      operationId: uploadBlobChunk
+      tags: [Blob Upload]
+      security:
+        - BearerAuth: []
+      parameters:
+        - name: uploadId
+          in: query
+          required: true
+          description: Upload session ID from /init
+          schema:
+            type: string
+        - name: index
+          in: query
+          required: true
+          description: Zero-based chunk index
+          schema:
+            type: integer
+            minimum: 0
+      requestBody:
+        required: true
+        content:
+          application/octet-stream:
+            schema:
+              type: string
+              format: binary
+              description: Raw chunk bytes (max 4MB)
+      responses:
+        "200":
+          description: Chunk received
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/BlobUploadChunkResponse"
+        "404":
+          description: Upload session not found
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+        "413":
+          description: Chunk exceeds max size
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+
+  /blob-upload/complete:
+    post:
+      summary: Complete chunked upload
+      description: |
+        Reassembles all uploaded chunks into the final zip blob and stores it
+        in the oncall blob store. All chunks must be uploaded before calling this.
+        Chunk blobs are cleaned up automatically after assembly.
+      operationId: completeBlobUpload
+      tags: [Blob Upload]
+      security:
+        - BearerAuth: []
+      parameters:
+        - name: uploadId
+          in: query
+          required: true
+          description: Upload session ID from /init
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Upload assembled and stored
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/BlobUploadCompleteResponse"
+        "400":
+          description: Missing chunks
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+        "404":
+          description: Upload session not found
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+
+  /blob-upload/status:
+    get:
+      summary: Check upload progress
+      description: Returns the current status and chunk progress for an upload session.
+      operationId: getBlobUploadStatus
+      tags: [Blob Upload]
+      security:
+        - BearerAuth: []
+      parameters:
+        - name: uploadId
+          in: query
+          required: true
+          description: Upload session ID from /init
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Upload status
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/BlobUploadStatusResponse"
+        "404":
+          description: Upload session not found
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+
 components:
+  securitySchemes:
+    BearerAuth:
+      type: http
+      scheme: bearer
+      description: Netlify auth token (NETLIFY_AUTH_TOKEN)
+
   schemas:
     Error:
       type: object
@@ -547,6 +729,86 @@
           type: string
           description: All oncall entries across services (when listing all)
 
+    BlobUploadInitResponse:
+      type: object
+      properties:
+        uploadId:
+          type: string
+          description: Unique upload session identifier
+          example: "1710590400000-a3f2k9"
+        totalChunks:
+          type: integer
+          example: 5
+        maxChunkBytes:
+          type: integer
+          description: Maximum allowed bytes per chunk
+          example: 4194304
+
+    BlobUploadChunkResponse:
+      type: object
+      properties:
+        uploadId:
+          type: string
+        chunkIndex:
+          type: integer
+          description: The index of the chunk just received
+        chunkSize:
+          type: integer
+          description: Bytes received for this chunk
+        chunksReceived:
+          type: integer
+          description: Total chunks received so far
+        totalChunks:
+          type: integer
+        complete:
+          type: boolean
+          description: True when all chunks have been received
+
+    BlobUploadCompleteResponse:
+      type: object
+      properties:
+        message:
+          type: string
+          example: Upload complete
+        key:
+          type: string
+          description: Final blob key in the oncall store
+          example: "allocation-engine-2.0/2026-03-16/1710590400-screenshots-batch.zip"
+        totalBytes:
+          type: integer
+          description: Total assembled file size in bytes
+        totalChunks:
+          type: integer
+
+    BlobUploadStatusResponse:
+      type: object
+      properties:
+        uploadId:
+          type: string
+        status:
+          type: string
+          enum: [in_progress, completed]
+        filename:
+          type: string
+        totalChunks:
+          type: integer
+        chunksReceived:
+          type: integer
+        totalBytes:
+          type: integer
+          nullable: true
+        createdAt:
+          type: string
+          format: date-time
+        completedAt:
+          type: string
+          format: date-time
+          nullable: true
+        finalKey:
+          type: string
+          nullable: true
+          description: Blob key of the assembled file (only after completion)
+
     SnapshotResponse:
       type: object
       properties:
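Usage sketch (not part of the patch): a minimal Node 18+ client for the init -> chunk -> complete flow added above. SITE_URL, the service name, and the local filename are illustrative placeholders; the token is assumed to match the function's NETLIFY_AUTH_TOKEN.

// upload-client.js: sketch of the chunked upload flow, assuming Node 18+
// (global fetch) and env vars SITE_URL and NETLIFY_AUTH_TOKEN.
const fs = require("fs");
const path = require("path");

const SITE_URL = process.env.SITE_URL || "http://localhost:8888";
const TOKEN = process.env.NETLIFY_AUTH_TOKEN;
const CHUNK_BYTES = 4 * 1024 * 1024; // mirror the server's MAX_CHUNK_BYTES

async function api(method, pathname, body, contentType) {
  const resp = await fetch(`${SITE_URL}/api/blob-upload${pathname}`, {
    method,
    headers: {
      Authorization: `Bearer ${TOKEN}`,
      ...(contentType ? { "Content-Type": contentType } : {}),
    },
    body,
  });
  if (!resp.ok) throw new Error(`${method} ${pathname} -> ${resp.status}: ${await resp.text()}`);
  return resp.json();
}

async function uploadZip(service, date, file) {
  const data = fs.readFileSync(file);
  const totalChunks = Math.ceil(data.length / CHUNK_BYTES);

  // 1. Open a session
  const { uploadId } = await api("POST", "/init", JSON.stringify({
    service,
    date,
    filename: path.basename(file),
    totalChunks,
    totalBytes: data.length,
  }), "application/json");

  // 2. Send chunks one at a time; the server's manifest update is not
  //    atomic, so concurrent chunk uploads can lose progress.
  for (let i = 0; i < totalChunks; i++) {
    const chunk = data.subarray(i * CHUNK_BYTES, (i + 1) * CHUNK_BYTES);
    const { chunksReceived } = await api(
      "PUT",
      `/chunk?uploadId=${uploadId}&index=${i}`,
      chunk,
      "application/octet-stream"
    );
    console.log(`chunk ${i + 1}/${totalChunks} stored (${chunksReceived} received)`);
  }

  // 3. Assemble the final blob
  const { key, totalBytes } = await api("POST", `/complete?uploadId=${uploadId}`);
  console.log(`assembled ${totalBytes} bytes at ${key}`);
}

uploadZip("allocation-engine-2.0", "2026-03-16", "./screenshots-batch.zip")
  .catch((e) => { console.error(e); process.exit(1); });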