diff --git a/.dockerignore b/.dockerignore index 386c86a71..e754b92a5 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,5 +2,17 @@ node_modules /dist logs c2d_storage -.env.local -.env \ No newline at end of file +databases +.env +.env.* +.git +.github +docs +src/test +*.md +*.log +.nyc_output +coverage +docker-compose.yml +elasticsearch-compose.yml +typesense-compose.yml \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 6ba093edb..e49a3272b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,44 +1,53 @@ -FROM ubuntu:22.04 AS base -RUN apt-get update && apt-get -y install bash curl git wget libatomic1 python3 build-essential -COPY .nvmrc /usr/src/app/ -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -ENV NVM_DIR=/usr/local/nvm -RUN mkdir $NVM_DIR -ENV NODE_VERSION=v22.15.0 -# Install nvm with node and npm -RUN curl https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \ - && source $NVM_DIR/nvm.sh \ - && nvm install $NODE_VERSION \ - && nvm alias default $NODE_VERSION \ - && nvm use default -ENV NODE_PATH=$NVM_DIR/$NODE_VERSION/lib/node_modules -ENV PATH=$NVM_DIR/versions/node/$NODE_VERSION/bin:$PATH -ENV IPFS_GATEWAY='https://ipfs.io/' -ENV ARWEAVE_GATEWAY='https://arweave.net/' - -FROM base AS builder -COPY package*.json /usr/src/app/ -COPY scripts/ /usr/src/app/scripts/ -WORKDIR /usr/src/app/ +FROM node:22.15.0-bookworm@sha256:a1f1274dadd49738bcd4cf552af43354bb781a7e9e3bc984cfeedc55aba2ddd8 AS builder +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3 \ + build-essential \ + libatomic1 \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/app +COPY package*.json ./ +COPY scripts/ ./scripts/ RUN npm ci +COPY . . 
+RUN npm run build && npm prune --omit=dev + + +FROM node:22.15.0-bookworm-slim@sha256:557e52a0fcb928ee113df7e1fb5d4f60c1341dbda53f55e3d815ca10807efdce AS runner +RUN apt-get update && apt-get install -y --no-install-recommends \ + dumb-init \ + gosu \ + libatomic1 \ + && rm -rf /var/lib/apt/lists/* + +ENV NODE_ENV=production \ + IPFS_GATEWAY='https://ipfs.io/' \ + ARWEAVE_GATEWAY='https://arweave.net/' \ + P2P_ipV4BindTcpPort=9000 \ + P2P_ipV4BindWsPort=9001 \ + P2P_ipV6BindTcpPort=9002 \ + P2P_ipV6BindWsPort=9003 \ + P2P_ipV4BindWssPort=9005 \ + HTTP_API_PORT=8000 + +EXPOSE 9000 9001 9002 9003 9005 8000 + +# Docker group membership is handled at runtime in docker-entrypoint.sh by +# inspecting the GID of /var/run/docker.sock, so it works across hosts. + +WORKDIR /usr/src/app + +COPY --chown=node:node --from=builder /usr/src/app/dist ./dist +COPY --chown=node:node --from=builder /usr/src/app/node_modules ./node_modules +COPY --chown=node:node --from=builder /usr/src/app/schemas ./schemas +COPY --chown=node:node --from=builder /usr/src/app/package.json ./ +COPY --chown=node:node --from=builder /usr/src/app/config.json ./ + +RUN mkdir -p databases c2d_storage logs +COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +RUN chmod +x /usr/local/bin/docker-entrypoint.sh -FROM base AS runner -COPY . 
/usr/src/app -WORKDIR /usr/src/app/ -COPY --from=builder /usr/src/app/node_modules/ /usr/src/app/node_modules/ -RUN npm run build -ENV P2P_ipV4BindTcpPort=9000 -EXPOSE 9000 -ENV P2P_ipV4BindWsPort=9001 -EXPOSE 9001 -ENV P2P_ipV6BindTcpPort=9002 -EXPOSE 9002 -ENV P2P_ipV6BindWsPort=9003 -EXPOSE 9003 -ENV P2P_ipV4BindWssPort=9005 -EXPOSE 9005 -ENV HTTP_API_PORT=8000 -EXPOSE 8000 -ENV NODE_ENV='production' -CMD ["npm","run","start"] +ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] +CMD ["node", "--max-old-space-size=28784", "--trace-warnings", "--experimental-specifier-resolution=node", "dist/index.js"] diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100644 index 000000000..41d2b4473 --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,19 @@ +#!/bin/sh +set -e + +# Fix ownership of directories that may be mounted as volumes (owned by root). +# Runs as root, then drops to 'node' user via gosu. +chown -R node:node /usr/src/app/databases /usr/src/app/c2d_storage /usr/src/app/logs 2>/dev/null || true + +# Add node user to the docker group matching the host's /var/run/docker.sock GID, +# so compute jobs can access the socket regardless of the host's docker GID. +if [ -S /var/run/docker.sock ]; then + SOCK_GID=$(stat -c '%g' /var/run/docker.sock) + if ! getent group "$SOCK_GID" > /dev/null 2>&1; then + groupadd -g "$SOCK_GID" dockerhost 2>/dev/null || true + fi + DOCKER_GROUP=$(getent group "$SOCK_GID" | cut -d: -f1) + usermod -aG "$DOCKER_GROUP" node +fi + +exec gosu node dumb-init -- "$@" diff --git a/docs/compute-pricing.md b/docs/compute-pricing.md index e2912bd46..c6598e760 100644 --- a/docs/compute-pricing.md +++ b/docs/compute-pricing.md @@ -5,8 +5,11 @@ This guide explains how to configure your node’s Docker compute environments a ## Overview - **Configuration**: Define compute environments via the `DOCKER_COMPUTE_ENVIRONMENTS` environment variable (JSON) or via `config.json` under `dockerComputeEnvironments`. 
+- **Environment**: A group of resources, payment and access lists. - **Resources**: Each environment declares resources (e.g. `cpu`, `ram`, `disk`, and optionally GPUs). You must declare a `disk` resource. - **Pricing**: For each chain and fee token, you set a `price` per resource. Cost is computed as **price × amount × duration (in minutes, rounded up)**. +- **Free**: Environments which do not require a payment for the resources, but most likely are very limited in terms of resources available and job duration. +- **Image building**: **Free jobs cannot build images** (Dockerfiles are not allowed). For **paid jobs**, **image build time counts toward billable duration** and also consumes the job’s `maxJobDuration`. ## Pricing Units diff --git a/docs/env.md b/docs/env.md index beeae0180..e3af9b272 100644 --- a/docs/env.md +++ b/docs/env.md @@ -34,49 +34,6 @@ Environmental variables are also tracked in `ENVIRONMENT_VARIABLES` within `src/ - `AUTHORIZED_PUBLISHERS_LIST`: AccessList contract addresses (per chain). If present, Node will only index assets published by the accounts present on the given access lists. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"` - `VALIDATE_UNSIGNED_DDO`: If set to `false`, the node will not validate unsigned DDOs and will request a signed message with the publisher address, nonce and signature. Default is `true`. Example: `false` - `JWT_SECRET`: Secret used to sign JWT tokens. Default is `ocean-node-secret`. Example: `"my-secret-jwt-token"` -- `NODE_OWNER_INFO`: Optional JSON object returned by the root endpoint as `ownerInfo`. Example: `"{\"imprint\":{\"legalName\":\"Example Ocean Services GmbH\"},\"termsAndConditions\":{\"url\":\"https://example.com/terms\"},\"anyCustomSection\":{\"foo\":\"bar\"}}"` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. 
Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. 
Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. 
Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` ## Database @@ -179,6 +136,8 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of [ { "socketPath": "/var/run/docker.sock", + "scanImages": true, + "enableNetwork": false, "imageRetentionDays": 7, "imageCleanupInterval": 86400, "resources": [ @@ -237,6 +196,9 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of #### Configuration Options - **socketPath**: Path to the Docker socket (e.g., docker.sock). +- **scanImages**: Whether Docker images should be scanned for vulnerabilities using Trivy. If enabled and critical vulnerabilities are found, the C2D job is rejected. +- **scanImageDBUpdateInterval**: How often to update the vulnerability database, in seconds. Default: 43200 (12 hours) +- **enableNetwork**: Whether networking is enabled for algorithm containers. Default: false - **imageRetentionDays** - how long docker images are kept, in days. Default: 7 - **imageCleanupInterval** - how often to run cleanup for docker images, in seconds. Min: 3600 (1hour), Default: 86400 (24 hours) - **paymentClaimInterval** - how often to run payment claiming, in seconds. 
Default: 3600 (1 hour) @@ -261,6 +223,7 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of - **maxJobDuration**: Maximum duration in seconds for a free job. - **minJobDuration**: Minimum duration in seconds for a free job. - **maxJobs**: Maximum number of simultaneous free jobs. + - **allowImageBuild**: Whether building images is allowed on free environments. Default: false - **access**: Access control configuration for free compute jobs. Works the same as the main `access` field. - **addresses**: Array of Ethereum addresses allowed to run free compute jobs. - **accessLists**: Array of AccessList contract addresses for free compute access control. diff --git a/package-lock.json b/package-lock.json index 487678832..c2aee96d9 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "ocean-node", - "version": "2.1.1", + "version": "2.1.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "ocean-node", - "version": "2.1.1", + "version": "2.1.2", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { diff --git a/package.json b/package.json index 7a20f414b..39479fe4a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ocean-node", - "version": "2.1.1", + "version": "2.1.2", "description": "Ocean Node is used to run all core services in the Ocean stack", "author": "Ocean Protocol Foundation", "license": "Apache-2.0", diff --git a/src/@types/C2D/C2D.ts b/src/@types/C2D/C2D.ts index 603e8b188..feb2f64b1 100644 --- a/src/@types/C2D/C2D.ts +++ b/src/@types/C2D/C2D.ts @@ -106,6 +106,7 @@ export interface ComputeEnvironmentFreeOptions { maxJobs?: number // maximum number of simultaneous free jobs resources?: ComputeResource[] access: ComputeAccessList + allowImageBuild?: boolean } export interface ComputeEnvironmentBaseConfig { description?: string // v1 @@ -158,6 +159,9 @@ export interface C2DDockerConfig { imageRetentionDays?: number // Default: 7 days imageCleanupInterval?: 
number // Default: 86400 seconds (24 hours) paymentClaimInterval?: number // Default: 3600 seconds (1 hours) + scanImages?: boolean + scanImageDBUpdateInterval?: number // Default: 12 hours + enableNetwork?: boolean // whether network is enabled for algorithm containers } export type ComputeResultType = @@ -280,6 +284,8 @@ export interface DBComputeJob extends ComputeJob { encryptedDockerRegistryAuth?: string output?: string // this is always an ECIES encrypted string, that decodes to ComputeOutput interface jobIdHash: string + buildStartTimestamp?: string + buildStopTimestamp?: string } // make sure we keep them both in sync @@ -299,6 +305,8 @@ export enum C2DStatusNumber { // eslint-disable-next-line no-unused-vars BuildImageFailed = 13, // eslint-disable-next-line no-unused-vars + VulnerableImage = 14, + // eslint-disable-next-line no-unused-vars ConfiguringVolumes = 20, // eslint-disable-next-line no-unused-vars VolumeCreationFailed = 21, @@ -347,6 +355,8 @@ export enum C2DStatusText { // eslint-disable-next-line no-unused-vars BuildImageFailed = 'Building algorithm image failed', // eslint-disable-next-line no-unused-vars + VulnerableImage = 'Image has vulnerabilities', + // eslint-disable-next-line no-unused-vars ConfiguringVolumes = 'Configuring volumes', // eslint-disable-next-line no-unused-vars VolumeCreationFailed = 'Volume creation failed', diff --git a/src/@types/commands.ts b/src/@types/commands.ts index 395487b44..ba35502b9 100644 --- a/src/@types/commands.ts +++ b/src/@types/commands.ts @@ -312,4 +312,5 @@ export interface GetJobsCommand extends Command { environments?: string[] fromTimestamp?: string consumerAddrs?: string[] + runningJobs?: boolean } diff --git a/src/components/Indexer/index.ts b/src/components/Indexer/index.ts index fd9e94476..f40d82122 100644 --- a/src/components/Indexer/index.ts +++ b/src/components/Indexer/index.ts @@ -39,6 +39,7 @@ import { getDatabase, isReachableConnection } from '../../utils/database.js' import { sleep } 
from '../../utils/util.js' import { isReindexingNeeded } from './version.js' import { DB_EVENTS, ES_CONNECTION_EVENTS } from '../database/ElasticsearchConfigHelper.js' +import { getPackageVersion } from '../../utils/version.js' /** * Event emitter for DDO (Data Descriptor Object) events @@ -535,7 +536,7 @@ export class OceanIndexer { * Checks if reindexing is needed and triggers it for all chains */ public async checkAndTriggerReindexing(): Promise { - const currentVersion = process.env.npm_package_version + const currentVersion = getPackageVersion() const dbActive = this.getDatabase() if (!dbActive || !(await isReachableConnection(dbActive.getConfig().url))) { INDEXER_LOGGER.error(`Giving up reindexing. DB is not online!`) diff --git a/src/components/P2P/index.ts b/src/components/P2P/index.ts index c8861148a..deca9308e 100644 --- a/src/components/P2P/index.ts +++ b/src/components/P2P/index.ts @@ -331,20 +331,23 @@ export class OceanP2P extends EventEmitter { `/ip6/${config.p2pConfig.ipV6BindAddress}/tcp/${config.p2pConfig.ipV6BindWsPort}/ws` ) } + const listenAddrs = config.p2pConfig.enableCircuitRelayClient + ? 
[...bindInterfaces, '/p2p-circuit'] + : bindInterfaces let addresses = {} if ( config.p2pConfig.announceAddresses && config.p2pConfig.announceAddresses.length > 0 ) { addresses = { - listen: bindInterfaces, + listen: listenAddrs, announceFilter: (multiaddrs: any[]) => multiaddrs.filter((m) => this.shouldAnnounce(m)), appendAnnounce: config.p2pConfig.announceAddresses } } else { addresses = { - listen: bindInterfaces, + listen: listenAddrs, announceFilter: (multiaddrs: any[]) => multiaddrs.filter((m) => this.shouldAnnounce(m)) } @@ -395,7 +398,12 @@ export class OceanP2P extends EventEmitter { // eslint-disable-next-line no-constant-condition, no-self-compare if (config.p2pConfig.enableCircuitRelayServer) { P2P_LOGGER.info('Enabling Circuit Relay Server') - servicesConfig = { ...servicesConfig, ...{ circuitRelay: circuitRelayServer() } } + servicesConfig = { + ...servicesConfig, + ...{ + circuitRelay: circuitRelayServer({ reservations: { maxReservations: 2 } }) + } + } } // eslint-disable-next-line no-constant-condition, no-self-compare if (config.p2pConfig.upnp) { @@ -964,6 +972,7 @@ export class OceanP2P extends EventEmitter { // on timeout the query ends with an abort signal => CodeError: Query aborted // eslint-disable-next-line @typescript-eslint/no-explicit-any } as any) + for await (const value of f) { peersFound.push(value) } diff --git a/src/components/c2d/compute_engine_base.ts b/src/components/c2d/compute_engine_base.ts index da9de13e3..37f1067c0 100644 --- a/src/components/c2d/compute_engine_base.ts +++ b/src/components/c2d/compute_engine_base.ts @@ -327,8 +327,9 @@ export abstract class C2DEngine { for (const job of jobs) { if (job.environment === env.id) { if (job.queueMaxWaitTime === 0) { - const timeElapsed = - new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp) + const timeElapsed = job.buildStartTimestamp + ? 
new Date().getTime() / 1000 - Number.parseFloat(job?.buildStartTimestamp) + : new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp) totalJobs++ maxRunningTime += job.maxJobDuration - timeElapsed if (job.isFree) { diff --git a/src/components/c2d/compute_engine_docker.ts b/src/components/c2d/compute_engine_docker.ts index 224414f86..91070735a 100755 --- a/src/components/c2d/compute_engine_docker.ts +++ b/src/components/c2d/compute_engine_docker.ts @@ -1,6 +1,7 @@ /* eslint-disable security/detect-non-literal-fs-filename */ -import { Readable } from 'stream' +import { Readable, PassThrough } from 'stream' import os from 'os' +import path from 'path' import { C2DStatusNumber, C2DStatusText, @@ -35,6 +36,7 @@ import { createWriteStream, existsSync, mkdirSync, + chmodSync, rmSync, writeFileSync, appendFileSync, @@ -55,6 +57,11 @@ import { dockerRegistrysAuth, dockerRegistryAuth } from '../../@types/OceanNode. import { EncryptMethod } from '../../@types/fileObject.js' import { ZeroAddress } from 'ethers' +const C2D_CONTAINER_UID = 1000 +const C2D_CONTAINER_GID = 1000 + +const trivyImage = 'aquasec/trivy:0.69.3' // Use pinned versions for safety + export class C2DEngineDocker extends C2DEngine { private envs: ComputeEnvironment[] = [] @@ -65,13 +72,19 @@ export class C2DEngineDocker extends C2DEngine { private isInternalLoopRunning: boolean = false private imageCleanupTimer: NodeJS.Timeout | null = null private paymentClaimTimer: NodeJS.Timeout | null = null + private scanDBUpdateTimer: NodeJS.Timeout | null = null private static DEFAULT_DOCKER_REGISTRY = 'https://registry-1.docker.io' private retentionDays: number private cleanupInterval: number private paymentClaimInterval: number + private scanImages: boolean + private scanImageDBUpdateInterval: number + private trivyCachePath: string private cpuAllocations: Map = new Map() private envCpuCores: number[] = [] private cpuOffset: number + private enableNetwork: boolean + public constructor( 
clusterConfig: C2DClusterInfo, db: C2DDatabase, @@ -92,8 +105,11 @@ export class C2DEngineDocker extends C2DEngine { } } this.retentionDays = clusterConfig.connection.imageRetentionDays || 7 - this.cleanupInterval = clusterConfig.connection.imageCleanupInterval || 86400 // 24 hours + this.cleanupInterval = clusterConfig.connection.imageCleanupInterval this.paymentClaimInterval = clusterConfig.connection.paymentClaimInterval || 3600 // 1 hour + this.scanImages = clusterConfig.connection.scanImages || false // default is not to scan images for now, until it's prod ready + this.scanImageDBUpdateInterval = clusterConfig.connection.scanImageDBUpdateInterval + this.enableNetwork = clusterConfig.connection.enableNetwork ?? false if ( clusterConfig.connection.protocol && clusterConfig.connection.host && @@ -109,18 +125,30 @@ export class C2DEngineDocker extends C2DEngine { CORE_LOGGER.error('Could not create Docker container: ' + e.message) } } - // TO DO C2D - create envs + // trivy cache is the same for all engines + this.trivyCachePath = path.join( + process.cwd(), + this.getC2DConfig().tempFolder, + 'trivy_cache' + ) try { - if (!existsSync(clusterConfig.tempFolder)) - mkdirSync(clusterConfig.tempFolder, { recursive: true }) + if (!existsSync(this.getStoragePath())) + mkdirSync(this.getStoragePath(), { recursive: true }) + if (!existsSync(this.trivyCachePath)) + mkdirSync(this.trivyCachePath, { recursive: true }) } catch (e) { CORE_LOGGER.error( 'Could not create Docker container temporary folders: ' + e.message ) } + // envs are build on start function } + public getStoragePath(): string { + return this.getC2DConfig().tempFolder + this.getC2DConfig().hash + } + public override async start() { // let's build the env. 
Swarm and k8 will build multiple envs, based on arhitecture const config = await getConfiguration() @@ -321,10 +349,86 @@ export class C2DEngineDocker extends C2DEngine { if (!this.cronTimer) { this.setNewTimer() } + this.startCrons() + } + + public startCrons() { + if (!this.docker) { + CORE_LOGGER.debug('Docker not available, skipping crons') + return + } + // Start image cleanup timer - this.startImageCleanupTimer() - // Start claim timer - this.startPaymentTimer() + if (this.cleanupInterval) { + if (this.imageCleanupTimer) { + return // Already running + } + // Run initial cleanup after a short delay + setTimeout(() => { + this.cleanupOldImages().catch((e) => { + CORE_LOGGER.error(`Initial image cleanup failed: ${e.message}`) + }) + }, 60000) // Wait 1 minute after start + + // Set up periodic cleanup + this.imageCleanupTimer = setInterval(() => { + this.cleanupOldImages().catch((e) => { + CORE_LOGGER.error(`Periodic image cleanup failed: ${e.message}`) + }) + }, this.cleanupInterval * 1000) + + CORE_LOGGER.info( + `Image cleanup timer started (interval: ${this.cleanupInterval / 60} minutes)` + ) + } + // start payments cron + if (this.paymentClaimInterval) { + if (this.paymentClaimTimer) { + return // Already running + } + + // Run initial cleanup after a short delay + setTimeout(() => { + this.claimPayments().catch((e) => { + CORE_LOGGER.error(`Initial payments claim failed: ${e.message}`) + }) + }, 60000) // Wait 1 minute after start + + // Set up periodic cleanup + this.paymentClaimTimer = setInterval(() => { + this.claimPayments().catch((e) => { + CORE_LOGGER.error(`Periodic payments claim failed: ${e.message}`) + }) + }, this.paymentClaimInterval * 1000) + + CORE_LOGGER.info( + `Payments claim timer started (interval: ${this.paymentClaimInterval / 60} minutes)` + ) + } + // scan db updater cron + if (this.scanImageDBUpdateInterval) { + if (this.scanDBUpdateTimer) { + return // Already running + } + + // Run initial db cache + setTimeout(() => { + 
this.scanDBUpdate().catch((e) => { + CORE_LOGGER.error(`scan DB Update Initial failed: ${e.message}`) + }) + }, 30000) // Wait 30 seconds + + // Set up periodic cleanup + this.scanDBUpdateTimer = setInterval(() => { + this.scanDBUpdate().catch((e) => { + CORE_LOGGER.error(`Periodic scan DB update failed: ${e.message}`) + }) + }, this.scanImageDBUpdateInterval * 1000) + + CORE_LOGGER.info( + `scan DB update timer started (interval: ${this.scanImageDBUpdateInterval / 60} minutes)` + ) + } } public override stop(): Promise { @@ -414,11 +518,11 @@ export class C2DEngineDocker extends C2DEngine { } // Process each job to determine what operation is needed + let duration for (const job of jobs) { // Calculate algo duration - const algoDuration = - parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp) - job.algoDuration = algoDuration + duration = parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp) + duration += this.getValidBuildDurationSeconds(job) // Free jobs or jobs without payment info - mark as finished if (job.isFree || !job.payment) { @@ -455,7 +559,7 @@ export class C2DEngineDocker extends C2DEngine { continue } - let minDuration = Math.abs(algoDuration) + let minDuration = Math.abs(duration) if (minDuration > job.maxJobDuration) { minDuration = job.maxJobDuration } @@ -656,7 +760,7 @@ export class C2DEngineDocker extends C2DEngine { private async cleanUpUnknownLocks(chain: string, currentTimestamp: bigint) { try { - const nodeAddress = await this.getKeyManager().getEthAddress() + const nodeAddress = this.getKeyManager().getEthAddress() const jobIds: any[] = [] const tokens: string[] = [] const payer: string[] = [] @@ -667,6 +771,10 @@ export class C2DEngineDocker extends C2DEngine { '0x0000000000000000000000000000000000000000', nodeAddress ) + if (!balocks || balocks.length === 0) { + CORE_LOGGER.warn(`Could not find any locks for chain ${chain}, skipping cleanup`) + return + } for (const lock of balocks) { const lockExpiry = 
BigInt(lock.expiry.toString()) if (currentTimestamp > lockExpiry) { @@ -731,59 +839,6 @@ export class C2DEngineDocker extends C2DEngine { } } - private startImageCleanupTimer(): void { - if (this.imageCleanupTimer) { - return // Already running - } - - if (!this.docker) { - CORE_LOGGER.debug('Docker not available, skipping image cleanup timer') - return - } - - // Run initial cleanup after a short delay - setTimeout(() => { - this.cleanupOldImages().catch((e) => { - CORE_LOGGER.error(`Initial image cleanup failed: ${e.message}`) - }) - }, 60000) // Wait 1 minute after start - - // Set up periodic cleanup - this.imageCleanupTimer = setInterval(() => { - this.cleanupOldImages().catch((e) => { - CORE_LOGGER.error(`Periodic image cleanup failed: ${e.message}`) - }) - }, this.cleanupInterval * 1000) - - CORE_LOGGER.info( - `Image cleanup timer started (interval: ${this.cleanupInterval / 60} minutes)` - ) - } - - private startPaymentTimer(): void { - if (this.paymentClaimTimer) { - return // Already running - } - - // Run initial cleanup after a short delay - setTimeout(() => { - this.claimPayments().catch((e) => { - CORE_LOGGER.error(`Initial payments claim failed: ${e.message}`) - }) - }, 60000) // Wait 1 minute after start - - // Set up periodic cleanup - this.paymentClaimTimer = setInterval(() => { - this.claimPayments().catch((e) => { - CORE_LOGGER.error(`Periodic payments claim failed: ${e.message}`) - }) - }, this.paymentClaimInterval * 1000) - - CORE_LOGGER.info( - `Payments claim timer started (interval: ${this.paymentClaimInterval / 60} minutes)` - ) - } - // eslint-disable-next-line require-await public override async getComputeEnvironments( chainId?: number @@ -1107,6 +1162,13 @@ export class C2DEngineDocker extends C2DEngine { throw new Error(`additionalDockerFiles cannot be used with queued jobs`) } } + if ( + algorithm.meta.container && + algorithm.meta.container.dockerfile && + !env.free.allowImageBuild + ) { + throw new Error(`Building image is not 
allowed for free jobs`) + } const job: DBComputeJob = { clusterHash: this.getC2DConfig().hash, @@ -1147,7 +1209,9 @@ export class C2DEngineDocker extends C2DEngine { algoDuration: 0, queueMaxWaitTime: queueMaxWaitTime || 0, encryptedDockerRegistryAuth, // we store the encrypted docker registry auth in the job - output + output, + buildStartTimestamp: '0', + buildStopTimestamp: '0' } if (algorithm.meta.container && algorithm.meta.container.dockerfile) { @@ -1226,7 +1290,7 @@ export class C2DEngineDocker extends C2DEngine { let index = 0 try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/image.log' + this.getStoragePath() + '/' + jobId + '/data/logs/image.log' ) if (logStat) { res.push({ @@ -1240,7 +1304,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/configuration.log' + this.getStoragePath() + '/' + jobId + '/data/logs/configuration.log' ) if (logStat) { res.push({ @@ -1254,7 +1318,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + jobId + '/data/logs/algorithm.log' ) if (logStat) { res.push({ @@ -1271,7 +1335,7 @@ export class C2DEngineDocker extends C2DEngine { const jobDb = await this.db.getJob(jobId) if (jobDb.length < 1 || !jobDb[0].output) { const outputStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar' + this.getStoragePath() + '/' + jobId + '/data/outputs/outputs.tar' ) if (outputStat) { res.push({ @@ -1286,7 +1350,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/publish.log' + this.getStoragePath() + '/' + jobId + '/data/logs/publish.log' ) if (logStat) { res.push({ @@ -1348,7 +1412,7 @@ export 
class C2DEngineDocker extends C2DEngine { if (i.type === 'algorithmLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + jobId + '/data/logs/algorithm.log' ), headers: { 'Content-Type': 'text/plain' @@ -1358,10 +1422,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'configurationLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + - '/' + - jobId + - '/data/logs/configuration.log' + this.getStoragePath() + '/' + jobId + '/data/logs/configuration.log' ), headers: { 'Content-Type': 'text/plain' @@ -1371,7 +1432,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'publishLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/publish.log' + this.getStoragePath() + '/' + jobId + '/data/logs/publish.log' ), headers: { 'Content-Type': 'text/plain' @@ -1381,7 +1442,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'imageLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/image.log' + this.getStoragePath() + '/' + jobId + '/data/logs/image.log' ), headers: { 'Content-Type': 'text/plain' @@ -1391,7 +1452,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'output') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar', + this.getStoragePath() + '/' + jobId + '/data/outputs/outputs.tar', offset > 0 ? 
{ start: offset } : undefined ), headers: { @@ -1411,7 +1472,7 @@ export class C2DEngineDocker extends C2DEngine { if (!jobRes[0].isRunning) return null try { const job = jobRes[0] - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') const details = await container.inspect() if (details.State.Running === false) return null return await container.logs({ @@ -1606,6 +1667,40 @@ export class C2DEngineDocker extends C2DEngine { } if (job.status === C2DStatusNumber.ConfiguringVolumes) { + // we have the image (etiher pulled or built) + // if built, check if build process took all allocated time + // if yes, stop the job + const buildDuration = this.getValidBuildDurationSeconds(job) + if (buildDuration > 0 && buildDuration >= job.maxJobDuration) { + job.isStarted = false + job.status = C2DStatusNumber.PublishingResults + job.statusText = C2DStatusText.PublishingResults + job.algoStartTimestamp = '0' + job.algoStopTimestamp = '0' + job.isRunning = false + await this.db.updateJob(job) + return + } + // now that we have the image ready, check it for vulnerabilities + if (this.getC2DConfig().connection?.scanImages) { + const check = await this.checkImageVulnerability(job.containerImage) + const imageLogFile = + this.getStoragePath() + '/' + job.jobId + '/data/logs/image.log' + const logText = + `Image scanned for vulnerabilities\nVulnerable:${check.vulnerable}\nSummary:` + + JSON.stringify(check.summary, null, 2) + CORE_LOGGER.debug(logText) + appendFileSync(imageLogFile, logText) + if (check.vulnerable) { + job.status = C2DStatusNumber.VulnerableImage + job.statusText = C2DStatusText.VulnerableImage + job.isRunning = false + job.dateFinished = String(Date.now() / 1000) + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + } // create the volume & create container // TO DO C2D: Choose driver & size // get env info @@ -1636,7 +1731,8 @@ export class C2DEngineDocker 
extends C2DEngine { // create the container const mountVols: any = { '/data': {} } const hostConfig: HostConfig = { - NetworkMode: 'none', // no network inside the container + // limit number of Pids container can spawn, to avoid flooding + PidsLimit: 512, Mounts: [ { Type: 'volume', @@ -1646,6 +1742,9 @@ export class C2DEngineDocker extends C2DEngine { } ] } + if (!this.enableNetwork) { + hostConfig.NetworkMode = 'none' // no network inside the container + } // disk // if (diskSize && diskSize > 0) { // hostConfig.StorageOpt = { @@ -1675,9 +1774,10 @@ export class C2DEngineDocker extends C2DEngine { AttachStdin: false, AttachStdout: true, AttachStderr: true, - Tty: true, + Tty: false, OpenStdin: false, StdinOnce: false, + User: `${C2D_CONTAINER_UID}:${C2D_CONTAINER_GID}`, Volumes: mountVols, HostConfig: hostConfig } @@ -1692,8 +1792,10 @@ export class C2DEngineDocker extends C2DEngine { containerInfo.HostConfig.Devices = advancedConfig.Devices if (advancedConfig.GroupAdd) containerInfo.HostConfig.GroupAdd = advancedConfig.GroupAdd - if (advancedConfig.SecurityOpt) - containerInfo.HostConfig.SecurityOpt = advancedConfig.SecurityOpt + containerInfo.HostConfig.SecurityOpt = [ + 'no-new-privileges', + ...(advancedConfig.SecurityOpt ?? []) + ] if (advancedConfig.Binds) containerInfo.HostConfig.Binds = advancedConfig.Binds containerInfo.HostConfig.CapDrop = ['ALL'] for (const cap of advancedConfig.CapDrop ?? 
[]) { @@ -1753,7 +1855,7 @@ export class C2DEngineDocker extends C2DEngine { let container let details try { - container = await this.docker.getContainer(job.jobId + '-algoritm') + container = this.docker.getContainer(job.jobId + '-algoritm') details = await container.inspect() } catch (e) { console.error( @@ -1787,10 +1889,7 @@ export class C2DEngineDocker extends C2DEngine { job.algoStopTimestamp = String(Date.now() / 1000) try { const algoLogFile = - this.getC2DConfig().tempFolder + - '/' + - job.jobId + - '/data/logs/algorithm.log' + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log' writeFileSync(algoLogFile, String(e.message)) } catch (e) { CORE_LOGGER.error('Failed to write algorithm log file: ' + e.message) @@ -1814,7 +1913,13 @@ export class C2DEngineDocker extends C2DEngine { } const timeNow = Date.now() / 1000 - const expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration + let expiry + + const buildDuration = this.getValidBuildDurationSeconds(job) + if (buildDuration > 0) { + // if job has build time, reduce the remaining algorithm runtime budget + expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration - buildDuration + } else expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration CORE_LOGGER.debug( 'container running since timeNow: ' + timeNow + ' , Expiry: ' + expiry ) @@ -1855,14 +1960,14 @@ export class C2DEngineDocker extends C2DEngine { job.statusText = C2DStatusText.JobSettle let container try { - container = await this.docker.getContainer(job.jobId + '-algoritm') + container = this.docker.getContainer(job.jobId + '-algoritm') } catch (e) { CORE_LOGGER.debug('Could not retrieve container: ' + e.message) job.isRunning = false job.dateFinished = String(Date.now() / 1000) try { const algoLogFile = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log' writeFileSync(algoLogFile, String(e.message)) } catch (e) 
{ CORE_LOGGER.error('Failed to write algorithm log file: ' + e.message) @@ -1880,7 +1985,7 @@ export class C2DEngineDocker extends C2DEngine { job.terminationDetails.exitCode = null } const outputsArchivePath = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/outputs.tar' + this.getStoragePath() + '/' + job.jobId + '/data/outputs/outputs.tar' try { if (container) { @@ -1964,6 +2069,14 @@ export class C2DEngineDocker extends C2DEngine { private allocateCpus(jobId: string, count: number): string | null { if (this.envCpuCores.length === 0 || count <= 0) return null + const existing = this.cpuAllocations.get(jobId) + if (existing && existing.length > 0) { + const cpusetStr = existing.join(',') + CORE_LOGGER.info( + `CPU affinity: reusing existing cores [${cpusetStr}] for job ${jobId}` + ) + return cpusetStr + } const usedCores = new Set() for (const cores of this.cpuAllocations.values()) { @@ -2044,11 +2157,11 @@ export class C2DEngineDocker extends C2DEngine { this.releaseCpus(job.jobId) try { - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') if (container) { if (job.status !== C2DStatusNumber.AlgorithmFailed) { writeFileSync( - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/algorithm.log', + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log', await container.logs({ stdout: true, stderr: true, @@ -2075,33 +2188,32 @@ export class C2DEngineDocker extends C2DEngine { } try { // remove folders - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/inputs', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/inputs', { recursive: true, force: true }) } catch (e) { console.error( - `Could not delete inputs from path ${this.getC2DConfig().tempFolder} for job ID ${ + `Could not delete inputs from path ${this.getStoragePath()} for job ID ${ job.jobId }! 
` + e.message ) } try { - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/transformations', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/transformations', { recursive: true, force: true }) } catch (e) { console.error( - `Could not delete algorithms from path ${ - this.getC2DConfig().tempFolder - } for job ID ${job.jobId}! ` + e.message + `Could not delete algorithms from path ${this.getStoragePath()} for job ID ${job.jobId}! ` + + e.message ) } } private deleteOutputFolder(job: DBComputeJob) { - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/outputs/', { recursive: true, force: true }) @@ -2236,8 +2348,7 @@ export class C2DEngineDocker extends C2DEngine { private async pullImage(originaljob: DBComputeJob) { const job = JSON.parse(JSON.stringify(originaljob)) as DBComputeJob - const imageLogFile = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/image.log' + const imageLogFile = this.getStoragePath() + '/' + job.jobId + '/data/logs/image.log' try { // Get registry auth for the image const { registry } = this.parseImage(job.containerImage) @@ -2341,7 +2452,7 @@ export class C2DEngineDocker extends C2DEngine { const imageLogFile = this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/image.log' const controller = new AbortController() - const timeoutMs = 5 * 60 * 1000 + const timeoutMs = job.maxJobDuration * 1000 const timer = setTimeout(() => controller.abort(), timeoutMs) try { const pack = tarStream.pack() @@ -2355,18 +2466,29 @@ export class C2DEngineDocker extends C2DEngine { } } pack.finalize() + job.buildStartTimestamp = String(Date.now() / 1000) + await this.db.updateJob(job) - // Build the image using the tar stream as context (Node IncomingMessage extends stream.Readable) - const buildStream = (await this.docker.buildImage(pack, { + const cpuperiod = 100000 + const ramGb = this.getResourceRequest(job.resources, 
'ram') + const ramBytes = + ramGb && ramGb > 0 ? ramGb * 1024 * 1024 * 1024 : 1024 * 1024 * 1024 + + const cpus = this.getResourceRequest(job.resources, 'cpu') + const cpuquota = cpus && cpus > 0 ? Math.floor(cpus * cpuperiod) : 50000 + + const buildOptions: Dockerode.ImageBuildOptions = { t: job.containerImage, - memory: 1024 * 1024 * 1024, // 1GB RAM in bytes - memswap: -1, // Disable swap - cpushares: 512, // CPU Shares (default is 1024) - cpuquota: 50000, // 50% of one CPU (100000 = 1 CPU) - cpuperiod: 100000, // Default period + memory: ramBytes, + memswap: ramBytes, // same as memory => no swap + cpushares: 1024, // CPU Shares (default is 1024) + cpuquota, // 100000 = 1 CPU with cpuperiod=100000 + cpuperiod, nocache: true, // prevent cache poison abortSignal: controller.signal - })) as Readable + } + // Build the image using the tar stream as context (Node IncomingMessage extends stream.Readable) + const buildStream = (await this.docker.buildImage(pack, buildOptions)) as Readable const onBuildData = (data: Buffer) => { try { @@ -2405,9 +2527,23 @@ export class C2DEngineDocker extends C2DEngine { } controller.signal.addEventListener('abort', onAbort, { once: true }) const onSuccess = () => { - finish(() => { + finish(async () => { detachBuildLog() controller.signal.removeEventListener('abort', onAbort) + + // Build stream completed, but does the image actually exist? + try { + await this.docker.getImage(job.containerImage).inspect() + } catch (e) { + return reject( + new Error( + `Cannot find image '${job.containerImage}' after building. 
Most likely it failed: ${ + (e as Error)?.message || String(e) + }` + ) + ) + } + CORE_LOGGER.debug(`Image '${job.containerImage}' built successfully.`) this.updateImageUsage(job.containerImage).catch((e) => { CORE_LOGGER.debug(`Failed to track image usage: ${e.message}`) @@ -2430,6 +2566,7 @@ export class C2DEngineDocker extends C2DEngine { }) job.status = C2DStatusNumber.ConfiguringVolumes job.statusText = C2DStatusText.ConfiguringVolumes + job.buildStopTimestamp = String(Date.now() / 1000) await this.db.updateJob(job) } catch (err) { const aborted = @@ -2448,6 +2585,7 @@ export class C2DEngineDocker extends C2DEngine { } job.status = C2DStatusNumber.BuildImageFailed job.statusText = C2DStatusText.BuildImageFailed + job.buildStopTimestamp = String(Date.now() / 1000) job.isRunning = false job.dateFinished = String(Date.now() / 1000) await this.db.updateJob(job) @@ -2481,7 +2619,7 @@ export class C2DEngineDocker extends C2DEngine { status: C2DStatusNumber.RunningAlgorithm, statusText: C2DStatusText.RunningAlgorithm } - const jobFolderPath = this.getC2DConfig().tempFolder + '/' + job.jobId + const jobFolderPath = this.getStoragePath() + '/' + job.jobId const fullAlgoPath = jobFolderPath + '/data/transformations/algorithm' const configLogPath = jobFolderPath + '/data/logs/configuration.log' @@ -2491,10 +2629,7 @@ export class C2DEngineDocker extends C2DEngine { "Writing algocustom data to '/data/inputs/algoCustomData.json'\n" ) const customdataPath = - this.getC2DConfig().tempFolder + - '/' + - job.jobId + - '/data/inputs/algoCustomData.json' + this.getStoragePath() + '/' + job.jobId + '/data/inputs/algoCustomData.json' writeFileSync(customdataPath, JSON.stringify(job.algorithm.algocustomdata ?? 
{})) let storage = null @@ -2748,7 +2883,7 @@ export class C2DEngineDocker extends C2DEngine { if (existsSync(destination)) { // now, upload it to the container - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') try { // await container2.putArchive(destination, { @@ -2795,7 +2930,7 @@ export class C2DEngineDocker extends C2DEngine { private makeJobFolders(job: DBComputeJob): boolean { try { - const baseFolder = this.getC2DConfig().tempFolder + '/' + job.jobId + const baseFolder = this.getStoragePath() + '/' + job.jobId const dirs = [ baseFolder, baseFolder + '/data', @@ -2810,6 +2945,8 @@ export class C2DEngineDocker extends C2DEngine { if (!existsSync(dir)) { mkdirSync(dir, { recursive: true }) } + // update directory permissions to allow read/write from job containers + chmodSync(dir, 0o777) } return true } catch (e) { @@ -2834,7 +2971,7 @@ export class C2DEngineDocker extends C2DEngine { } // delete output folders - await this.deleteOutputFolder(job) + this.deleteOutputFolder(job) // delete the job await this.db.deleteJob(job.jobId) return true @@ -2843,6 +2980,227 @@ export class C2DEngineDocker extends C2DEngine { } return false } + + private getValidBuildDurationSeconds(job: DBComputeJob): number { + const startRaw = job.buildStartTimestamp + const stopRaw = job.buildStopTimestamp + if (!startRaw || !stopRaw) return 0 + const start = Number.parseFloat(startRaw) + const stop = Number.parseFloat(stopRaw) + if (!Number.isFinite(start) || !Number.isFinite(stop)) return 0 + if (start <= 0) return 0 + if (stop < start) return 0 + return stop - start + } + + private async checkscanDBImage(): Promise { + // 1. Pull the image if it's missing locally + try { + await this.docker.getImage(trivyImage).inspect() + return true + } catch (error) { + if (error.statusCode === 404) { + CORE_LOGGER.info(`Trivy not found. 
Pulling ${trivyImage}...`) + const stream = await this.docker.pull(trivyImage) + + // We must wrap the pull stream in a promise to wait for completion + await new Promise((resolve, reject) => { + this.docker.modem.followProgress(stream, (err, res) => + err ? reject(err) : resolve(res) + ) + }) + + CORE_LOGGER.info('Pull complete.') + return true + } else { + CORE_LOGGER.error(`Unable to pull ${trivyImage}: ${error.message}`) + return true + } + } + } + + private async scanDBUpdate(): Promise { + CORE_LOGGER.info('Starting Trivy database refresh cron') + const hasImage = await this.checkscanDBImage() + if (!hasImage) { + // we cannot update without image + return + } + const updater = await this.docker.createContainer({ + Image: trivyImage, + Cmd: ['image', '--download-db-only'], // Only refreshes the cache + HostConfig: { + Binds: [`${this.trivyCachePath}:/root/.cache/trivy`] + } + }) + + await updater.start() + await updater.wait() + await updater.remove() + CORE_LOGGER.info('Trivy database refreshed.') + } + + private async scanImage(imageName: string) { + if (!imageName || !imageName.trim()) return null + const hasImage = await this.checkscanDBImage() + if (!hasImage) { + // we cannot update without image + return + } + CORE_LOGGER.debug(`Starting vulnerability check for ${imageName}`) + const container = await this.docker.createContainer({ + Image: trivyImage, + Cmd: [ + 'image', + '--format', + 'json', + '--quiet', + '--no-progress', + '--skip-db-update', + '--severity', + 'CRITICAL,HIGH', + imageName + ], + HostConfig: { + Binds: [ + '/var/run/docker.sock:/var/run/docker.sock', // To see local images + `${this.trivyCachePath}:/root/.cache/trivy` // THE CACHE BIND + ] + } + }) + + await container.start() + + // Wait for completion, then parse from *demuxed stdout* to avoid corrupt JSON + // due to Docker multiplexed log framing. 
+ const logsStream = await container.logs({ + follow: true, + stdout: true, + stderr: true + }) + + const outStream = new PassThrough() + const errStream = new PassThrough() + outStream.resume() + errStream.resume() + + const rawChunks: Buffer[] = [] + outStream.on('data', (chunk) => { + rawChunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)) + }) + + container.modem.demuxStream(logsStream, outStream, errStream) + + const logsDrained = new Promise((resolve, reject) => { + const done = () => resolve() + logsStream.once('end', done) + logsStream.once('close', done) + logsStream.once('error', reject) + }) + + await container.wait() + // Wait for the docker log stream to finish producing data. + await logsDrained + + await container.remove() + CORE_LOGGER.debug(`Vulnerability check for ${imageName} finished`) + + try { + const rawData = Buffer.concat(rawChunks).toString('utf8') + // Trivy's `--format json` output is a JSON object (it includes `SchemaVersion`). + // Prefer extracting the JSON object only; do not attempt array parsing since + // Trivy help/usage output may include `[` tokens (e.g. "[flags]") that are not JSON. + const firstBrace = rawData.indexOf('{') + const lastBrace = rawData.lastIndexOf('}') + + if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) { + const jsonText = rawData.slice(firstBrace, lastBrace + 1).trim() + if (!jsonText.includes('"SchemaVersion"')) { + CORE_LOGGER.error( + 'Trivy output did not contain SchemaVersion in extracted JSON. Truncated output: ' + + rawData.slice(0, 500) + ) + return null + } + return JSON.parse(jsonText) + } + + CORE_LOGGER.error( + `Failed to locate JSON in Trivy output. 
Truncated output: ${rawData.slice( + 0, + 1000 + )}` + ) + return null + } catch (e) { + CORE_LOGGER.error('Failed to parse Trivy output: ' + e.message) + return null + } + } + + private async checkImageVulnerability(imageName: string) { + const report = await this.scanImage(imageName) + if (!report) { + // + return { vulnerable: false, summary: 'failed to scan' } + } + // Results is an array (one entry per OS package manager / language) + const allVulnerabilities = report.Results.flatMap((r: any) => r.Vulnerabilities || []) + + const severityRank = (sev: string) => { + switch (sev) { + case 'CRITICAL': + return 3 + case 'HIGH': + return 2 + default: + return 1 + } + } + + const summary = { + total: allVulnerabilities.length, + critical: allVulnerabilities.filter((v: any) => v.Severity === 'CRITICAL').length, + high: allVulnerabilities.filter((v: any) => v.Severity === 'HIGH').length, + list: (() => { + // Present the most important vulnerabilities first. + const sorted = [...allVulnerabilities].sort((a: any, b: any) => { + const diff = severityRank(b.Severity) - severityRank(a.Severity) + if (diff !== 0) return diff + return String(a.VulnerabilityID || '').localeCompare( + String(b.VulnerabilityID || '') + ) + }) + + const list: Array<{ + severity: string + id: string + package: string + title: string + }> = [] + + for (const v of sorted) { + list.push({ + severity: v.Severity, + id: v.VulnerabilityID, + package: v.PkgName, + title: v.Title || 'No description' + }) + } + + return list + })() + } + + if (summary.critical > 0) { + return { + vulnerable: true, + summary + } + } + + return { vulnerable: false, summary } + } } // this uses the docker engine, but exposes only one env, the free one diff --git a/src/components/c2d/compute_engines.ts b/src/components/c2d/compute_engines.ts index ca3ca55d0..6402d1dc7 100644 --- a/src/components/c2d/compute_engines.ts +++ b/src/components/c2d/compute_engines.ts @@ -1,4 +1,8 @@ -import { C2DClusterType, ComputeEnvironment } 
from '../../@types/C2D/C2D.js' +import { + C2DClusterInfo, + C2DClusterType, + ComputeEnvironment +} from '../../@types/C2D/C2D.js' import { C2DEngine } from './compute_engine_base.js' import { C2DEngineDocker } from './compute_engine_docker.js' import { OceanNodeConfig } from '../../@types/OceanNode.js' @@ -15,11 +19,10 @@ export class C2DEngines { escrow: Escrow, keyManager: KeyManager ) { - // let's see what engines do we have and initialize them one by one - // for docker, we need to add the "free" - - // TO DO - check if we have multiple config.c2dClusters with the same host - // if yes, do not create multiple engines + const crons = { + imageCleanup: false, + scanDBUpdate: false + } if (config && config.c2dClusters) { this.engines = [] let cpuOffset = 0 @@ -33,9 +36,33 @@ export class C2DEngines { `Cannot create engine ${cluster.connection.hash}.\r\nConfig.claimDurationTimeout is not high enough to claim at least ${limit} times. Either decrease environment.paymentClaimInterval${cluster.connection.paymentClaimInterval} or increase config.claimDurationTimeout(${claimDurationTimeout})` ) } else { + const cfg = JSON.parse(JSON.stringify(cluster)) as C2DClusterInfo + // make sure that crons are running only on one docker engine + if (crons.imageCleanup) { + // already running, set cron to null for this engine + cfg.connection.imageCleanupInterval = null + } else { + // not running yet, set the defaults + cfg.connection.imageCleanupInterval = + cfg.connection.imageCleanupInterval || 86400 // 24 hours + crons.imageCleanup = true + } + if (crons.scanDBUpdate) { + cfg.connection.scanImageDBUpdateInterval = null + } else { + if (cfg.connection.scanImages) { + // set the defaults + cfg.connection.scanImageDBUpdateInterval = + cfg.connection.scanImageDBUpdateInterval || 43200 // 12 hours + crons.scanDBUpdate = true + } else { + // image scanning disabled for this engine + cfg.connection.scanImageDBUpdateInterval = null + } + } this.engines.push( new C2DEngineDocker( - 
cluster, + cfg, db, escrow, keyManager, diff --git a/src/components/core/handler/getJobs.ts b/src/components/core/handler/getJobs.ts index a72a25add..21cdf18ed 100644 --- a/src/components/core/handler/getJobs.ts +++ b/src/components/core/handler/getJobs.ts @@ -30,7 +30,9 @@ export class GetJobsHandler extends CommandHandler { const jobs = await c2d.getJobs( task.environments, task.fromTimestamp, - task.consumerAddrs + task.consumerAddrs, + undefined, + task.runningJobs ) const sanitizedJobs = jobs.map((job) => { if (job.algorithm) { diff --git a/src/components/core/utils/statusHandler.ts b/src/components/core/utils/statusHandler.ts index 2b7d73c9c..dca790bce 100644 --- a/src/components/core/utils/statusHandler.ts +++ b/src/components/core/utils/statusHandler.ts @@ -14,6 +14,7 @@ import { typesenseSchemas } from '../../database/TypesenseSchemas.js' import { SupportedNetwork } from '../../../@types/blockchain.js' import { getAdminAddresses } from '../../../utils/auth.js' import HumanHasher from 'humanhash' +import { getPackageVersion } from '../../../utils/version.js' function getSupportedStorageTypes(config: OceanNodeConfig): StorageTypes { return { @@ -126,7 +127,7 @@ export async function status( publicKey: publicKeyHex, friendlyName: new HumanHasher().humanize(publicKeyHex), address: oceanNode.getKeyManager().getEthAddress(), - version: process.env.npm_package_version, + version: getPackageVersion(), http: config.hasHttp, p2p: config.hasP2P, provider: [], diff --git a/src/components/database/C2DDatabase.ts b/src/components/database/C2DDatabase.ts index 87146e576..2fdaedbdc 100755 --- a/src/components/database/C2DDatabase.ts +++ b/src/components/database/C2DDatabase.ts @@ -84,9 +84,16 @@ export class C2DDatabase extends AbstractDatabase { environments?: string[], fromTimestamp?: string, consumerAddrs?: string[], - status?: C2DStatusNumber + status?: C2DStatusNumber, + runningJobs?: boolean ): Promise { - return await this.provider.getJobs(environments, 
fromTimestamp, consumerAddrs, status) + return await this.provider.getJobs( + environments, + fromTimestamp, + consumerAddrs, + status, + runningJobs + ) } async getJobsByStatus( diff --git a/src/components/database/sqliteCompute.ts b/src/components/database/sqliteCompute.ts index d0596617e..0fa823608 100644 --- a/src/components/database/sqliteCompute.ts +++ b/src/components/database/sqliteCompute.ts @@ -48,7 +48,9 @@ function getInternalStructure(job: DBComputeJob): any { algoDuration: job.algoDuration, queueMaxWaitTime: job.queueMaxWaitTime, output: job.output, - jobIdHash: job.jobIdHash + jobIdHash: job.jobIdHash, + buildStartTimestamp: job.buildStartTimestamp, + buildStopTimestamp: job.buildStopTimestamp } return internalBlob } @@ -445,7 +447,8 @@ export class SQLiteCompute implements ComputeDatabaseProvider { environments?: string[], fromTimestamp?: string, consumerAddrs?: string[], - status?: C2DStatusNumber + status?: C2DStatusNumber, + runningJobs?: boolean ): Promise { let selectSQL = `SELECT * FROM ${this.schema.name}` @@ -458,9 +461,22 @@ export class SQLiteCompute implements ComputeDatabaseProvider { params.push(...environments) } - if (fromTimestamp) { - conditions.push(`dateFinished >= ?`) - params.push(fromTimestamp) + if (runningJobs) { + conditions.push(`status = ?`) + params.push(C2DStatusNumber.RunningAlgorithm.toString()) + if (fromTimestamp) { + conditions.push(`dateCreated >= ?`) + params.push(fromTimestamp) + } + } else { + if (fromTimestamp) { + conditions.push(`dateFinished >= ?`) + params.push(fromTimestamp) + } + if (status) { + conditions.push(`status = ?`) + params.push(status.toString()) + } } if (consumerAddrs && consumerAddrs.length > 0) { diff --git a/src/test/integration/compute.test.ts b/src/test/integration/compute.test.ts index 3d29c96bb..07cdc3134 100644 --- a/src/test/integration/compute.test.ts +++ b/src/test/integration/compute.test.ts @@ -868,7 +868,7 @@ describe('Compute', () => { }) it('should start a compute job with 
maxed resources', async function () { - this.timeout(130_000) // waitForAllJobsToFinish can take up to 120s + this.timeout(180_000) // waitForAllJobsToFinish can take up to 180s await waitForAllJobsToFinish(oceanNode) let balance = await paymentTokenContract.balanceOf(await consumerAccount.getAddress()) if (BigInt(balance.toString()) === BigInt(0)) { @@ -3167,19 +3167,5 @@ describe('Compute Access Restrictions', () => { ) } }) - - it('should start payment claim timer on engine start', function () { - // Verify timer methods exist - // Timer might be null if not started yet, or a NodeJS.Timeout if started - // We can't easily test the timer directly, but we can verify the method exists - assert( - typeof (dockerEngine as any).startPaymentTimer === 'function', - 'startPaymentTimer method should exist' - ) - assert( - typeof (dockerEngine as any).claimPayments === 'function', - 'claimPayments method should exist' - ) - }) }) }) diff --git a/src/test/integration/getJobs.test.ts b/src/test/integration/getJobs.test.ts index eeae02ec5..3311ec7f9 100644 --- a/src/test/integration/getJobs.test.ts +++ b/src/test/integration/getJobs.test.ts @@ -60,7 +60,9 @@ function buildJob(overrides: Partial = {}): DBComputeJob { payment: overrides.payment, additionalViewers: overrides.additionalViewers || [], algoDuration: overrides.algoDuration || 0, - queueMaxWaitTime: overrides.queueMaxWaitTime || 0 + queueMaxWaitTime: overrides.queueMaxWaitTime || 0, + buildStartTimestamp: overrides.buildStartTimestamp || '0', + buildStopTimestamp: overrides.buildStopTimestamp || '0' } } diff --git a/src/test/unit/buildImage.test.ts b/src/test/unit/buildImage.test.ts new file mode 100644 index 000000000..caafbc234 --- /dev/null +++ b/src/test/unit/buildImage.test.ts @@ -0,0 +1,157 @@ +import { expect } from 'chai' +import sinon from 'sinon' +import { mkdirSync } from 'fs' +import os from 'os' +import path from 'path' +import { Readable } from 'stream' +import { C2DStatusNumber } from 
'../../@types/C2D/C2D.js' +import type { DBComputeJob } from '../../@types/C2D/C2D.js' + +function ensureTestEnv() { + // Several runtime modules validate env on import; provide safe defaults for unit tests. + if (!process.env.PRIVATE_KEY) { + process.env.PRIVATE_KEY = `0x${'11'.repeat(32)}` + } +} + +async function makeEngine(opts: { tempFolder: string }) { + ensureTestEnv() + const { C2DEngineDocker } = + await import('../../components/c2d/compute_engine_docker.js') + const db = { + updateJob: sinon.stub().resolves(), + // buildImage() doesn't call getJobs*; keep minimal surface + getRunningJobs: sinon.stub().resolves([]), + getJobsByStatus: sinon.stub().resolves([]) + } as any + + const clusterConfig = { + type: 2, + hash: 'test-hash', + tempFolder: opts.tempFolder, + connection: { + // keep constructor happy + imageRetentionDays: 1, + imageCleanupInterval: 999999, + paymentClaimInterval: 999999 + } + } as any + + const engine = new C2DEngineDocker(clusterConfig, db, {} as any, {} as any, {} as any) + + // prevent side-effects during unit tests + ;(engine as any).cleanupJob = sinon.stub().resolves() + ;(engine as any).updateImageUsage = sinon.stub().resolves() + + return { engine, db } +} + +function makeJob(base: Partial = {}): DBComputeJob { + return { + jobId: 'job-123', + owner: '0x0', + environment: 'env-1', + dateCreated: String(Date.now() / 1000), + dateFinished: null as any, + clusterHash: 'test-hash', + isFree: false, + isRunning: true, + isStarted: false, + stopRequested: false, + status: C2DStatusNumber.BuildImage, + statusText: 'BuildImage', + resources: [ + { id: 'cpu', amount: 1 }, + { id: 'ram', amount: 1 }, + { id: 'disk', amount: 1 } + ], + maxJobDuration: 60, + queueMaxWaitTime: 0, + // timestamps + algoStartTimestamp: '0', + algoStopTimestamp: '0', + buildStartTimestamp: '0', + buildStopTimestamp: '0', + // algorithm/container + algorithm: { + did: 'did:op:algo', + serviceIndex: 0, + meta: { + container: { + image: 'dummy', + tag: 'latest', + 
entrypoint: 'node', + checksum: '0x0', + dockerfile: 'FROM alpine:3.18\nRUN echo hi\n' + } + } + } as any, + input: [] as any, + output: '' as any, + containerImage: 'ocean-node-test:job-123', + algoDuration: 0, + encryptedDockerRegistryAuth: undefined, + payment: null as any, + additionalViewers: [], + logs: null as any, + results: null as any, + jobIdHash: '1', + ...base + } as DBComputeJob +} + +describe('C2DEngineDocker.buildImage', () => { + afterEach(() => { + sinon.restore() + }) + + it('marks build as failed if image is missing after build completes', async () => { + const tempFolder = path.join(os.tmpdir(), 'ocean-node-buildimage-test') + const { engine, db } = await makeEngine({ tempFolder }) + + const job = makeJob() + mkdirSync(path.join(tempFolder, job.jobId, 'data', 'logs'), { recursive: true }) + + const buildStream = new Readable({ read() {} }) + ;(engine as any).docker = { + buildImage: sinon.stub().resolves(buildStream), + getImage: sinon.stub().returns({ + inspect: sinon.stub().rejects(new Error('no such image')) + }) + } + + const p = (engine as any).buildImage(job, null) + await new Promise((resolve) => setImmediate(resolve)) + buildStream.emit('end') + await p + + expect(db.updateJob.called).to.equal(true) + const lastUpdate = db.updateJob.lastCall.args[0] as DBComputeJob + expect(lastUpdate.status).to.equal(C2DStatusNumber.BuildImageFailed) + }) + + it('only logs success when image exists', async () => { + const tempFolder = path.join(os.tmpdir(), 'ocean-node-buildimage-test-success') + const { engine, db } = await makeEngine({ tempFolder }) + + const job = makeJob({ containerImage: 'ocean-node-test:job-123-success' }) + mkdirSync(path.join(tempFolder, job.jobId, 'data', 'logs'), { recursive: true }) + + const buildStream = new Readable({ read() {} }) + ;(engine as any).docker = { + buildImage: sinon.stub().resolves(buildStream), + getImage: sinon.stub().returns({ + inspect: sinon.stub().resolves({}) + }) + } + + const p = (engine as 
any).buildImage(job, null) + await new Promise((resolve) => setImmediate(resolve)) + buildStream.emit('end') + await p + + const lastUpdate = db.updateJob.lastCall.args[0] as DBComputeJob + expect(lastUpdate.status).to.equal(C2DStatusNumber.ConfiguringVolumes) + expect(Number.parseFloat(lastUpdate.buildStopTimestamp)).to.be.greaterThan(0) + }) +}) diff --git a/src/utils/blockchain.ts b/src/utils/blockchain.ts index 2c80161c8..0bf9458f7 100644 --- a/src/utils/blockchain.ts +++ b/src/utils/blockchain.ts @@ -3,7 +3,6 @@ import { ethers, Signer, Contract, - JsonRpcApiProvider, JsonRpcProvider, FallbackProvider, isAddress, @@ -15,18 +14,12 @@ import { getConfiguration } from './config.js' import { CORE_LOGGER } from './logging/common.js' import { ConnectionStatus } from '../@types/blockchain.js' import { ValidateChainId } from '../@types/commands.js' -// import { KNOWN_CONFIDENTIAL_EVMS } from '../utils/address.js' -import { OceanNodeConfig } from '../@types/OceanNode.js' import { KeyManager } from '../components/KeyManager/index.js' export class Blockchain { - private config?: OceanNodeConfig // Optional for new constructor - private static signers: Map = new Map() - private static providers: Map = new Map() private keyManager: KeyManager private signer: Signer private provider: FallbackProvider - private providers: JsonRpcProvider[] = [] private chainId: number private knownRPCs: string[] = [] @@ -65,24 +58,44 @@ export class Blockchain { public async getProvider(force: boolean = false): Promise { if (!this.provider) { - for (const rpc of this.knownRPCs) { + const configs: { + provider: JsonRpcProvider + priority: number + stallTimeout: number + }[] = [] + + const PRIMARY_RPC_TIMEOUT = 3000 + const FALLBACK_RPC_TIMEOUT = 1500 + for (let i = 0; i < this.knownRPCs.length; i++) { + const rpc = this.knownRPCs[i] const rpcProvider = new JsonRpcProvider(rpc) - // filter wrong chains or broken RPCs if (!force) { try { const { chainId } = await rpcProvider.getNetwork() if 
(chainId.toString() === this.chainId.toString()) { - this.providers.push(rpcProvider) - break + // primary RPC gets lowest priority = is first to be called + configs.push({ + provider: rpcProvider, + priority: i + 1, + stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT + }) } } catch (error) { CORE_LOGGER.error(`Error getting network for RPC ${rpc}: ${error}`) } } else { - this.providers.push(new JsonRpcProvider(rpc)) + configs.push({ + provider: rpcProvider, + priority: i + 1, + stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT + }) } } - this.provider = new FallbackProvider(this.providers) + // quorum=1: accept the first response to avoid calls to all configured rpcs + this.provider = + configs.length > 0 + ? new FallbackProvider(configs, undefined, { quorum: 1 }) + : new FallbackProvider([]) } return this.provider } diff --git a/src/utils/config/builder.ts b/src/utils/config/builder.ts index 23a0f5218..7a14e8cee 100644 --- a/src/utils/config/builder.ts +++ b/src/utils/config/builder.ts @@ -159,7 +159,7 @@ export function buildC2DClusters( connection: dockerC2d, hash, type: C2DClusterType.DOCKER, - tempFolder: './c2d_storage/' + hash + tempFolder: './c2d_storage/' // this is the base folder, each engine creates its own subfolder }) count += 1 } diff --git a/src/utils/config/schemas.ts b/src/utils/config/schemas.ts index 07104524e..c60703ce2 100644 --- a/src/utils/config/schemas.ts +++ b/src/utils/config/schemas.ts @@ -153,7 +153,8 @@ export const ComputeEnvironmentFreeOptionsSchema = z.object({ .nullable() .optional() }) - .optional() + .optional(), + allowImageBuild: z.boolean().optional().default(false) }) export const C2DDockerConfigSchema = z.array( @@ -182,7 +183,10 @@ export const C2DDockerConfigSchema = z.array( fees: z.record(z.string(), z.array(ComputeEnvFeesSchema)).optional(), free: ComputeEnvironmentFreeOptionsSchema.optional(), imageRetentionDays: z.number().int().min(1).optional().default(7), - imageCleanupInterval: 
z.number().int().min(3600).optional().default(86400) // min 1 hour, default 24 hours + imageCleanupInterval: z.number().int().min(3600).optional().default(86400), // min 1 hour, default 24 hours + scanImages: z.boolean().optional().default(false), + scanImageDBUpdateInterval: z.number().int().min(3600).optional().default(43200), // default 43200 (12 hours) + enableNetwork: z.boolean().optional().default(false) }) .refine( (data) => diff --git a/src/utils/version.ts b/src/utils/version.ts new file mode 100644 index 000000000..470f95abe --- /dev/null +++ b/src/utils/version.ts @@ -0,0 +1,7 @@ +import { createRequire } from 'module' + +const require = createRequire(import.meta.url) + +export function getPackageVersion(): string { + return process.env.npm_package_version ?? require('../../package.json').version +}