Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,17 @@ node_modules
/dist
logs
c2d_storage
.env.local
.env
databases
.env
.env.*
.git
.github
docs
src/test
*.md
*.log
.nyc_output
coverage
docker-compose.yml
elasticsearch-compose.yml
typesense-compose.yml
91 changes: 50 additions & 41 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,44 +1,53 @@
FROM ubuntu:22.04 AS base
RUN apt-get update && apt-get -y install bash curl git wget libatomic1 python3 build-essential
COPY .nvmrc /usr/src/app/
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
ENV NVM_DIR=/usr/local/nvm
RUN mkdir $NVM_DIR
ENV NODE_VERSION=v22.15.0
# Install nvm with node and npm
RUN curl https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \
&& source $NVM_DIR/nvm.sh \
&& nvm install $NODE_VERSION \
&& nvm alias default $NODE_VERSION \
&& nvm use default
ENV NODE_PATH=$NVM_DIR/$NODE_VERSION/lib/node_modules
ENV PATH=$NVM_DIR/versions/node/$NODE_VERSION/bin:$PATH
ENV IPFS_GATEWAY='https://ipfs.io/'
ENV ARWEAVE_GATEWAY='https://arweave.net/'

FROM base AS builder
COPY package*.json /usr/src/app/
COPY scripts/ /usr/src/app/scripts/
WORKDIR /usr/src/app/
FROM node:22.15.0-bookworm@sha256:a1f1274dadd49738bcd4cf552af43354bb781a7e9e3bc984cfeedc55aba2ddd8 AS builder
RUN apt-get update && apt-get install -y --no-install-recommends \
python3 \
build-essential \
libatomic1 \
git \
&& rm -rf /var/lib/apt/lists/*

WORKDIR /usr/src/app
COPY package*.json ./
COPY scripts/ ./scripts/
RUN npm ci
COPY . .
RUN npm run build && npm prune --omit=dev


FROM node:22.15.0-bookworm-slim@sha256:557e52a0fcb928ee113df7e1fb5d4f60c1341dbda53f55e3d815ca10807efdce AS runner
RUN apt-get update && apt-get install -y --no-install-recommends \
dumb-init \
gosu \
libatomic1 \
&& rm -rf /var/lib/apt/lists/*

ENV NODE_ENV=production \
IPFS_GATEWAY='https://ipfs.io/' \
ARWEAVE_GATEWAY='https://arweave.net/' \
P2P_ipV4BindTcpPort=9000 \
P2P_ipV4BindWsPort=9001 \
P2P_ipV6BindTcpPort=9002 \
P2P_ipV6BindWsPort=9003 \
P2P_ipV4BindWssPort=9005 \
HTTP_API_PORT=8000

EXPOSE 9000 9001 9002 9003 9005 8000

# Docker group membership is handled at runtime in docker-entrypoint.sh by
# inspecting the GID of /var/run/docker.sock, so it works across hosts.

WORKDIR /usr/src/app

COPY --chown=node:node --from=builder /usr/src/app/dist ./dist
COPY --chown=node:node --from=builder /usr/src/app/node_modules ./node_modules
COPY --chown=node:node --from=builder /usr/src/app/schemas ./schemas
COPY --chown=node:node --from=builder /usr/src/app/package.json ./
COPY --chown=node:node --from=builder /usr/src/app/config.json ./

RUN mkdir -p databases c2d_storage logs

COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

FROM base AS runner
COPY . /usr/src/app
WORKDIR /usr/src/app/
COPY --from=builder /usr/src/app/node_modules/ /usr/src/app/node_modules/
RUN npm run build
ENV P2P_ipV4BindTcpPort=9000
EXPOSE 9000
ENV P2P_ipV4BindWsPort=9001
EXPOSE 9001
ENV P2P_ipV6BindTcpPort=9002
EXPOSE 9002
ENV P2P_ipV6BindWsPort=9003
EXPOSE 9003
ENV P2P_ipV4BindWssPort=9005
EXPOSE 9005
ENV HTTP_API_PORT=8000
EXPOSE 8000
ENV NODE_ENV='production'
CMD ["npm","run","start"]
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["node", "--max-old-space-size=28784", "--trace-warnings", "--experimental-specifier-resolution=node", "dist/index.js"]
19 changes: 19 additions & 0 deletions docker-entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/bin/sh
# Container entrypoint: runs as root to prepare volumes and Docker socket
# access, then drops privileges to the 'node' user before starting the app.
set -e

# Fix ownership of directories that may be mounted as volumes (owned by root).
# Best-effort: failures (e.g. read-only mounts) are ignored on purpose.
chown -R node:node /usr/src/app/databases /usr/src/app/c2d_storage /usr/src/app/logs 2>/dev/null || true

# Add node user to the docker group matching the host's /var/run/docker.sock GID,
# so compute jobs can access the socket regardless of the host's docker GID.
if [ -S /var/run/docker.sock ]; then
    SOCK_GID=$(stat -c '%g' /var/run/docker.sock)
    if ! getent group "$SOCK_GID" > /dev/null 2>&1; then
        groupadd -g "$SOCK_GID" dockerhost 2>/dev/null || true
    fi
    DOCKER_GROUP=$(getent group "$SOCK_GID" | cut -d: -f1)
    # Guard against an empty group name: if groupadd above failed (its error is
    # swallowed by `|| true`) and no group with that GID exists, an unquoted
    # `usermod -aG "" node` would fail and, with `set -e`, kill the container.
    if [ -n "$DOCKER_GROUP" ]; then
        usermod -aG "$DOCKER_GROUP" node
    fi
fi

# dumb-init becomes the signal-proxying PID 1; gosu drops root -> node.
exec gosu node dumb-init -- "$@"
3 changes: 3 additions & 0 deletions docs/compute-pricing.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@ This guide explains how to configure your node’s Docker compute environments a
## Overview

- **Configuration**: Define compute environments via the `DOCKER_COMPUTE_ENVIRONMENTS` environment variable (JSON) or via `config.json` under `dockerComputeEnvironments`.
- **Environment**: A group of resources, payment settings and access lists.
- **Resources**: Each environment declares resources (e.g. `cpu`, `ram`, `disk`, and optionally GPUs). You must declare a `disk` resource.
- **Pricing**: For each chain and fee token, you set a `price` per resource. Cost is computed as **price × amount × duration (in minutes, rounded up)**.
- **Free**: Environments that do not require payment for the resources, but are most likely very limited in terms of available resources and job duration.
- **Image building**: **Free jobs cannot build images** (Dockerfiles are not allowed). For **paid jobs**, **image build time counts toward billable duration** and also consumes the job’s `maxJobDuration`.

## Pricing Units

Expand Down
49 changes: 6 additions & 43 deletions docs/env.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,49 +34,6 @@ Environmental variables are also tracked in `ENVIRONMENT_VARIABLES` within `src/
- `AUTHORIZED_PUBLISHERS_LIST`: AccessList contract addresses (per chain). If present, Node will only index assets published by the accounts present on the given access lists. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"`
- `VALIDATE_UNSIGNED_DDO`: If set to `false`, the node will not validate unsigned DDOs and will request a signed message with the publisher address, nonce and signature. Default is `true`. Example: `false`
- `JWT_SECRET`: Secret used to sign JWT tokens. Default is `ocean-node-secret`. Example: `"my-secret-jwt-token"`
- `NODE_OWNER_INFO`: Optional JSON object returned by the root endpoint as `ownerInfo`. Example: `"{\"imprint\":{\"legalName\":\"Example Ocean Services GmbH\"},\"termsAndConditions\":{\"url\":\"https://example.com/terms\"},\"anyCustomSection\":{\"foo\":\"bar\"}}"`

## Database

- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"`
- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"`
- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"`
- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000`
- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000`
- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"`
- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5`
- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true`
- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000`
- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true`
- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000`

## Database

- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"`
- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"`
- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"`
- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000`
- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000`
- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"`
- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5`
- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true`
- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000`
- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true`
- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000`

## Database

- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"`
- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"`
- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"`
- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000`
- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000`
- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"`
- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5`
- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true`
- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000`
- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true`
- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000`

## Database

Expand Down Expand Up @@ -179,6 +136,8 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
[
{
"socketPath": "/var/run/docker.sock",
"scanImages": true,
"enableNetwork": false,
"imageRetentionDays": 7,
"imageCleanupInterval": 86400,
"resources": [
Expand Down Expand Up @@ -237,6 +196,9 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
#### Configuration Options

- **socketPath**: Path to the Docker socket (e.g., docker.sock).
- **scanImages**: Whether Docker images should be scanned for vulnerabilities using Trivy. If enabled and critical vulnerabilities are found, the C2D job is rejected.
- **scanImageDBUpdateInterval**: How often to update the vulnerability database, in seconds. Default: 43200 (12 hours)
- **enableNetwork**: Whether networking is enabled for algorithm containers. Default: false
- **imageRetentionDays**: How long Docker images are kept, in days. Default: 7
- **imageCleanupInterval**: How often to run cleanup for Docker images, in seconds. Min: 3600 (1 hour), Default: 86400 (24 hours)
- **paymentClaimInterval** - how often to run payment claiming, in seconds. Default: 3600 (1 hour)
Expand All @@ -261,6 +223,7 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
- **maxJobDuration**: Maximum duration in seconds for a free job.
- **minJobDuration**: Minimum duration in seconds for a free job.
- **maxJobs**: Maximum number of simultaneous free jobs.
- **allowImageBuild**: Whether building images is allowed in free environments. Default: false
- **access**: Access control configuration for free compute jobs. Works the same as the main `access` field.
- **addresses**: Array of Ethereum addresses allowed to run free compute jobs.
- **accessLists**: Array of AccessList contract addresses for free compute access control.
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "ocean-node",
"version": "2.1.1",
"version": "2.1.2",
"description": "Ocean Node is used to run all core services in the Ocean stack",
"author": "Ocean Protocol Foundation",
"license": "Apache-2.0",
Expand Down
10 changes: 10 additions & 0 deletions src/@types/C2D/C2D.ts
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ export interface ComputeEnvironmentFreeOptions {
maxJobs?: number // maximum number of simultaneous free jobs
resources?: ComputeResource[]
access: ComputeAccessList
allowImageBuild?: boolean
}
export interface ComputeEnvironmentBaseConfig {
description?: string // v1
Expand Down Expand Up @@ -158,6 +159,9 @@ export interface C2DDockerConfig {
imageRetentionDays?: number // Default: 7 days
imageCleanupInterval?: number // Default: 86400 seconds (24 hours)
paymentClaimInterval?: number // Default: 3600 seconds (1 hours)
scanImages?: boolean
scanImageDBUpdateInterval?: number // Default: 12 hours
enableNetwork?: boolean // whether network is enabled for algorithm containers
}

export type ComputeResultType =
Expand Down Expand Up @@ -280,6 +284,8 @@ export interface DBComputeJob extends ComputeJob {
encryptedDockerRegistryAuth?: string
output?: string // this is always an ECIES encrypted string, that decodes to ComputeOutput interface
jobIdHash: string
buildStartTimestamp?: string
buildStopTimestamp?: string
}

// make sure we keep them both in sync
Expand All @@ -299,6 +305,8 @@ export enum C2DStatusNumber {
// eslint-disable-next-line no-unused-vars
BuildImageFailed = 13,
// eslint-disable-next-line no-unused-vars
VulnerableImage = 14,
// eslint-disable-next-line no-unused-vars
ConfiguringVolumes = 20,
// eslint-disable-next-line no-unused-vars
VolumeCreationFailed = 21,
Expand Down Expand Up @@ -347,6 +355,8 @@ export enum C2DStatusText {
// eslint-disable-next-line no-unused-vars
BuildImageFailed = 'Building algorithm image failed',
// eslint-disable-next-line no-unused-vars
VulnerableImage = 'Image has vulnerabilities',
// eslint-disable-next-line no-unused-vars
ConfiguringVolumes = 'Configuring volumes',
// eslint-disable-next-line no-unused-vars
VolumeCreationFailed = 'Volume creation failed',
Expand Down
1 change: 1 addition & 0 deletions src/@types/commands.ts
Original file line number Diff line number Diff line change
Expand Up @@ -312,4 +312,5 @@ export interface GetJobsCommand extends Command {
environments?: string[]
fromTimestamp?: string
consumerAddrs?: string[]
runningJobs?: boolean
}
3 changes: 2 additions & 1 deletion src/components/Indexer/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ import { getDatabase, isReachableConnection } from '../../utils/database.js'
import { sleep } from '../../utils/util.js'
import { isReindexingNeeded } from './version.js'
import { DB_EVENTS, ES_CONNECTION_EVENTS } from '../database/ElasticsearchConfigHelper.js'
import { getPackageVersion } from '../../utils/version.js'

/**
* Event emitter for DDO (Data Descriptor Object) events
Expand Down Expand Up @@ -535,7 +536,7 @@ export class OceanIndexer {
* Checks if reindexing is needed and triggers it for all chains
*/
public async checkAndTriggerReindexing(): Promise<void> {
const currentVersion = process.env.npm_package_version
const currentVersion = getPackageVersion()
const dbActive = this.getDatabase()
if (!dbActive || !(await isReachableConnection(dbActive.getConfig().url))) {
INDEXER_LOGGER.error(`Giving up reindexing. DB is not online!`)
Expand Down
15 changes: 12 additions & 3 deletions src/components/P2P/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -331,20 +331,23 @@ export class OceanP2P extends EventEmitter {
`/ip6/${config.p2pConfig.ipV6BindAddress}/tcp/${config.p2pConfig.ipV6BindWsPort}/ws`
)
}
const listenAddrs = config.p2pConfig.enableCircuitRelayClient
? [...bindInterfaces, '/p2p-circuit']
: bindInterfaces
let addresses = {}
if (
config.p2pConfig.announceAddresses &&
config.p2pConfig.announceAddresses.length > 0
) {
addresses = {
listen: bindInterfaces,
listen: listenAddrs,
announceFilter: (multiaddrs: any[]) =>
multiaddrs.filter((m) => this.shouldAnnounce(m)),
appendAnnounce: config.p2pConfig.announceAddresses
}
} else {
addresses = {
listen: bindInterfaces,
listen: listenAddrs,
announceFilter: (multiaddrs: any[]) =>
multiaddrs.filter((m) => this.shouldAnnounce(m))
}
Expand Down Expand Up @@ -395,7 +398,12 @@ export class OceanP2P extends EventEmitter {
// eslint-disable-next-line no-constant-condition, no-self-compare
if (config.p2pConfig.enableCircuitRelayServer) {
P2P_LOGGER.info('Enabling Circuit Relay Server')
servicesConfig = { ...servicesConfig, ...{ circuitRelay: circuitRelayServer() } }
servicesConfig = {
...servicesConfig,
...{
circuitRelay: circuitRelayServer({ reservations: { maxReservations: 2 } })
}
}
}
// eslint-disable-next-line no-constant-condition, no-self-compare
if (config.p2pConfig.upnp) {
Expand Down Expand Up @@ -964,6 +972,7 @@ export class OceanP2P extends EventEmitter {
// on timeout the query ends with an abort signal => CodeError: Query aborted
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any)

for await (const value of f) {
peersFound.push(value)
}
Expand Down
5 changes: 3 additions & 2 deletions src/components/c2d/compute_engine_base.ts
Original file line number Diff line number Diff line change
Expand Up @@ -327,8 +327,9 @@ export abstract class C2DEngine {
for (const job of jobs) {
if (job.environment === env.id) {
if (job.queueMaxWaitTime === 0) {
const timeElapsed =
new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp)
const timeElapsed = job.buildStartTimestamp
? new Date().getTime() / 1000 - Number.parseFloat(job?.buildStartTimestamp)
: new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp)
totalJobs++
maxRunningTime += job.maxJobDuration - timeElapsed
if (job.isFree) {
Expand Down
Loading
Loading