From 7364c68ffb4ef582e3d0013bbe5f5f26b689b1ca Mon Sep 17 00:00:00 2001 From: bohendo Date: Tue, 8 Sep 2020 11:59:06 +0530 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20init=20a=20bunch=20of=20ops?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 168 ++--- Makefile | 136 ++++ modules/contracts/Multisig.sol | 6 +- modules/contracts/shared/LibChannelCrypto.sol | 19 + modules/contracts/shared/LibCommitment.sol | 18 + modules/engine/package-lock.json | 6 - modules/isomorphic-node/ops/Dockerfile | 10 + modules/isomorphic-node/ops/entry.sh | 63 ++ modules/isomorphic-node/ops/test.sh | 51 ++ modules/isomorphic-node/ops/webpack.config.js | 75 ++ modules/isomorphic-node/package.json | 9 +- modules/isomorphic-node/tsconfig.json | 2 +- ops/builder/Dockerfile | 10 + ops/builder/entry.sh | 19 + ops/database/Dockerfile | 6 + ops/database/backup-lifecycle.json | 21 + ops/database/backup.sh | 72 ++ ops/database/config.json | 14 + ops/database/database.json | 11 + ops/database/db.dockerfile | 6 + ops/database/entry.sh | 106 +++ ops/database/postgresql.conf | 646 ++++++++++++++++++ ops/database/run-backup.sh | 13 + ops/proxy/Dockerfile | 7 + ops/proxy/entry.sh | 140 ++++ ops/proxy/http.cfg | 68 ++ ops/proxy/https.cfg | 84 +++ ops/replace.sh | 24 + ops/search.sh | 19 + ops/setup-ubuntu.sh | 171 +++++ ops/start-indra.sh | 303 ++++++++ ops/upgrade-package.sh | 27 + ops/version-check.sh | 33 + package-lock.json | 2 +- package.json | 2 +- todo | 3 + 36 files changed, 2251 insertions(+), 119 deletions(-) create mode 100644 Makefile create mode 100644 modules/contracts/shared/LibChannelCrypto.sol create mode 100644 modules/contracts/shared/LibCommitment.sol create mode 100644 modules/isomorphic-node/ops/Dockerfile create mode 100644 modules/isomorphic-node/ops/entry.sh create mode 100644 modules/isomorphic-node/ops/test.sh create mode 100644 modules/isomorphic-node/ops/webpack.config.js create mode 100644 ops/builder/Dockerfile create mode 
100644 ops/builder/entry.sh create mode 100644 ops/database/Dockerfile create mode 100644 ops/database/backup-lifecycle.json create mode 100644 ops/database/backup.sh create mode 100644 ops/database/config.json create mode 100644 ops/database/database.json create mode 100644 ops/database/db.dockerfile create mode 100644 ops/database/entry.sh create mode 100644 ops/database/postgresql.conf create mode 100644 ops/database/run-backup.sh create mode 100644 ops/proxy/Dockerfile create mode 100644 ops/proxy/entry.sh create mode 100644 ops/proxy/http.cfg create mode 100644 ops/proxy/https.cfg create mode 100644 ops/replace.sh create mode 100644 ops/search.sh create mode 100644 ops/setup-ubuntu.sh create mode 100644 ops/start-indra.sh create mode 100644 ops/upgrade-package.sh create mode 100644 ops/version-check.sh create mode 100644 todo diff --git a/.gitignore b/.gitignore index 67045665d..49828b91e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,104 +1,66 @@ -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache +# Build Output +**/*.0x +**/.flags +**/_build/** +**/artifacts/** +**/build/** +**/dist/** 
+**/node_modules/** +**/tsconfig.tsbuildinfo +*.docker-compose.yml +docker-compose.yml +modules/*/package-lock.json + +# Cache +**/.bot-store +**/.buidler +**/.cache +**/.config +**/.connext-store/** +**/.git +**/.jest.cache +**/.local +**/.node-gyp +**/.npm +**/.pyEnv +**/.rpt2_cache +**/.test-store +**/cache/** + +# Data Storage +**/.bot-store/ +**/.chaindata +**/.db-snapshots + +# Docs +modules/client/docs + +# IDEs and editors +**/*.launch +**/*.sublime-workspace +**/*.sw[opq] +**/.c9/ +**/.classpath +**/.idea +**/.project +**/.settings/ +**/.vscode -# Microbundle cache -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env -.env.test - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# Next.js build output -.next - -# Nuxt.js build / generate output -.nuxt -dist - -# Gatsby files -.cache/ -# Comment in the public line in if your project uses Gatsby and *not* Next.js -# https://nextjs.org/blog/next-9-1#public-directory-support -# public - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# TernJS port file -.tern-port +# Logs +**/*.*_backup +**/*.log +**/*.patch +cypress/screenshots +cypress/videos + +# OS +**/.bash_history +**/.DS_Store + +# Sensitive Data +**/.env +**/react-app-env.d.ts +.secret + +# Local address book +address-book.json diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..34a61ec73 --- /dev/null +++ b/Makefile @@ -0,0 +1,136 @@ + +SHELL=/bin/bash # shell make will use to execute commands +VPATH=.flags # prerequisite search path +$(shell mkdir -p $(VPATH)) + +######################################## +# Run shell commands to fetch info from environment + +root=$(shell cd "$(shell dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd ) 
+project=$(shell cat $(root)/package.json | grep '"name":' | head -n 1 | cut -d '"' -f 4) +commit=$(shell git rev-parse HEAD | head -c 8) + +# If Linux, give the container our uid & gid so we know what to reset permissions to. If Mac, the docker-VM takes care of this for us so pass root's id (ie noop) +id=$(shell if [[ "`uname`" == "Darwin" ]]; then echo 0:0; else echo "`id -u`:`id -g`"; fi) + +# Pool of images to pull cached layers from during docker build steps +image_cache=$(shell if [[ -n "${GITHUB_WORKFLOW}" ]]; then echo "--cache-from=$(project)_builder:latest,$(project)_database:latest,$(project)_ethprovider:latest,$(project)_node:latest,$(project)_proxy:latest"; else echo ""; fi) + +interactive=$(shell if [[ -t 0 && -t 2 ]]; then echo "--interactive"; else echo ""; fi) + +######################################## +# Setup more vars + +find_options=-type f -not -path "*/node_modules/*" -not -name "address-book.json" -not -name "*.swp" -not -path "*/.*" -not -path "*/cache/*" -not -path "*/build/*" -not -path "*/dist/*" -not -name "*.log" + +docker_run=docker run --name=$(project)_builder $(interactive) --tty --rm --volume=$(root):/root $(project)_builder $(id) + +startTime=.flags/.startTime +totalTime=.flags/.totalTime +log_start=@echo "=============";echo "[Makefile] => Start building $@"; date "+%s" > $(startTime) +log_finish=@echo $$((`date "+%s"` - `cat $(startTime)`)) > $(totalTime); rm $(startTime); echo "[Makefile] => Finished building $@ in `cat $(totalTime)` seconds";echo "=============";echo + +######################################## +# Build Shortcuts + +default: indra +indra: database proxy node +extras: ethprovider +all: indra extras + +######################################## +# Command & Control Shortcuts + +start: indra + bash ops/start-indra.sh + +start-testnet: ethprovider + INDRA_CHAIN_LOG_LEVEL=1 bash ops/start-testnet.sh + +stop: + bash ops/stop.sh indra + +stop-all: + bash ops/stop.sh indra + bash ops/stop.sh testnet + +clean: stop-all 
+ docker container prune -f + rm -rf .flags/* + rm -rf node_modules/@connext modules/*/node_modules/@connext + rm -rf node_modules/@walletconnect modules/*/node_modules/@walletconnect + rm -rf modules/*/node_modules/*/.git + rm -rf modules/*/node_modules/.bin + rm -rf modules/*/build modules/*/dist + rm -rf modules/*/.*cache* modules/*/node_modules/.cache modules/contracts/cache/*.json + rm -rf modules/*/package-lock.json + +reset: stop-all + docker container prune -f + docker network rm $(project) 2> /dev/null || true + docker secret rm $(project)_database_dev 2> /dev/null || true + docker volume rm $(project)_database_dev 2> /dev/null || true + docker volume rm `docker volume ls -q -f name=$(project)_database_test_*` 2> /dev/null || true + +purge: clean reset + +dls: + @docker service ls + @echo "=====" + @docker container ls -a + +######################################## +# Begin Real Build Rules + +# All rules from here on should only depend on rules that come before it +# ie first no dependencies, last no dependents + +######################################## +# Common Prerequisites + +builder: $(shell find ops/builder) + $(log_start) + docker build --file ops/builder/Dockerfile $(image_cache) --tag $(project)_builder ops/builder + docker tag ${project}_builder ${project}_builder:$(commit) + $(log_finish) && mv -f $(totalTime) .flags/$@ + +node-modules: builder package.json $(shell ls modules/*/package.json) + $(log_start) + $(docker_run) "lerna bootstrap --hoist --no-progress" + $(log_finish) && mv -f $(totalTime) .flags/$@ + +######################################## +# Build Core JS libs & bundles +# Keep prerequisites synced w the @connext/* dependencies of each module's package.json + +isomorphic-node: node-modules $(shell find modules/isomorphic-node $(find_options)) + $(log_start) + $(docker_run) "cd modules/isomorphic-node && npm run build" + $(log_finish) && mv -f $(totalTime) .flags/$@ + +######################################## +# Build Docker Images 
+ +database: $(shell find ops/database $(find_options)) + $(log_start) + docker build --file ops/database/Dockerfile $(image_cache) --tag $(project)_database ops/database + docker tag $(project)_database $(project)_database:$(commit) + $(log_finish) && mv -f $(totalTime) .flags/$@ + +ethprovider: + $(log_start) + @#docker build --file ops/ethprovider/Dockerfile $(image_cache) --tag $(project)_ethprovider ops/ethprovider + @#docker tag $(project)_ethprovider $(project)_ethprovider:$(commit) + $(log_finish) && mv -f $(totalTime) .flags/$@ + +node: isomorphic-node $(shell find modules/isomorphic-node/ops $(find_options)) + $(log_start) + docker build --file modules/isomorphic-node/ops/Dockerfile $(image_cache) --tag $(project)_node modules/isomorphic-node + docker tag $(project)_node $(project)_node:$(commit) + $(log_finish) && mv -f $(totalTime) .flags/$@ + +proxy: $(shell find ops/proxy $(find_options)) + $(log_start) + docker build $(image_cache) --tag $(project)_proxy ops/proxy + docker tag $(project)_proxy $(project)_proxy:$(commit) + $(log_finish) && mv -f $(totalTime) .flags/$@ diff --git a/modules/contracts/Multisig.sol b/modules/contracts/Multisig.sol index 166343228..b4a748f7f 100644 --- a/modules/contracts/Multisig.sol +++ b/modules/contracts/Multisig.sol @@ -2,8 +2,8 @@ pragma solidity ^0.6.4; pragma experimental ABIEncoderV2; -import "../../shared/libs/LibCommitment.sol"; -import "../../shared/libs/LibChannelCrypto.sol"; +import "../shared/LibCommitment.sol"; +import "../shared/LibChannelCrypto.sol"; /// @title Multisig - A channel multisig @@ -62,7 +62,7 @@ contract Multisig is LibCommitment { } - /// @notice Execute an n-of-n signed transaction specified by a (to, value, data, op) tuple + /// @notice Execute an n-of-n signed transaction specified by a (to, value, data, op) tuple /// This transaction is a message CALL /// @param to The destination address of the message call /// @param value The amount of ETH being forwarded in the message call diff 
--git a/modules/contracts/shared/LibChannelCrypto.sol b/modules/contracts/shared/LibChannelCrypto.sol new file mode 100644 index 000000000..1b3301465 --- /dev/null +++ b/modules/contracts/shared/LibChannelCrypto.sol @@ -0,0 +1,19 @@ + +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.6.4; +pragma experimental ABIEncoderV2; + +import "@openzeppelin/contracts/cryptography/ECDSA.sol"; + +library LibChannelCrypto { + function verifyChannelMessage(bytes32 hash, bytes memory signature) internal pure returns (address) { + bytes32 digest = toChannelSignedMessage(hash); + return ECDSA.recover(digest, signature); + } + + function toChannelSignedMessage(bytes32 hash) internal pure returns (bytes32) { + // 32 is the length in bytes of hash, + // enforced by the type signature above + return keccak256(abi.encodePacked("\x15Indra Signed Message:\n32", hash)); + } +} diff --git a/modules/contracts/shared/LibCommitment.sol b/modules/contracts/shared/LibCommitment.sol new file mode 100644 index 000000000..84400acf1 --- /dev/null +++ b/modules/contracts/shared/LibCommitment.sol @@ -0,0 +1,18 @@ + +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.6.4; +pragma experimental ABIEncoderV2; + + +/// @title LibCommitment +/// @notice Contains stuff that's useful for commitments +contract LibCommitment { + + // An ID for each commitment type + enum CommitmentTarget { + MULTISIG, + SET_STATE, + CANCEL_DISPUTE + } + +} diff --git a/modules/engine/package-lock.json b/modules/engine/package-lock.json index 5c92cdfc7..4c3d70fd5 100644 --- a/modules/engine/package-lock.json +++ b/modules/engine/package-lock.json @@ -1249,12 +1249,6 @@ "is-number": "^7.0.0" } }, - "typescript": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.0.2.tgz", - "integrity": "sha512-e4ERvRV2wb+rRZ/IQeb3jm2VxBsirQLpQhdxplZ2MEzGvDkkMmPglecnNDfSUBivMjP93vRbngYYDQqQ/78bcQ==", - "dev": true - }, "which": { "version": "2.0.2", "resolved": 
"https://registry.npmjs.org/which/-/which-2.0.2.tgz", diff --git a/modules/isomorphic-node/ops/Dockerfile b/modules/isomorphic-node/ops/Dockerfile new file mode 100644 index 000000000..ade021e74 --- /dev/null +++ b/modules/isomorphic-node/ops/Dockerfile @@ -0,0 +1,10 @@ +FROM node:12.16.0-alpine3.10 +WORKDIR /root +ENV HOME /root +RUN apk add --update --no-cache bash curl g++ gcc git jq make python +RUN npm config set unsafe-perm true && npm install -g npm@6.14.7 +RUN curl https://raw.githubusercontent.com/vishnubob/wait-for-it/ed77b63706ea721766a62ff22d3a251d8b4a6a30/wait-for-it.sh > /bin/wait-for && chmod +x /bin/wait-for +RUN npm install pg sqlite3 +COPY ops ops +COPY dist dist +ENTRYPOINT ["bash", "ops/entry.sh"] diff --git a/modules/isomorphic-node/ops/entry.sh b/modules/isomorphic-node/ops/entry.sh new file mode 100644 index 000000000..08dbacf34 --- /dev/null +++ b/modules/isomorphic-node/ops/entry.sh @@ -0,0 +1,63 @@ +#!/bin/bash +set -e + +if [[ -d "modules/isomorphic-node" ]] +then cd modules/isomorphic-node +fi + +######################################## +# Convert secrets to env vars + +if [[ -z "$INDRA_PG_PASSWORD" && -n "$INDRA_PG_PASSWORD_FILE" ]] +then export INDRA_PG_PASSWORD="`cat $INDRA_PG_PASSWORD_FILE`" +fi + +if [[ -z "$INDRA_MNEMONIC" && -n "$INDRA_MNEMONIC_FILE" ]] +then export INDRA_MNEMONIC="`cat $INDRA_MNEMONIC_FILE`" +fi + +######################################## +# Wait for indra stack dependencies + +function wait_for { + name=$1 + target=$2 + tmp=${target#*://} # remove protocol + host=${tmp%%/*} # remove path if present + if [[ ! "$host" =~ .*:[0-9]{1,5} ]] # no port provided + then + echo "$host has no port, trying to add one.." + if [[ "${target%://*}" == "http" ]] + then host="$host:80" + elif [[ "${target%://*}" == "https" ]] + then host="$host:443" + else echo "Error: missing port for host $host derived from target $target" && exit 1 + fi + fi + echo "Waiting for $name at $target ($host) to wake up..." 
+ wait-for -t 60 $host 2> /dev/null +} + +wait_for "database" "$INDRA_PG_HOST:$INDRA_PG_PORT" + +######################################## +# Launch Node + +if [[ "$NODE_ENV" == "development" ]] +then + echo "Starting indra node in dev-mode" + exec ./node_modules/.bin/nodemon \ + --delay 1 \ + --exitcrash \ + --ignore "*.test.ts" \ + --ignore "*.swp" \ + --legacy-watch \ + --polling-interval 1000 \ + --watch src \ + --exec ts-node \ + ./src/main.ts +else + echo "Starting indra node in prod-mode" + exec node --no-deprecation dist/bundle.js +fi + diff --git a/modules/isomorphic-node/ops/test.sh b/modules/isomorphic-node/ops/test.sh new file mode 100644 index 000000000..a013109e9 --- /dev/null +++ b/modules/isomorphic-node/ops/test.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +if [[ -d "modules/isomorphic-node" ]] +then cd modules/isomorphic-node +fi + +cmd="${1:-test}" + +if [[ "$NODE_ENV" == "production" ]] +then opts="--forbid-only" +else opts="--bail" +fi + +if [[ "$cmd" == "watch" ]] +then + echo "Starting node watcher" + + prev_checksum="" + while true + do + checksum="`find src -type f -not -name "*.swp" -exec sha256sum {} \; | sha256sum`" + if [[ "$checksum" != "$prev_checksum" ]] + then + echo + echo "Changes detected!" + + mocha_pids="`ps | grep [m]ocha | awk '{print $1}'`" + if [[ -n "$mocha_pids" ]] + then + echo "Stopping previous test run.." + for pid in $mocha_pids + do kill $pid 2> /dev/null + done + fi + + sleep 2 + echo "Re-running tests..."
+ checksum="`find src -type f -not -name "*.swp" -exec sha256sum {} \; | sha256sum`" + ts-mocha --bail --check-leaks --exit --timeout 60000 'src/**/*.spec.ts' & + prev_checksum=$checksum + + # If no changes, do nothing + else sleep 2 + fi + done + +else + + echo "Starting node tester" + exec ts-mocha $opts --bail --check-leaks --exit --timeout 60000 'src/**/*.spec.ts' +fi diff --git a/modules/isomorphic-node/ops/webpack.config.js b/modules/isomorphic-node/ops/webpack.config.js new file mode 100644 index 000000000..23539f28e --- /dev/null +++ b/modules/isomorphic-node/ops/webpack.config.js @@ -0,0 +1,75 @@ +const CopyPlugin = require("copy-webpack-plugin"); +const path = require("path"); + +module.exports = { + mode: "development", + target: "node", + + context: path.join(__dirname, ".."), + + entry: path.join(__dirname, "../src/main.ts"), + + externals: { + "pg-native": "commonjs2 pg-native", + "sqlite3": "commonjs2 sqlite3", + }, + + node: { + __filename: false, + __dirname: false, + }, + + resolve: { + mainFields: ["main", "module"], + extensions: [".js", ".wasm", ".ts", ".json"], + symlinks: false, + }, + + output: { + path: path.join(__dirname, "../dist"), + filename: "bundle.js", + }, + + module: { + rules: [ + { + test: /\.js$/, + exclude: /node_modules/, + use: { + loader: "babel-loader", + options: { + presets: ["@babel/env"], + }, + }, + }, + { + test: /\.ts$/, + exclude: /node_modules/, + use: { + loader: "ts-loader", + options: { + configFile: path.join(__dirname, "../tsconfig.json"), + }, + }, + }, + { + test: /\.wasm$/, + type: "javascript/auto", + loaders: ["wasm-loader"], + }, + ], + }, + + plugins: [ + new CopyPlugin({ + patterns: [ + { + from: path.join(__dirname, "../../../node_modules/@connext/pure-evm-wasm/pure-evm_bg.wasm"), + to: path.join(__dirname, "../dist/pure-evm_bg.wasm"), + }, + ], + }), + ], + + stats: { warnings: false }, +}; diff --git a/modules/isomorphic-node/package.json b/modules/isomorphic-node/package.json index 
89c9678dc..66e5f2ad6 100755 --- a/modules/isomorphic-node/package.json +++ b/modules/isomorphic-node/package.json @@ -4,13 +4,14 @@ "description": "Typescript + Clean architecture used as scaffolding", "main": "app.ts", "scripts": { + "build": "npm run compile:ts", "compile:ts": "rm -rf build/ && tsc -p .", - "test": "ts-mocha -p ./tsconfig.json src/**/*.spec.ts", - "start:script": "npm run compile:ts && node build/frameworks/script/index.js", - "start:api": "npm run compile:ts && node build/frameworks/api/index.js", + "husky:precommit": "npm run lint && npm run test", "lint": "eslint src --ext ts", "lint:fix": "eslint src --ext ts --fix", - "husky:precommit": "npm run lint && npm run test" + "start:api": "npm run compile:ts && node build/frameworks/api/index.js", + "start:script": "npm run compile:ts && node build/frameworks/script/index.js", + "test": "ts-mocha -p ./tsconfig.json src/**/*.spec.ts" }, "keywords": [ "Typescript", diff --git a/modules/isomorphic-node/tsconfig.json b/modules/isomorphic-node/tsconfig.json index 7222ecaa4..7fc5a0053 100755 --- a/modules/isomorphic-node/tsconfig.json +++ b/modules/isomorphic-node/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "outDir": "build", + "outDir": "dist", "module": "commonjs", "esModuleInterop": true, "allowSyntheticDefaultImports": true, diff --git a/ops/builder/Dockerfile b/ops/builder/Dockerfile new file mode 100644 index 000000000..f4c175495 --- /dev/null +++ b/ops/builder/Dockerfile @@ -0,0 +1,10 @@ +FROM node:12.16.0-alpine3.10 +WORKDIR /root +ENV HOME /root +RUN apk add --update --no-cache bash curl g++ gcc git jq make python +RUN npm config set unsafe-perm true && npm install -g npm@6.14.7 +RUN npm install -g lerna@3.22.1 +RUN curl https://raw.githubusercontent.com/vishnubob/wait-for-it/ed77b63706ea721766a62ff22d3a251d8b4a6a30/wait-for-it.sh > /bin/wait-for && chmod +x /bin/wait-for +COPY entry.sh /entry.sh +ENV PATH="./node_modules/.bin:${PATH}" +ENTRYPOINT ["bash", "/entry.sh"] diff --git 
a/ops/builder/entry.sh b/ops/builder/entry.sh new file mode 100644 index 000000000..f40a2197d --- /dev/null +++ b/ops/builder/entry.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +this_user="`id -u`:`id -g`" +user="$1" +cmd="$2" + +finish() { + if [[ "$this_user" == "$user" ]] + then echo "Same user, skipping permission fix" + else + echo "Fixing permissions for $user" + find . -not -name "*.swp" -user `id -u` -exec chown -R $user {} \; + fi +} +trap finish EXIT + +echo "Running command as "$this_user" (target user: $user)" +bash -c "$cmd" diff --git a/ops/database/Dockerfile b/ops/database/Dockerfile new file mode 100644 index 000000000..896049c2e --- /dev/null +++ b/ops/database/Dockerfile @@ -0,0 +1,6 @@ +FROM postgres:12.3-alpine +WORKDIR /root +RUN chown -R postgres:postgres /root +RUN apk add --update --no-cache coreutils groff less mailcap py-pip && pip install --upgrade awscli +COPY . . +ENTRYPOINT ["bash", "entry.sh"] diff --git a/ops/database/backup-lifecycle.json b/ops/database/backup-lifecycle.json new file mode 100644 index 000000000..ea6e01068 --- /dev/null +++ b/ops/database/backup-lifecycle.json @@ -0,0 +1,21 @@ +{ + "Rules": [ + { + "ID": "Backups Lifecycle Configuration", + "Status": "Enabled", + "Prefix": "backups/", + "Transitions": [ + { + "Days": 30, + "StorageClass": "STANDARD_IA" + } + ], + "Expiration": { + "Days": 180 + }, + "AbortIncompleteMultipartUpload": { + "DaysAfterInitiation": 2 + } + } + ] +} diff --git a/ops/database/backup.sh b/ops/database/backup.sh new file mode 100644 index 000000000..61865bb95 --- /dev/null +++ b/ops/database/backup.sh @@ -0,0 +1,72 @@ +#!/bin/bash +set -e + +project="indra" +bucket_name=backups.indra.connext.network +lifecycle=backup-lifecycle.json + +dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +timestamp="`date +"%y%m%d-%H%M%S"`" +backup_file=$timestamp.sql +backup_dir=$dir/snapshots +backup_path=$backup_dir/$backup_file +mkdir -p "`dirname $backup_path`" + +echo "Creating database 
snapshot..." +pg_dump --username=$project $project > $backup_path +echo "Done backing up database, snapshot saved to: $backup_path" + +if [[ -n "$AWS_ACCESS_KEY_ID" || -n "$AWS_SECRET_ACCESS_KEY" ]] +then + + # Create bucket if it doesn't exist + if [[ -z "`aws s3api list-buckets | grep '"Name":' | grep "$bucket_name"`" ]] + then + echo "Creating bucket $bucket_name" + aws s3api create-bucket --bucket $bucket_name + if [[ -f "$lifecycle" ]] + then + echo "Setting bucket's lifecycle config..." + aws s3api put-bucket-lifecycle-configuration \ + --bucket $bucket_name \ + --lifecycle-configuration file://$lifecycle + else echo "Couldn't find lifecycle config file, skipping setup: $lifecycle" + fi + else + echo "AWS S3 bucket $bucket_name already exists" + fi + + echo "Uploading db snapshot to $bucket_name" + aws s3 cp $backup_path s3://$bucket_name/backups/$backup_file --sse AES256 + echo "Done, snapshot has been stored remotely" + +else + echo "No access keys found, couldn't backup to remote storage" +fi + +# Remove old backups +for snapshot in `find $backup_dir -type f | sort` +do + # Safety measure: if a small number of snapshots remain, then stop deleting old ones + if [[ "`find $backup_dir -type f | wc -l`" -lt "24" ]] + then exit; + fi + yymmdd="`echo $snapshot | cut -d "-" -f 2`" + hhmmss="`echo $snapshot | sed 's/.*-\([0-9]\+\)\..*/\1/'`" + twoDaysAgo="`date --date "2 days ago" "+%y%m%d"`" + oneDayAgo="`date --date "1 day ago" "+%y%m%d"`" + now="`date "+%H%M%S"`" + # $((10#number)) strips leading zeros & prevents octal interpretation + if [[ "$((10#$yymmdd))" -lt "$((10#$twoDaysAgo))" ]] + then + echo "Snapshot more than two days old, deleting: $snapshot" + rm $snapshot + elif [[ "$((10#$yymmdd))" -eq "$((10#$oneDayAgo))" ]] + then + if [[ "$((10#$hhmmss))" -lt "$((10#$now))" ]] + then + echo "Snapshot more than 24 hours old, deleting: $snapshot" + rm $snapshot + fi + fi +done diff --git a/ops/database/config.json b/ops/database/config.json new file mode 100644 index
000000000..c2d2c45ec --- /dev/null +++ b/ops/database/config.json @@ -0,0 +1,14 @@ +{ + "dev": { + "driver": "pg", + "database": {"ENV": "POSTGRES_DB"}, + "user": {"ENV": "POSTGRES_USER"}, + "password": {"ENV": "POSTGRES_PASSWORD"} + }, + "prod": { + "driver": "pg", + "database": {"ENV": "POSTGRES_DB"}, + "user": {"ENV": "POSTGRES_USER"}, + "password": {"ENV": "POSTGRES_PASSWORD"} + } +} diff --git a/ops/database/database.json b/ops/database/database.json new file mode 100644 index 000000000..633559cc3 --- /dev/null +++ b/ops/database/database.json @@ -0,0 +1,11 @@ +{ + "test": { + "driver": "pg", + "database": "indra" + }, + + "dev": { + "driver": "pg", + "database": "indra" + } +} diff --git a/ops/database/db.dockerfile b/ops/database/db.dockerfile new file mode 100644 index 000000000..896049c2e --- /dev/null +++ b/ops/database/db.dockerfile @@ -0,0 +1,6 @@ +FROM postgres:12.3-alpine +WORKDIR /root +RUN chown -R postgres:postgres /root +RUN apk add --update --no-cache coreutils groff less mailcap py-pip && pip install --upgrade awscli +COPY . . +ENTRYPOINT ["bash", "entry.sh"] diff --git a/ops/database/entry.sh b/ops/database/entry.sh new file mode 100644 index 000000000..3ee0466a1 --- /dev/null +++ b/ops/database/entry.sh @@ -0,0 +1,106 @@ +#!/bin/bash +set -e + +######################################## +## Setup Env + +# 60 sec/min * 30 min = 1800 +backup_frequency="1800" +mkdir -p snapshots +backup_file="snapshots/`ls snapshots | sort -r | head -n 1`" +readonly_password="${INDRA_ADMIN_TOKEN:-cxt1234}" + +######################################## +## Helper functions + +function log { + echo "[ENTRY] $1" +} + +function unlock { + lock="/var/lib/postgresql/data/postmaster.pid" + sleep 2 + while [[ -f "$lock" ]] + do + mode=${1:-fast} + postmaster="`head -n1 $lock`" + log "Waiting on lock for pid $postmaster to be released..." + if [[ -n "`ps -o pid | grep $postmaster`" ]] + then log "Process $postmaster is running, killing it now.." 
&& kill $postmaster + else log "Process $postmaster is NOT running, removing the lock now.." && rm $lock + fi + sleep 2 + done +} + +# Set an exit trap so that the database will do one final backup before shutting down +function cleanup { + log "Database exiting, creating final snapshot" + bash backup.sh + log "Shutting the database down" + kill "$db_pid" + unlock smart + log "Clean exit." +} + +trap cleanup SIGTERM + +######################################## +## Initialize against an isolated, temporary pg instance + +log "Good morning" +if [[ ! -f "/var/lib/postgresql/data/PG_VERSION" ]] +then isFresh="true" +else isFresh="false" +fi + +# Start temp database & wait until it wakes up +log "Starting temp database for initialization & backup recovery.." +unlock fast +docker-entrypoint.sh postgres & +PID=$! +while ! psql -U $POSTGRES_USER -d $POSTGRES_DB -c "select 1" > /dev/null 2>&1 +do log "Waiting for db to wake up.." && sleep 1 +done +log "Good morning, Postgres!" + +# Is this a fresh database? Should we restore data from a snapshot? +if [[ "$isFresh" == "true" && -f "$backup_file" && "$INDRA_ENV" == "prod" ]] +then + log "Fresh postgres db started w backup present, we'll restore: $backup_file" + psql --username=$POSTGRES_USER $POSTGRES_DB < $backup_file + log "Done restoring db snapshot" +else + log "Not restoring: Database exists ($isFresh) or no snapshots found ($backup_file) or not in prod-mode ($INDRA_ENV)" +fi + +# Create a readonly user +psql --username=$POSTGRES_USER $POSTGRES_DB < Starting backer upper" +while true +do sleep $backup_frequency && bash backup.sh +done & + +# Start database to serve requests from clients +log "===> Starting new database.." +docker-entrypoint.sh postgres & +db_pid=$! 
+wait "$db_pid" diff --git a/ops/database/postgresql.conf b/ops/database/postgresql.conf new file mode 100644 index 000000000..e0b0846d7 --- /dev/null +++ b/ops/database/postgresql.conf @@ -0,0 +1,646 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, or use "pg_ctl reload". Some +# parameters, which are marked below, require a server shutdown and restart to +# take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. 
+ +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - Security and Authentication - + +#authentication_timeout = 1min # 1s-600s +#ssl = off # (change requires restart) +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers + # (change requires restart) +#ssl_prefer_server_ciphers = on # (change requires restart) +#ssl_ecdh_curve = 'prime256v1' # (change requires restart) +#ssl_cert_file = 'server.crt' # (change requires restart) +#ssl_key_file = 'server.key' # (change requires restart) +#ssl_ca_file = '' # (change requires restart) +#ssl_crl_file = '' # (change requires restart) +#password_encryption = on 
+#db_user_namespace = off +#row_security = on + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#maintenance_work_mem = 64MB # min 1MB +#replacement_sort_tuples = 150000 # limits use of replacement selection sort +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#max_stack_depth = 2MB # min 100kB +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # use none to disable dynamic shared memory + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kB, or -1 for no limit + +# - Kernel Resource Usage - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) +#shared_preload_libraries = '' # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 0 # taken from max_worker_processes +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + +#------------------------------------------------------------------------------ +# WRITE AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = minimal # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to 
disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +#max_wal_size = 1GB +#min_wal_size = 80MB +#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Server(s) - + +# Set these on the master and on any standby that will send replication data. 
+ +#max_wal_senders = 0 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 0 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # number of sync standbys and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#hot_standby = off # "on" allows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = 
on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#min_parallel_relation_size = 8MB +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off + + +#------------------------------------------------------------------------------ +# ERROR REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = on # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. 
+ # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +log_min_duration_statement = 0 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints 
= on +#log_connections = on +#log_disconnections = off +log_duration = on +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +log_statement = 'all' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + + +# - Process Title - + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# RUNTIME STATISTICS +#------------------------------------------------------------------------------ + +# - Query/Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +#stats_temp_directory = 'pg_stat_tmp' + + +# - Statistics Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM PARAMETERS +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable 
autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 
'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.utf8' # locale for system error message + # strings +lc_monetary = 'en_US.utf8' # locale for monetary formatting +lc_numeric = 'en_US.utf8' # locale for number formatting +lc_time = 'en_US.utf8' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#local_preload_libraries = '' +#session_preload_libraries = '' + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) + + +#------------------------------------------------------------------------------ +# VERSION/PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#sql_inheritance = on +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? 
+ # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/ops/database/run-backup.sh b/ops/database/run-backup.sh new file mode 100644 index 000000000..134ed1697 --- /dev/null +++ b/ops/database/run-backup.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -e + +project="indra" +service=${project}_database +service_id="`docker service ps -q $service | head -n 1`" +id="`docker inspect --format '{{.Status.ContainerStatus.ContainerID}}' $service_id`" + +if [[ -z "`docker service ps -q $service`" ]] +then echo "Error: expected to see $service running" && exit 1 +fi + +docker exec $id bash backup.sh diff --git a/ops/proxy/Dockerfile b/ops/proxy/Dockerfile new file mode 100644 index 000000000..4dd594aec --- /dev/null +++ b/ops/proxy/Dockerfile @@ -0,0 +1,7 @@ +FROM haproxy:2.1.3-alpine +WORKDIR /root +ENV HOME /root +RUN apk add --update --no-cache bash ca-certificates certbot curl iputils openssl +RUN curl https://raw.githubusercontent.com/vishnubob/wait-for-it/ed77b63706ea721766a62ff22d3a251d8b4a6a30/wait-for-it.sh > /bin/wait-for && chmod +x /bin/wait-for +COPY . . 
+ENTRYPOINT ["bash", "/root/entry.sh"] diff --git a/ops/proxy/entry.sh b/ops/proxy/entry.sh new file mode 100644 index 000000000..8dc5a2aa9 --- /dev/null +++ b/ops/proxy/entry.sh @@ -0,0 +1,140 @@ +#!/bin/bash + +if [[ "${INDRA_ETH_PROVIDER_URL%%://*}" == "https" ]] +then export INDRA_ETH_PROVIDER_PROTOCOL="ssl" +else export INDRA_ETH_PROVIDER_PROTOCOL="" +fi + +INDRA_ETH_PROVIDER_URL=${INDRA_ETH_PROVIDER_URL#*://} + +if [[ "$INDRA_ETH_PROVIDER_PROTOCOL" == "ssl" ]] +then export INDRA_ETH_PROVIDER_HOST="${INDRA_ETH_PROVIDER_URL%%/*}:443" +else export INDRA_ETH_PROVIDER_HOST="${INDRA_ETH_PROVIDER_URL%%/*}" +fi + +if [[ "$INDRA_ETH_PROVIDER_URL" == *"/"* ]] +then export INDRA_ETH_PROVIDER_PATH="/${INDRA_ETH_PROVIDER_URL#*/}" +else export INDRA_ETH_PROVIDER_PATH="/" +fi + +echo "Proxy container launched in env:" +echo "INDRA_ETH_PROVIDER_HOST=$INDRA_ETH_PROVIDER_HOST" +echo "INDRA_ETH_PROVIDER_PATH=$INDRA_ETH_PROVIDER_PATH" +echo "INDRA_ETH_PROVIDER_PROTOCOL=$INDRA_ETH_PROVIDER_PROTOCOL" +echo "INDRA_DOMAINNAME=$INDRA_DOMAINNAME" +echo "INDRA_EMAIL=$INDRA_EMAIL" +echo "INDRA_ETH_PROVIDER_URL=$INDRA_ETH_PROVIDER_URL" +echo "INDRA_MESSAGING_TCP_URL=$INDRA_MESSAGING_TCP_URL" +echo "INDRA_MESSAGING_WS_URL=$INDRA_MESSAGING_WS_URL" +echo "INDRA_NODE_URL=$INDRA_NODE_URL" + +# Provide a message indicating that we're still waiting for everything to wake up +function loading_msg { + while true # unix.stackexchange.com/a/37762 + do echo -e "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\nWaiting for proxy to wake up" | nc -lk -p 80 + done > /dev/null +} +loading_msg & +loading_pid="$!" + +######################################## +# Wait for downstream services to wake up +# Define service hostnames & ports we depend on + +echo "waiting for $INDRA_ETH_PROVIDER_HOST..." +wait-for -t 60 $INDRA_ETH_PROVIDER_HOST 2> /dev/null +while ! curl -s $INDRA_ETH_PROVIDER_HOST > /dev/null +do sleep 2 +done + +echo "waiting for $INDRA_MESSAGING_WS_URL..." 
+wait-for -t 60 $INDRA_MESSAGING_WS_URL 2> /dev/null + +echo "waiting for $INDRA_MESSAGING_TCP_URL..." +wait-for -t 60 $INDRA_MESSAGING_TCP_URL 2> /dev/null + +echo "waiting for $INDRA_NODE_URL..." +wait-for -t 60 $INDRA_NODE_URL 2> /dev/null +while ! curl -s $INDRA_NODE_URL > /dev/null +do sleep 2 +done + +# Kill the loading message server +kill "$loading_pid" && pkill nc + +if [[ -z "$INDRA_DOMAINNAME" ]] +then + cp /etc/ssl/cert.pem ca-certs.pem + echo "Entrypoint finished, executing haproxy in http mode..."; echo + exec haproxy -db -f http.cfg +fi + +######################################## +# Setup SSL Certs + +letsencrypt=/etc/letsencrypt/live +certsdir=$letsencrypt/$INDRA_DOMAINNAME +mkdir -p /etc/haproxy/certs +mkdir -p /var/www/letsencrypt + +if [[ "$INDRA_DOMAINNAME" == "localhost" && ! -f "$certsdir/privkey.pem" ]] +then + echo "Developing locally, generating self-signed certs" + mkdir -p $certsdir + openssl req -x509 -newkey rsa:4096 -keyout $certsdir/privkey.pem -out $certsdir/fullchain.pem -days 365 -nodes -subj '/CN=localhost' +fi + +if [[ ! -f "$certsdir/privkey.pem" ]] +then + echo "Couldn't find certs for $INDRA_DOMAINNAME, using certbot to initialize those now.." + certbot certonly --standalone -m $INDRA_EMAIL --agree-tos --no-eff-email -d $INDRA_DOMAINNAME -n + code=$? + if [[ "$code" -ne 0 ]] + then + echo "certbot exited with code $code, freezing to debug (and so we don't get throttled)" + sleep 9999 # FREEZE! 
Don't pester eff & get throttled + exit 1; + fi +fi + +echo "Using certs for $INDRA_DOMAINNAME" + +export INDRA_CERTBOT_PORT=31820 + +function copycerts { + if [[ -f $certsdir/fullchain.pem && -f $certsdir/privkey.pem ]] + then cat $certsdir/fullchain.pem $certsdir/privkey.pem > "$INDRA_DOMAINNAME.pem" + elif [[ -f "$certsdir-0001/fullchain.pem" && -f "$certsdir-0001/privkey.pem" ]] + then cat "$certsdir-0001/fullchain.pem" "$certsdir-0001/privkey.pem" > "$INDRA_DOMAINNAME.pem" + else + echo "Couldn't find certs, freezing to debug" + sleep 9999; + exit 1 + fi +} + +# periodically fork off & see if our certs need to be renewed +function renewcerts { + sleep 3 # give proxy a sec to wake up before attempting first renewal + while true + do + echo -n "Preparing to renew certs... " + if [[ -d "$certsdir" ]] + then + echo -n "Found certs to renew for $INDRA_DOMAINNAME... " + certbot renew -n --standalone --http-01-port=$INDRA_CERTBOT_PORT + copycerts + echo "Done!" + fi + sleep 48h + done +} + +renewcerts & + +copycerts + +cp /etc/ssl/cert.pem ca-certs.pem + +echo "Entrypoint finished, executing haproxy in https mode..."; echo +exec haproxy -db -f https.cfg diff --git a/ops/proxy/http.cfg b/ops/proxy/http.cfg new file mode 100644 index 000000000..7a8a15613 --- /dev/null +++ b/ops/proxy/http.cfg @@ -0,0 +1,68 @@ +global + log stdout local0 + maxconn 50000 + tune.ssl.default-dh-param 2048 + +defaults + log global + mode http + option dontlognull + option http-server-close + option httpclose + option httplog + option redispatch + timeout client 300000 # 5 minutes + timeout connect 3000 # 3 seconds + timeout server 300000 # 5 minutes + +frontend public_http + acl ethprovider_path path_beg /api/ethprovider + acl ethprovider_path path_beg /ethprovider + acl messaging_path path_beg /api/messaging + acl url_static path_beg /static /images /img /css + acl url_static path_end .css .gif .html .jpg .js .png + acl webserver path_beg /sockjs-node + bind *:80 + default_backend node + 
http-response del-header Access-Control-Allow-Headers + http-response del-header Access-Control-Allow-Methods + http-response del-header Access-Control-Allow-Origin + http-response add-header Access-Control-Allow-Headers "Accept, Accept-Encoding, Authorization, Cache-Control, Content-Length, Content-Type, Origin, User-Agent, X-CSRF-Token, X-Requested-With" + http-response add-header Access-Control-Allow-Origin "*" + option forwardfor + use_backend ethprovider if ethprovider_path + use_backend nats_ws if messaging_path + +frontend public_nats_ws + bind *:4221 + default_backend nats_ws + mode tcp + option tcplog + +frontend public_nats_tcp + bind *:4222 + default_backend nats_tcp + mode tcp + option tcplog + +backend ethprovider + http-request add-header Host "$INDRA_ETH_PROVIDER_HOST" + http-request del-header Host + http-request replace-path /api/ethprovider /ethprovider + http-request replace-path /ethprovider "$INDRA_ETH_PROVIDER_PATH" + http-response add-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" + server ethprovider "$INDRA_ETH_PROVIDER_HOST" "$INDRA_ETH_PROVIDER_PROTOCOL" ca-file ca-certs.pem + +backend nats_ws + http-response add-header Access-Control-Allow-Methods "GET, OPTIONS" + server nats "$INDRA_MESSAGING_WS_URL" + +backend nats_tcp + mode tcp + server nats "$INDRA_MESSAGING_TCP_URL" + +backend node + http-request replace-path /api/(.*) /\1 + http-request replace-path /indra/(.*) /\1 + http-response add-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" + server node "$INDRA_NODE_URL" diff --git a/ops/proxy/https.cfg b/ops/proxy/https.cfg new file mode 100644 index 000000000..ced5c48dd --- /dev/null +++ b/ops/proxy/https.cfg @@ -0,0 +1,84 @@ +global + chroot /root + log stdout local0 + maxconn 2048 + ssl-default-bind-ciphers kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL + ssl-default-bind-options no-sslv3 + tune.ssl.default-dh-param 2048 + +defaults + log global + mode http + 
option dontlognull + option http-server-close + option httpclose + option httplog + option redispatch + timeout client 300000 # 5 minutes + timeout connect 3000 # 3 seconds + timeout server 300000 # 5 minutes + +frontend public_http + bind *:80 + default_backend letsencrypt_backend + http-request add-header X-Forwarded-Proto: http + option forwardfor + redirect scheme https if !{ ssl_fc } + +frontend public_https + acl ethprovider_path path_beg /api/ethprovider + acl ethprovider_path path_beg /ethprovider + acl letsencrypt-acl path_beg /.well-known/acme-challenge/ + acl messaging_path path_beg /api/messaging + acl url_static path_beg /static /images /img /css + acl url_static path_end .css .gif .html .jpg .js .png + acl webserver path_beg /sockjs-node + bind *:443 ssl crt "/root/$INDRA_DOMAINNAME.pem" + default_backend node + http-request add-header X-Forwarded-Proto: https + http-response del-header Access-Control-Allow-Headers + http-response del-header Access-Control-Allow-Methods + http-response del-header Access-Control-Allow-Origin + http-response add-header Access-Control-Allow-Headers "Accept, Accept-Encoding, Authorization, Cache-Control, Content-Length, Content-Type, Origin, User-Agent, X-CSRF-Token, X-Requested-With" + http-response add-header Access-Control-Allow-Origin "*" + option forwardfor + use_backend ethprovider if ethprovider_path + use_backend letsencrypt_backend if letsencrypt-acl + use_backend nats_ws if messaging_path + +frontend public_nats_ws + bind *:4221 ssl crt "/root/$INDRA_DOMAINNAME.pem" + default_backend nats_ws + mode tcp + option tcplog + +frontend public_nats_tcp + bind *:4222 + default_backend nats_tcp + mode tcp + option tcplog + +backend ethprovider + http-request del-header Host + http-request add-header Host "$INDRA_ETH_PROVIDER_HOST" + http-request replace-path /api/ethprovider /ethprovider + http-request replace-path /ethprovider "$INDRA_ETH_PROVIDER_PATH" + http-response add-header Access-Control-Allow-Methods "GET, POST, 
PUT, DELETE, OPTIONS" + server ethprovider "$INDRA_ETH_PROVIDER_HOST" "$INDRA_ETH_PROVIDER_PROTOCOL" ca-file ca-certs.pem + +backend letsencrypt_backend + server letsencrypt "127.0.0.1:$INDRA_CERTBOT_PORT" + +backend nats_ws + http-response add-header Access-Control-Allow-Methods "GET, OPTIONS" + server nats "$INDRA_MESSAGING_WS_URL" + +backend nats_tcp + mode tcp + server nats "$INDRA_MESSAGING_TCP_URL" + +backend node + http-request replace-path /api/(.*) /\1 + http-request replace-path /indra/(.*) /\1 + http-response add-header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" + server node "$INDRA_NODE_URL" diff --git a/ops/replace.sh b/ops/replace.sh new file mode 100644 index 000000000..131ecaad2 --- /dev/null +++ b/ops/replace.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +old="$1" +new="$2" + +if [[ -z "$old" || -z "$new" ]] +then echo "Exactly two args required: bash ops/replace.sh <old> <new>" && exit 1 +fi + +echo "Before:" +bash ops/search.sh "$old" +echo +echo "After:" +bash ops/search.sh "$old" | sed "s|$old|$new|g" | grep --color=always "$new" +echo +echo "Does the above replacement look good? 
(y/n)" +echo -n "> " +read response +echo + +if [[ "$response" == "y" ]] +then find Makefile ops docs modules/*/src modules/*/src.sol modules/*/src.ts modules/*/ops modules/*/test .github/workflows/* -type f -not -name "*.swp" -exec sed -i "s|$old|$new|g" {} \; +else echo "Goodbye" +fi diff --git a/ops/search.sh b/ops/search.sh new file mode 100644 index 000000000..d9971078e --- /dev/null +++ b/ops/search.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +target="$1"; +shift; + +if [[ -z "$target" ]] +then echo "One arg required: bash ops/search.sh <target>" && exit 1 +fi + +grep "$@" --exclude=*.swp --exclude=*.pdf --color=auto -r "$target" \ + Makefile \ + .github/workflows/* \ + ops \ + docs \ + modules/*/src.ts \ + modules/*/src.sol \ + modules/*/ops \ + modules/*/src \ + modules/*/test diff --git a/ops/setup-ubuntu.sh b/ops/setup-ubuntu.sh new file mode 100644 index 000000000..ce7052866 --- /dev/null +++ b/ops/setup-ubuntu.sh @@ -0,0 +1,171 @@ +#!/bin/bash + +root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." >/dev/null 2>&1 && pwd )" +project="`cat $root/package.json | grep '"name":' | head -n 1 | cut -d '"' -f 4`" + +hostname="$1" +prvkey="${SSH_KEY:-$HOME/.ssh/id_rsa}" +pubkey="${PUB_KEY:-$HOME/.ssh/autodeployer.pub}" +user="ubuntu" + +secret_name="${project}_mnemonic" # name of docker secret to store mnemonic in + +# Sanity checks +if [[ -z "$1" ]] +then echo "Provide the server's hostname or ip address as the first ($1) & only arg ($2)" && exit +fi + +if [[ ! 
# -- ops/setup-ubuntu.sh (continuation: key checks, user setup, remote provisioning) --

if [[ ! -f "$prvkey" ]]
then echo "Couldn't find the ssh private key: $prvkey" && exit
fi

# Prepare to load the node's signing key into the server's secret store.
# -r: don't let read eat backslashes in the mnemonic; -s: no terminal echo.
echo "Optional: Copy the $secret_name secret to your clipboard then paste it below & hit enter (no echo)"
echo -n "> "
read -rs mnemonic
echo

# If we can login as ubuntu, then the user is already setup
echo "Attempting to login as $user to $hostname"
if ssh -q -i "$prvkey" "$user@$hostname" exit 2> /dev/null
then
  echo "Login success, skipping user setup"

# If we can login as root then setup a sudo user & turn off root login
elif ssh -q -i "$prvkey" "root@$hostname" exit 2> /dev/null
then
  echo "Failed to login, configuring a new $user user."
  # Unquoted delimiter: $user interpolates locally, \$-escapes run remotely.
  ssh -i "$prvkey" "root@$hostname" "bash -s" <<EOF
set -e
function createuser {
  adduser --gecos "" \$1 <<EOIF
password
password
EOIF
  usermod -aG sudo \$1
  mkdir -v /home/\$1/.ssh
  cat /root/.ssh/authorized_keys >> /home/\$1/.ssh/authorized_keys
  chown -vR \$1:\$1 /home/\$1
  passwd --delete \$1
}

createuser $user

echo "Turning off password authentication"
sed -i '/PasswordAuthentication/ c\
PasswordAuthentication no
' /etc/ssh/sshd_config

echo "Turning off root login"
sed -i '/PermitRootLogin/ c\
PermitRootLogin no
' /etc/ssh/sshd_config

echo "Setting up password-less sudo access for user"
echo "$user ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/90-cloudimg-ubuntu

echo "Done with setup as root, rebooting to apply login changes"
shutdown --reboot now
EOF
  echo "Waiting for server to wake up again ($?)"

  # Poll until sshd is back after the reboot
  while ! ssh -q -i "$prvkey" "$user@$hostname" exit 2> /dev/null
  do echo -n "." && sleep 3
  done
  echo " Good morning!"

else
  # Re-run without -q so the user sees ssh's actual error before we bail
  ssh -i "$prvkey" "$user@$hostname" exit
  echo
  echo "Aborting: Can't login as $user or root, idk how to setup this server."
  exit 1
fi

if [[ -f "$pubkey" ]]
then
  echo "Copying $pubkey to remote server..."
  echo "scp -i $prvkey $pubkey $user@$hostname:/home/$user/.ssh/another_authorized_key"
  scp -i "$prvkey" "$pubkey" "$user@$hostname:/home/$user/.ssh/another_authorized_key"
fi

echo "Setting up dependencies for $user@$hostname"
# NOTE(review): the opening of this remote heredoc (the authorized-key merge)
# was corrupted in the source; the first `if` below is reconstructed from the
# surviving `rm -f .ssh/another_authorized_key ...` line — verify vs upstream.
ssh -i "$prvkey" "$user@$hostname" "sudo -S bash -s" <<EOF
if [[ -f .ssh/another_authorized_key ]]
then
  cat .ssh/another_authorized_key >> .ssh/authorized_keys
  rm -f .ssh/another_authorized_key .ssh/authorized_keys.backup
fi

# Remove stale apt cache & lock files
rm -rf /var/lib/apt/lists/*

# Upgrade Everything without prompts
# https://askubuntu.com/questions/146921/how-do-i-apt-get-y-dist-upgrade-without-a-grub-config-prompt
apt-get update -y
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
apt-get autoremove -y

# Setup firewall
ufw --force reset
ufw allow 22 &&\
ufw --force enable

# Install docker dependencies
apt-get install -y apt-transport-https ca-certificates curl jq make software-properties-common

# Get the docker team's official gpg key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -

# Add the docker repo & install
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \`lsb_release -cs\` stable"
apt-get update -y && apt-get install -y docker-ce

usermod -aG docker $user
systemctl enable docker

echo;
advertise_ip=\`ifconfig eth1 | grep 'inet ' | awk '{print \$2;exit}' | sed 's/addr://'\`
if [[ -z "\$advertise_ip" ]]
then advertise_ip=\`ifconfig eth0 | grep 'inet ' | awk '{print \$2;exit}' | sed 's/addr://'\`
fi
docker swarm init "--advertise-addr=\$advertise_ip" || true
sleep 3
echo;

# Setup docker secret
if [[ -z "$mnemonic" ]]
then echo "No mnemonic provided, skipping secret creation"
elif [[ -n "\`docker secret ls | grep "$secret_name"\`" ]]
then echo "A secret called $secret_name already exists, skipping secret setup"
else
  id="\`echo $mnemonic | tr -d '\n\r' | docker secret create $secret_name -\`"
  # BUGFIX: \$? must be escaped so the REMOTE shell checks the secret-create
  # status; unescaped, the local shell baked its own stale \$? into the script.
  if [[ "\$?" == "0" ]]
  then
    echo "Successfully loaded mnemonic into secret store"
    echo "name=$secret_name id=\$id"
    echo
  else echo "Something went wrong creating secret called $secret_name"
  fi
fi

# Double-check upgrades
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
apt-get autoremove -y

if [[ ! -d indra ]]
then git clone https://github.com/ConnextProject/indra.git
fi

chown -R $user:$user .

echo
echo "Done configuring server, rebooting now.."
echo

reboot
EOF
# -- ops/start-indra.sh (continuation: image version, helpers & per-service config) --

########################################
## Docker registry & image version config

# prod version: if we're on a tagged commit then use the tagged semver,
# otherwise use the git hash; anything non-prod just uses :latest
if [[ "$INDRA_ENV" == "prod" ]]
then
  git_tag="$(git tag --points-at HEAD | grep "indra-" | head -n 1)"
  if [[ -n "$git_tag" ]]
  then version="$(echo "$git_tag" | sed 's/indra-//')"
  else version="$(git rev-parse HEAD | head -c 8)"
  fi
else version="latest"
fi

# Pull the given image ($1 = name:tag) iff it isn't available locally.
# Project-internal images are fetched from $registry then re-tagged locally.
function pull_if_unavailable {
  if [[ -z "$(docker image ls | grep "${1%:*}" | grep "${1#*:}")" ]]
  then
    if [[ -n "$(echo "$1" | grep "${project}_")" ]]
    then full_name="${registry%/}/$1"
    else full_name="$1"
    fi
    echo "Can't find image $1 locally, attempting to pull $full_name"
    docker pull "$full_name"
    docker tag "$full_name" "$1"
  fi
}

# Create docker secret $1 iff it doesn't exist yet; value $2, or 32 random
# bytes (hex-encoded) when no value is given.
function new_secret {
  secret="$2"
  if [[ -z "$secret" ]]
  then secret="$(head -c 32 /dev/urandom | xxd -plain -c 32 | tr -d '\n\r')"
  fi
  if [[ -z "$(docker secret ls -f "name=$1" | grep -w "$1")" ]]
  then
    id="$(echo "$secret" | tr -d '\n\r' | docker secret create "$1" -)"
    echo "Created secret called $1 with id $id"
  fi
}

# NOTE(review): the literal `_name` below looks like a leftover placeholder
# (`${project}_<image>`?) — confirm intent; string kept byte-identical.
echo "Using docker images ${project}_name:${version} "

####################
# Misc Config

builder_image="${project}_builder"

# YAML fragment interpolated into docker-compose.yml for every service.
# NOTE(review): exact indentation was mangled in the source — it must match
# the compose template's nesting; verify against upstream before relying on it.
common="networks:
      - '$project'
  logging:
    driver: 'json-file'
    options:
      max-size: '100m'"

####################
# Proxy config

proxy_image="${project}_proxy:$version"
pull_if_unavailable "$proxy_image"

# Without a domain name we only expose the proxy on localhost:3000 (plus nats)
if [[ -z "$INDRA_DOMAINNAME" ]]
then
  public_url="http://localhost:3000"
  proxy_ports="ports:
      - '3000:80'
      - '4221:4221'
      - '4222:4222'"
else
  public_url="https://localhost:443"
  proxy_ports="ports:
      - '80:80'
      - '443:443'
      - '4221:4221'
      - '4222:4222'"
fi

echo "Proxy configured"

########################################
## Node config

node_port="8888"

# prod runs the published node image; dev runs the builder image with the
# repo bind-mounted & the debugger port (9229) exposed
if [[ "$INDRA_ENV" == "prod" ]]
then
  node_image_name="${project}_node:$version"
  pull_if_unavailable "$node_image_name"
  node_image="image: '$node_image_name'"
else
  node_image="image: '${project}_builder'
    entrypoint: 'bash modules/isomorphic-node/ops/entry.sh'
    volumes:
      - '$root:/root'
    ports:
      - '$node_port:$node_port'
      - '9229:9229'"
fi

echo "Node configured"

########################################
## Database config

database_image="${project}_database:$version"
pull_if_unavailable "$database_image"

snapshots_dir="$root/.db-snapshots"
mkdir -p "$snapshots_dir"

# dev exposes postgres on 5432 & uses a throwaway volume/secret
if [[ "$INDRA_ENV" == "prod" ]]
then
  database_image="image: '$database_image'"
  db_volume="database"
  db_secret="${project}_database"
  new_secret "$db_secret"
else
  database_image="image: '$database_image'
    ports:
      - '5432:5432'"
  db_volume="database_dev"
  db_secret="${project}_database_dev"
  new_secret "$db_secret" "$project"
fi

# database connection settings
pg_db="$project"
pg_host="database"
pg_password_file="/run/secrets/$db_secret"
pg_port="5432"
pg_user="$project"

echo "Database configured"

########################################
# Chain provider config

# If no chain providers provided, spin up local testnets & use those
if [[ -z "$INDRA_CHAIN_PROVIDERS" ]]
then
  mnemonic_secret_name="${project}_mnemonic_dev"
  echo 'No $INDRA_CHAIN_PROVIDERS provided, spinning up local testnets & using those.'
  eth_mnemonic="candy maple cake sugar pudding cream honey rich smooth crumble sweet treat"
  bash ops/save-secret.sh "$mnemonic_secret_name" "$eth_mnemonic"
  pull_if_unavailable "${project}_ethprovider:$version"
  chain_id_1=1337; chain_id_2=1338;
  bash ops/start-testnet.sh "$chain_id_1" "$chain_id_2"
  INDRA_CHAIN_PROVIDERS="$(cat "$root/.chaindata/providers/${chain_id_1}-${chain_id_2}.json")"
  INDRA_CONTRACT_ADDRESSES="$(cat "$root/.chaindata/addresses/${chain_id_1}-${chain_id_2}.json")"

# If chain providers are provided, use those
else
  mnemonic_secret_name="${project}_mnemonic"
  # deliberately unquoted so multi-line JSON collapses onto one log line
  echo "Using chain providers:" $INDRA_CHAIN_PROVIDERS
  # Prefer top-level address-book override otherwise default to one in contracts
  if [[ -f address-book.json ]]
  then INDRA_CONTRACT_ADDRESSES="$(cat address-book.json | tr -d ' \n\r')"
  else INDRA_CONTRACT_ADDRESSES="$(cat modules/contracts/address-book.json | tr -d ' \n\r')"
  fi
fi

INDRA_MNEMONIC_FILE="/run/secrets/$mnemonic_secret_name"
ETH_PROVIDER_URL="$(echo "$INDRA_CHAIN_PROVIDERS" | tr -d "'" | jq '.[]' | head -n 1 | tr -d '"')"

# TODO: filter out extra contract addresses that we don't have any chain providers for?
#!/usr/bin/env bash
set -e

# ops/version-check.sh — report outdated npm deps for the root package & each module.

# Packages that we should never report as being out-of-date:
# - We don't want contract addresses to change so no more solidity-related upgrades
# - Newest react-scripts version breaks daicard, don't use it
do_not_upgrade='solc @openzeppelin/contracts react-scripts @connext/'

# Format string describing how each line looks: "| name | current -> latest |"
format='{printf("| %-32s|%8s -> %-8s|\n", $1, $3, $4)}'

# Create the sed command to remove any ignored rows.
# the first non-default delimiter needs to be \escaped if it's the first char
filter_cmd=""
for ignored in $do_not_upgrade
do filter_cmd="$filter_cmd\| $ignored|d;"
done

# NB: npm outdated exits non-zero when anything IS outdated; piping it means
# the pipeline's status comes from sed, which keeps set -e happy.
echo "===== Module: project root"
npm outdated -D | tail -n +2 | awk '$3 != $4' | awk "$format" | sed "$filter_cmd"
echo

cd modules
# Glob for directories instead of parsing `ls` (safe w/ odd filenames)
for module in */
do
  module="${module%/}"
  echo "===== Module: $module"
  cd "$module"
  npm outdated | tail -n +2 | awk '$3 != $4' | awk "$format" | sed "$filter_cmd"
  echo "-----"
  npm outdated -D | tail -n +2 | awk '$3 != $4' | awk "$format" | sed "$filter_cmd"
  cd ..
  echo
done