SHELL := /bin/bash
.DEFAULT_GOAL := help

# --- Paths ---
ROOT := $(CURDIR)

# JAVA_HOME must be resolved BEFORE it is baked into GRADLE: GRADLE uses ':='
# (immediate expansion), so a '?=' default declared later would never reach it.
# Falls back to the macOS java_home helper, then to a known Corretto 17 install.
JAVA_HOME ?= $(shell /usr/libexec/java_home -v 17 2>/dev/null || echo "$$HOME/Library/Java/JavaVirtualMachines/corretto-17.0.18/Contents/Home")
GRADLE := JAVA_HOME=$(JAVA_HOME) ./gradlew

# NOTE(review): 'infraestructure' matches the directory name on disk — do not "fix" the spelling.
INFRA := $(ROOT)/modules/com.etendorx.integration.obconnector/infraestructure
RXCONFIG := $(ROOT)/rxconfig
PROPS := $(ROOT)/gradle.properties

# --- Config from gradle.properties ---
# bbdd.url is stored with escaped colons (jdbc\:postgresql\://...); sed unescapes them.
DB_URL := $(shell grep '^bbdd.url=' $(PROPS) | cut -d= -f2- | sed 's/\\:/:/g')
DB_SID := $(shell grep '^bbdd.sid=' $(PROPS) | cut -d= -f2-)
DB_USER := $(shell grep '^bbdd.user=' $(PROPS) | cut -d= -f2-)
DB_PASS := $(shell grep '^bbdd.password=' $(PROPS) | cut -d= -f2-)
CTX_NAME := $(shell grep '^context.name=' $(PROPS) 2>/dev/null | cut -d= -f2-)
ifeq ($(CTX_NAME),)
  CTX_NAME := etendo
endif
# Project-scoped compose invocation so several checkouts can coexist on one host.
COMPOSE := docker-compose -p $(CTX_NAME)-obconn

# --- Colors (consumed via `echo -e`; SHELL is bash, so -e is safe) ---
CYAN  := \033[36m
GREEN := \033[32m
YELLOW:= \033[33m
DIM   := \033[2m
RESET := \033[0m

# --- bootRun defaults and orchestration state ---
# Activates the 'local' Spring profile for every service started by this file.
# (The root build.gradle forwards System properties into bootRun, which is why
# a -D flag on the Gradle command line reaches the service JVM.)
BOOTRUN_ARGS := -Dspring.profiles.active=local
# Directory holding one .pid and one .log per background service.
PIDS_DIR := $(ROOT)/.run
# Seconds to wait for each service's health endpoint before giving up.
MAX_WAIT := 120

# Remote debug ports (JDWP), one per service; override on the command line.
DBG_CONFIG ?= 5001
DBG_AUTH ?= 5002
DBG_DAS ?= 5003
DBG_EDGE ?= 5004
DBG_ASYNC ?= 5005
DBG_SERVER ?= 5006
DBG_WORKER ?= 5007

# Build a Gradle bootRun debug argument for a given port.
# Recursive '=' is required so $1 is substituted at each $(call …) site.
# Uses non-suspending JDWP so services start normally and can be attached later.
debug_jvm_arg = -PdebugEnabled=true -PdebugSuspend=false -PdebugServer=true -PdebugPort=$1

# Wait for a service port to be ready: $(call wait_for,name,port)
# Polls /actuator/health every 2s; on timeout prints the tail of the service's
# log (derived from the service name: lowercased, spaces -> dashes) and fails.
# On success always reports OK — including when the service comes up on the
# final poll at exactly MAX_WAIT elapsed.
define wait_for
	@printf "  Waiting for $(1) on :$(2) "; \
	elapsed=0; \
	while ! curl -sf http://localhost:$(2)/actuator/health > /dev/null 2>&1; do \
		if [ $$elapsed -ge $(MAX_WAIT) ]; then \
			echo -e " $(YELLOW)TIMEOUT$(RESET)"; \
			logfile=$(PIDS_DIR)/$$(echo $(1) | tr ' ' '-' | tr '[:upper:]' '[:lower:]').log; \
			if [ -f "$$logfile" ]; then \
				echo -e "  $(YELLOW)--- Last 20 lines of $$logfile ---$(RESET)"; \
				tail -20 "$$logfile"; \
				echo -e "  $(YELLOW)---$(RESET)"; \
			fi; \
			exit 1; \
		fi; \
		printf "."; \
		sleep 2; \
		elapsed=$$((elapsed + 2)); \
	done; \
	echo -e " $(GREEN)OK$(RESET) ($${elapsed}s)"
endef

# ==============================================================================
# PREFLIGHT CHECKS
# ==============================================================================

.PHONY: check-db check-java

check-db: ## Test PostgreSQL connection using gradle.properties config
	@echo -e "$(CYAN)Checking PostgreSQL connection...$(RESET)"
	@echo -e "  URL: $(DB_URL)/$(DB_SID)"
	@echo -e "  User: $(DB_USER)"
	@DB_HOST=$$(echo "$(DB_URL)" | sed 's|jdbc:postgresql://||' | cut -d: -f1); \
	DB_PORT=$$(echo "$(DB_URL)" | sed 's|jdbc:postgresql://||' | cut -d: -f2); \
	if PGPASSWORD=$(DB_PASS) psql -h "$$DB_HOST" -p "$$DB_PORT" -U $(DB_USER) -d $(DB_SID) -c "SELECT 1" > /dev/null 2>&1; then \
		echo -e "  $(GREEN)OK$(RESET) — PostgreSQL is reachable"; \
	else \
		echo -e "  $(YELLOW)FAIL$(RESET) — Cannot connect to PostgreSQL"; \
		echo -e "  Check gradle.properties (bbdd.*) and ensure Etendo Classic DB is running."; \
		exit 1; \
	fi

check-java: ## Verify Java 17 is available
	@if $(JAVA_HOME)/bin/java -version 2>&1 | grep -q "17\."; then \
		echo -e "  $(GREEN)OK$(RESET) — Java 17 ($(JAVA_HOME))"; \
	else \
		echo -e "  $(YELLOW)FAIL$(RESET) — Java 17 not found at JAVA_HOME=$(JAVA_HOME)"; \
		exit 1; \
	fi

# ==============================================================================
# INFRASTRUCTURE
# ==============================================================================

.PHONY: infra infra-kafka infra-down infra-logs infra-ps

infra: ## Start infra with Redpanda (default, lightweight)
	@echo -e "$(CYAN)Starting infrastructure (Redpanda)...$(RESET)"
	cd $(INFRA) && $(COMPOSE) -f docker-compose.redpanda.yml up -d
	@echo ""
	@echo -e "$(GREEN) Infrastructure ready$(RESET)"
	@echo -e " Redpanda Broker localhost:29092"
	@echo -e " Redpanda Console http://localhost:9093"
	@echo -e " Kafka Connect API http://localhost:8083"
	@echo -e " Jaeger UI http://localhost:16686"
	@echo ""

infra-kafka: ## Start infra with Kafka (heavier alternative)
	@echo -e "$(CYAN)Starting infrastructure (Kafka)...$(RESET)"
	cd $(INFRA) && $(COMPOSE) up -d
	@echo ""
	@echo -e "$(GREEN) Infrastructure ready$(RESET)"
	@echo -e " Kafka Broker localhost:29092"
	@echo -e " Kafka UI http://localhost:9093"
	@echo -e " Kafka Connect API http://localhost:8083"
	@echo -e " Kafka Connect UI http://localhost:8002"
	@echo -e " Jaeger UI http://localhost:16686"
	@echo -e " PostgreSQL (Debezium) localhost:5465"
	@echo ""

infra-down: ## Stop all infra containers
	@echo -e "$(YELLOW)Stopping infrastructure...$(RESET)"
	# Both compose stacks are torn down; '; true' keeps the recipe green when
	# one of the two stacks was never started.
	cd $(INFRA) && $(COMPOSE) down 2>/dev/null; \
	cd $(INFRA) && $(COMPOSE) -f docker-compose.redpanda.yml down 2>/dev/null; true

infra-logs: ## Tail infra logs
	cd $(INFRA) && $(COMPOSE) logs -f --tail=50

infra-ps: ## Show running infra containers
	cd $(INFRA) && $(COMPOSE) ps 2>/dev/null; \
	cd $(INFRA) && $(COMPOSE) -f docker-compose.redpanda.yml ps 2>/dev/null; true

# ==============================================================================
# CONFIG
# ==============================================================================

.PHONY: config

config: ## Generate config files from templates (rxconfig/*.yaml from *.yaml.template)
	@echo -e "$(CYAN)Generating config from templates...$(RESET)"
	# Guard against an unmatched glob: without the '-e' check, bash would pass
	# the literal '*.yaml.template' string to cp and the recipe would fail.
	@for f in $(RXCONFIG)/*.yaml.template; do \
		[ -e "$$f" ] || continue; \
		target="$${f%.template}"; \
		if [ ! -f "$$target" ]; then \
			cp "$$f" "$$target"; \
			echo -e " $(GREEN)Created$(RESET) $$(basename $$target)"; \
		else \
			echo -e " $(YELLOW)Exists$(RESET) $$(basename $$target) (skipped)"; \
		fi \
	done
	@echo -e " $(CYAN)Injecting DB config from gradle.properties into das.yaml...$(RESET)"
	# NOTE(review): sed uses '|' as delimiter — a DB_PASS containing '|' or '&'
	# would corrupt das.yaml. TODO confirm passwords are restricted accordingly.
	@sed -i.bak \
		-e 's|url:.*jdbc:.*|url: $(DB_URL)/$(DB_SID)|' \
		-e 's|username:.*|username: $(DB_USER)|' \
		-e 's|password:.*|password: $(DB_PASS)|' \
		$(RXCONFIG)/das.yaml && rm -f $(RXCONFIG)/das.yaml.bak
	@echo -e " $(GREEN)OK$(RESET) das.yaml updated with gradle.properties DB config"

# ==============================================================================
# BUILD
# ==============================================================================

.PHONY: build build-lib build-server build-worker test test-lib

build: ## Build all modules
	@echo -e "$(CYAN)Building all modules...$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.common:build \
		:com.etendorx.integration.obconn.lib:build \
		:com.etendorx.integration.obconn.server:build \
		:com.etendorx.integration.obconn.worker:build

build-lib: ## Build only the lib module
	$(GRADLE) :com.etendorx.integration.obconn.lib:build

build-server: ## Build only the server module
	$(GRADLE) :com.etendorx.integration.obconn.server:build

build-worker: ## Build only the worker module
	$(GRADLE) :com.etendorx.integration.obconn.worker:build

test: ## Run all unit tests
	@echo -e "$(CYAN)Running tests...$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.lib:test \
		:com.etendorx.integration.obconn.server:test \
		:com.etendorx.integration.obconn.worker:test

test-lib: ## Run lib unit tests only
	$(GRADLE) :com.etendorx.integration.obconn.lib:test

# ==============================================================================
# RUN SERVICES (each in foreground — use separate terminals or `make up`)
# ==============================================================================

.PHONY: run-config run-auth run-das run-edge run-server run-worker run-async

run-config: ## Start Config Server (port 8888) — MUST start first
	@echo -e "$(GREEN)Starting Config Server on :8888$(RESET)"
	$(GRADLE) :com.etendorx.configserver:bootRun

run-auth: ## Start Auth Service (port 8094)
	@echo -e "$(GREEN)Starting Auth on :8094$(RESET)"
	$(GRADLE) :com.etendorx.auth:bootRun $(BOOTRUN_ARGS)

run-das: ## Start DAS (port 8092)
	@echo -e "$(GREEN)Starting DAS on :8092$(RESET)"
	$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS)

run-edge: ## Start Edge Gateway (port 8096)
	@echo -e "$(GREEN)Starting Edge on :8096$(RESET)"
	$(GRADLE) :com.etendorx.edge:bootRun $(BOOTRUN_ARGS)

run-server: ## Start OBConnector Server (port 8101)
	@echo -e "$(GREEN)Starting OBConnector Server on :8101$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.server:bootRun $(BOOTRUN_ARGS)

run-worker: ## Start OBConnector Worker (port 8102)
	@echo -e "$(GREEN)Starting OBConnector Worker on :8102$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.worker:bootRun $(BOOTRUN_ARGS)

run-async: ## Start Async Process service (port 8099)
	@echo -e "$(GREEN)Starting Async Process on :8099$(RESET)"
	$(GRADLE) :com.etendorx.asyncprocess:bootRun $(BOOTRUN_ARGS)

# ==============================================================================
# ORCHESTRATED STARTUP (background processes)
# ==============================================================================
# Idiom used throughout: `cmd > file.log 2>&1 & echo $$! > file.pid` backgrounds
# the Gradle process, captures its output, and records the PID for `down`/`status`.

.PHONY: up up-debug up-local up-kafka down status logs portal loadtest loadtest.send loadtest.receive

up: check-java check-db infra config ## Start everything with Config Server
	@mkdir -p $(PIDS_DIR)
	@rm -f $(PIDS_DIR)/*.log $(PIDS_DIR)/*.pid
	@echo ""
	@echo -e "$(CYAN)Starting Config Server...$(RESET)"
	@$(GRADLE) :com.etendorx.configserver:bootRun > $(PIDS_DIR)/configserver.log 2>&1 & echo $$! > $(PIDS_DIR)/configserver.pid
	$(call wait_for,Config Server,8888)
	@echo ""
	@echo -e "$(CYAN)Starting Auth + DAS + Edge...$(RESET)"
	@$(GRADLE) :com.etendorx.auth:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/auth.log 2>&1 & echo $$! > $(PIDS_DIR)/auth.pid
	@$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/das.log 2>&1 & echo $$! > $(PIDS_DIR)/das.pid
	@$(GRADLE) :com.etendorx.edge:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/edge.log 2>&1 & echo $$! > $(PIDS_DIR)/edge.pid
	@$(GRADLE) :com.etendorx.asyncprocess:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/asyncprocess.log 2>&1 & echo $$! > $(PIDS_DIR)/asyncprocess.pid
	$(call wait_for,Auth,8094)
	$(call wait_for,DAS,8092)
	$(call wait_for,Edge,8096)
	$(call wait_for,Async Process,8099)
	@echo ""
	@echo -e "$(CYAN)Starting OBConnector Server + Worker...$(RESET)"
	@$(GRADLE) :com.etendorx.integration.obconn.server:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-server.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-server.pid
	@$(GRADLE) :com.etendorx.integration.obconn.worker:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-worker.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-worker.pid
	$(call wait_for,OBConn Server,8101)
	$(call wait_for,OBConn Worker,8102)
	@$(MAKE) --no-print-directory _banner _CONFIGSRV=" Config Server http://localhost:8888" _AUTH=" Auth Service http://localhost:8094" _EDGE=" Edge Gateway http://localhost:8096" _ASYNC=" Async Process http://localhost:8099"

up-debug: check-java check-db infra config ## Start everything with remote debug ports enabled
	@mkdir -p $(PIDS_DIR)
	@rm -f $(PIDS_DIR)/*.log $(PIDS_DIR)/*.pid
	@echo ""
	@echo -e "$(CYAN)Starting Config Server (debug :$(DBG_CONFIG))...$(RESET)"
	@$(GRADLE) :com.etendorx.configserver:bootRun $(call debug_jvm_arg,$(DBG_CONFIG)) > $(PIDS_DIR)/configserver.log 2>&1 & echo $$! > $(PIDS_DIR)/configserver.pid
	$(call wait_for,Config Server,8888)
	@echo ""
	@echo -e "$(CYAN)Starting Auth + DAS + Edge (debug enabled)...$(RESET)"
	@$(GRADLE) :com.etendorx.auth:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_AUTH)) > $(PIDS_DIR)/auth.log 2>&1 & echo $$! > $(PIDS_DIR)/auth.pid
	@$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_DAS)) > $(PIDS_DIR)/das.log 2>&1 & echo $$! > $(PIDS_DIR)/das.pid
	@$(GRADLE) :com.etendorx.edge:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_EDGE)) > $(PIDS_DIR)/edge.log 2>&1 & echo $$! > $(PIDS_DIR)/edge.pid
	@$(GRADLE) :com.etendorx.asyncprocess:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_ASYNC)) > $(PIDS_DIR)/asyncprocess.log 2>&1 & echo $$! > $(PIDS_DIR)/asyncprocess.pid
	$(call wait_for,Auth,8094)
	$(call wait_for,DAS,8092)
	$(call wait_for,Edge,8096)
	$(call wait_for,Async Process,8099)
	@echo ""
	@echo -e "$(CYAN)Starting OBConnector Server + Worker (debug enabled)...$(RESET)"
	@$(GRADLE) :com.etendorx.integration.obconn.server:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_SERVER)) > $(PIDS_DIR)/obconn-server.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-server.pid
	# The worker additionally gets -PlogLevel=DEBUG so its pipeline steps are traceable.
	@$(GRADLE) :com.etendorx.integration.obconn.worker:bootRun $(BOOTRUN_ARGS) $(call debug_jvm_arg,$(DBG_WORKER)) -PlogLevel=DEBUG > $(PIDS_DIR)/obconn-worker.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-worker.pid
	$(call wait_for,OBConn Server,8101)
	$(call wait_for,OBConn Worker,8102)
	@$(MAKE) --no-print-directory _banner \
		_CONFIGSRV=" Config Server http://localhost:8888 $(DIM)(debug :$(DBG_CONFIG))$(RESET)" \
		_AUTH=" Auth Service http://localhost:8094 $(DIM)(debug :$(DBG_AUTH))$(RESET)" \
		_EDGE=" Edge Gateway http://localhost:8096 $(DIM)(debug :$(DBG_EDGE))$(RESET)" \
		_ASYNC=" Async Process http://localhost:8099 $(DIM)(debug :$(DBG_ASYNC))$(RESET)"
	@echo -e " DAS debug :$(DBG_DAS)"
	@echo -e " Async debug :$(DBG_ASYNC)"
	@echo -e " OBConn Server debug :$(DBG_SERVER)"
	@echo -e " OBConn Worker debug :$(DBG_WORKER)"

up-local: check-java check-db infra config ## Start everything WITHOUT Config Server (fastest)
	@mkdir -p $(PIDS_DIR)
	@echo ""
	# Each service is only started if its health endpoint is not already answering,
	# so re-running `make up-local` is idempotent.
	@if curl -sf http://localhost:8092/actuator/health > /dev/null 2>&1; then \
		echo -e " DAS $(GREEN)already running$(RESET)"; \
	else \
		printf " Generating entities... "; \
		$(GRADLE) generate.entities -x test > $(PIDS_DIR)/generate.log 2>&1 \
			&& echo -e "$(GREEN)OK$(RESET)" \
			|| { echo -e "$(YELLOW)FAIL$(RESET)"; tail -20 $(PIDS_DIR)/generate.log; exit 1; }; \
		printf " Compiling DAS... "; \
		$(GRADLE) :com.etendorx.das:build -x test > $(PIDS_DIR)/das-build.log 2>&1 \
			&& echo -e "$(GREEN)OK$(RESET)" \
			|| { echo -e "$(YELLOW)FAIL$(RESET)"; tail -20 $(PIDS_DIR)/das-build.log; exit 1; }; \
		echo -e " $(CYAN)Starting DAS...$(RESET)"; \
		$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/das.log 2>&1 & echo $$! > $(PIDS_DIR)/das.pid; \
	fi
	$(call wait_for,DAS,8092)
	@echo ""
	@if curl -sf http://localhost:8101/actuator/health > /dev/null 2>&1; then \
		echo -e " OBConn Server $(GREEN)already running$(RESET)"; \
	else \
		echo -e " $(CYAN)Starting OBConnector Server...$(RESET)"; \
		$(GRADLE) :com.etendorx.integration.obconn.server:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-server.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-server.pid; \
	fi
	@if curl -sf http://localhost:8102/actuator/health > /dev/null 2>&1; then \
		echo -e " OBConn Worker $(GREEN)already running$(RESET)"; \
	else \
		echo -e " $(CYAN)Starting OBConnector Worker...$(RESET)"; \
		$(GRADLE) :com.etendorx.integration.obconn.worker:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-worker.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-worker.pid; \
	fi
	@if curl -sf http://localhost:8099/actuator/health > /dev/null 2>&1; then \
		echo -e " Async Process $(GREEN)already running$(RESET)"; \
	else \
		echo -e " $(CYAN)Starting Async Process...$(RESET)"; \
		$(GRADLE) :com.etendorx.asyncprocess:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/async.log 2>&1 & echo $$! > $(PIDS_DIR)/async.pid; \
	fi
	@if curl -sf http://localhost:8090/ > /dev/null 2>&1; then \
		echo -e " Mock Receiver $(GREEN)already running$(RESET)"; \
	else \
		echo -e " $(CYAN)Starting Mock Receiver...$(RESET)"; \
		$(GRADLE) :com.etendorx.integration.obconn.loadtest:bootRun --args='--spring.profiles.active=mock' > $(PIDS_DIR)/mock-receiver.log 2>&1 & echo $$! > $(PIDS_DIR)/mock-receiver.pid; \
	fi
	$(call wait_for,OBConn Server,8101)
	$(call wait_for,OBConn Worker,8102)
	$(call wait_for,Async Process,8099)
	# NOTE(review): the mock receiver is not health-checked — a fixed 2s sleep is
	# assumed sufficient for it to bind :8090. TODO confirm or add a real wait.
	@sleep 2 && echo -e " Mock Receiver $(GREEN)OK$(RESET) (:8090)"
	@$(MAKE) --no-print-directory _banner _CONFIGSRV=" Config Server $(DIM)skipped (local mode)$(RESET)" _AUTH=" Auth Service $(DIM)skipped (local mode)$(RESET)" _EDGE=" Edge Gateway $(DIM)skipped (local mode)$(RESET)" _ASYNC=" Async Process http://localhost:8099"

up-kafka: check-java check-db infra-kafka config ## Start everything with Kafka instead of Redpanda
	@mkdir -p $(PIDS_DIR)
	@rm -f $(PIDS_DIR)/*.log $(PIDS_DIR)/*.pid
	@echo ""
	@echo -e "$(CYAN)Starting Config Server...$(RESET)"
	@$(GRADLE) :com.etendorx.configserver:bootRun > $(PIDS_DIR)/configserver.log 2>&1 & echo $$! > $(PIDS_DIR)/configserver.pid
	$(call wait_for,Config Server,8888)
	@echo ""
	@echo -e "$(CYAN)Starting Auth + DAS + Edge...$(RESET)"
	@$(GRADLE) :com.etendorx.auth:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/auth.log 2>&1 & echo $$! > $(PIDS_DIR)/auth.pid
	@$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/das.log 2>&1 & echo $$! > $(PIDS_DIR)/das.pid
	@$(GRADLE) :com.etendorx.edge:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/edge.log 2>&1 & echo $$! > $(PIDS_DIR)/edge.pid
	@$(GRADLE) :com.etendorx.asyncprocess:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/asyncprocess.log 2>&1 & echo $$! > $(PIDS_DIR)/asyncprocess.pid
	$(call wait_for,Auth,8094)
	$(call wait_for,DAS,8092)
	$(call wait_for,Edge,8096)
	$(call wait_for,Async Process,8099)
	@echo ""
	@echo -e "$(CYAN)Starting OBConnector Server + Worker...$(RESET)"
	@$(GRADLE) :com.etendorx.integration.obconn.server:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-server.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-server.pid
	@$(GRADLE) :com.etendorx.integration.obconn.worker:bootRun $(BOOTRUN_ARGS) > $(PIDS_DIR)/obconn-worker.log 2>&1 & echo $$! > $(PIDS_DIR)/obconn-worker.pid
	$(call wait_for,OBConn Server,8101)
	$(call wait_for,OBConn Worker,8102)
	@$(MAKE) --no-print-directory _banner _CONFIGSRV=" Config Server http://localhost:8888" _AUTH=" Auth Service http://localhost:8094" _EDGE=" Edge Gateway http://localhost:8096" _ASYNC=" Async Process http://localhost:8099"

# Internal: prints the post-startup summary. The _CONFIGSRV/_AUTH/_EDGE/_ASYNC
# lines are injected per-mode by the up* targets via command-line variables.
_banner:
	@echo ""
	@echo -e "$(GREEN)=========================================$(RESET)"
	@echo -e "$(GREEN) All services ready$(RESET)"
	@echo -e "$(GREEN)=========================================$(RESET)"
	@echo ""
	@echo -e "$(CYAN) Infrastructure$(RESET)"
	@echo -e " Redpanda Broker localhost:29092"
	@echo -e " Redpanda Console http://localhost:9093"
	@echo -e " Kafka Connect API http://localhost:8083"
	@echo -e " Jaeger UI http://localhost:16686"
	@echo ""
	@echo -e "$(CYAN) EtendoRX Core$(RESET)"
	@echo -e "$(_CONFIGSRV)"
	@echo -e "$(_AUTH)"
	@echo -e " DAS http://localhost:8092"
	@echo -e "$(_EDGE)"
	@echo -e "$(_ASYNC)"
	@echo ""
	@echo -e "$(CYAN) OBConnector$(RESET)"
	@echo -e " Server API http://localhost:8101/api/sync/"
	@echo -e " Worker http://localhost:8102"
	@echo -e " Dashboard http://localhost:8102/dashboard $(YELLOW)(requires dashboard.enabled=true)$(RESET)"
	@echo -e " Mock Receiver http://localhost:8090 $(DIM)(simulates external system)$(RESET)"
	@echo ""
	@echo -e "$(CYAN) Database$(RESET)"
	@echo -e " PostgreSQL $(DB_URL)/$(DB_SID) $(DIM)(Etendo Classic)$(RESET)"
	@echo ""
	@echo -e "$(GREEN)=========================================$(RESET)"
	@echo -e " Logs: $(CYAN)make logs$(RESET)"
	@echo -e " Status: $(CYAN)make status$(RESET)"
	@echo -e " Stop: $(CYAN)make down$(RESET)"
	@echo -e "$(GREEN)=========================================$(RESET)"

down: ## Stop all services + infra
	@echo -e "$(YELLOW)Stopping services...$(RESET)"
	# Kills every PID recorded under .run/ (covers both up and up-local naming),
	# skipping processes that already exited (kill -0 probe).
	@if [ -d $(PIDS_DIR) ]; then \
		for pidfile in $(PIDS_DIR)/*.pid; do \
			if [ -f "$$pidfile" ]; then \
				pid=$$(cat "$$pidfile"); \
				name=$$(basename "$$pidfile" .pid); \
				if kill -0 "$$pid" 2>/dev/null; then \
					kill "$$pid" 2>/dev/null && echo -e " Stopped $$name ($$pid)"; \
				fi; \
				rm -f "$$pidfile"; \
			fi \
		done; \
	fi
	@$(MAKE) infra-down
	@echo -e "$(GREEN)All stopped.$(RESET)"

status: ## Show status of all services
	@echo -e "$(CYAN)=== Infrastructure ===$(RESET)"
	@cd $(INFRA) && (docker-compose ps 2>/dev/null || docker-compose -f docker-compose.redpanda.yml ps 2>/dev/null) || true
	@echo ""
	@echo -e "$(CYAN)=== Services ===$(RESET)"
	@if [ -d $(PIDS_DIR) ]; then \
		for pidfile in $(PIDS_DIR)/*.pid; do \
			if [ -f "$$pidfile" ]; then \
				pid=$$(cat "$$pidfile"); \
				name=$$(basename "$$pidfile" .pid); \
				if kill -0 "$$pid" 2>/dev/null; then \
					echo -e " $(GREEN)RUNNING$(RESET) $$name (pid $$pid)"; \
				else \
					echo -e " $(YELLOW)STOPPED$(RESET) $$name"; \
				fi \
			fi \
		done; \
	else \
		echo " No services started via Makefile"; \
	fi

logs: ## Tail all service logs
	@exec tail -f $(PIDS_DIR)/*.log

portal: ## Open Dev Portal (service browser on :8199)
	@echo -e "$(GREEN)Dev Portal$(RESET) → http://localhost:8199"
	@cd $(ROOT)/portal && exec python3 -m http.server 8199

# Load-test knobs, overridable on the command line (make loadtest THREADS=8).
THREADS ?= 4
MESSAGES ?= 100
POLL ?= true

loadtest: loadtest.send loadtest.receive ## Run Send + Receive load tests sequentially

loadtest.send: ## Run Send load test (THREADS=4 MESSAGES=100)
	@echo -e "$(CYAN)Running Send load test ($(THREADS) threads × $(MESSAGES) msgs)...$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.loadtest:bootRun \
		--args='--loadtest.mode=send --loadtest.threads=$(THREADS) --loadtest.messages-per-thread=$(MESSAGES) --loadtest.poll-status=$(POLL)'

loadtest.receive: ## Run Receive load test (THREADS=4 MESSAGES=100)
	@echo -e "$(CYAN)Running Receive load test ($(THREADS) threads × $(MESSAGES) msgs)...$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.loadtest:bootRun \
		--args='--loadtest.mode=receive --loadtest.enabled=false --loadtest.threads=$(THREADS) --loadtest.messages-per-thread=$(MESSAGES) --loadtest.poll-status=$(POLL)'

mock: ## Start mock external receiver on :8090 (for Send workflow testing)
	@echo -e "$(CYAN)Starting Mock Receiver on :8090...$(RESET)"
	$(GRADLE) :com.etendorx.integration.obconn.loadtest:bootRun --args='--spring.profiles.active=mock'

# `mock` added here: it is a command, not a file — without .PHONY a file named
# `mock` in the repo root would silently disable the target.
.PHONY: mock purge purge-async

# First running container whose name ends in 'redpanda-1'; falls back to a
# plain 'redpanda' container name.
REDPANDA_CTR := $(shell docker ps --format '{{.Names}}' 2>/dev/null | grep -m1 'redpanda-1$$' || echo "redpanda")
PURGE_TOPICS := obconnector.send obconnector.send-dlt \
	obconnector.send-retry-10000 obconnector.send-retry-20000 \
	obconnector.send-retry-40000 obconnector.send-retry-60000 \
	obconnector.receive obconnector.receive-dlt \
	obconnector.receive-retry-10000 obconnector.receive-retry-20000 \
	obconnector.receive-retry-40000 obconnector.receive-retry-60000 \
	default.public.c_bpartner

purge-async: ## Purge async-process Kafka topics (async-process, async-process-execution, rejected-process and Streams internals)
	@echo -e "$(YELLOW)Purging async-process Kafka topics...$(RESET)"
	@ASYNC_TOPICS=$$(docker exec $(REDPANDA_CTR) rpk topic list 2>/dev/null | awk 'NR>1{print $$1}' | grep -E '^(async-process|rejected-process)'); \
	if [ -z "$$ASYNC_TOPICS" ]; then \
		echo -e " $(DIM)No async-process topics found$(RESET)"; \
	else \
		for topic in $$ASYNC_TOPICS; do \
			docker exec $(REDPANDA_CTR) rpk topic delete $$topic > /dev/null 2>&1 \
				&& echo -e " $(GREEN)Deleted$(RESET) $$topic" \
				|| echo -e " $(YELLOW)Error$(RESET) $$topic"; \
		done; \
	fi
	@echo -e "$(GREEN)Done.$(RESET) Topics auto-recreate when async-process service reconnects."
+ +purge: ## Purge all OBConnector Kafka topics (delete + auto-recreate) + @echo -e "$(YELLOW)Purging Kafka topics...$(RESET)" + @for topic in $(PURGE_TOPICS); do \ + if docker exec $(REDPANDA_CTR) rpk topic describe $$topic > /dev/null 2>&1; then \ + docker exec $(REDPANDA_CTR) rpk topic delete $$topic > /dev/null 2>&1 \ + && echo -e " $(GREEN)Deleted$(RESET) $$topic" \ + || echo -e " $(YELLOW)Error$(RESET) $$topic"; \ + else \ + echo -e " $(DIM)Skip$(RESET) $$topic (not found)"; \ + fi \ + done + @echo -e "$(GREEN)Done.$(RESET) Topics auto-recreate when producers/consumers reconnect." + +# ============================================================================== +# HELP +# ============================================================================== + +.PHONY: help + +help: ## Show this help + @echo "" + @echo -e "$(CYAN)OBConnector Development$(RESET)" + @echo "" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ + awk 'BEGIN {FS = ":.*?## "}; {printf " $(GREEN)%-18s$(RESET) %s\n", $$1, $$2}' + @echo "" + @echo -e "$(CYAN)Quick start:$(RESET)" + @echo " make up-local # Fastest: Redpanda + services (no Config Server)" + @echo " make up # Redpanda + Config Server + services" + @echo " make up-debug # Redpanda + Config Server + services (with remote debug)" + @echo " make up-kafka # Kafka + Config Server + services" + @echo " make down # Stop everything" + @echo "" diff --git a/build.gradle b/build.gradle index 287ed416..a107a3ef 100644 --- a/build.gradle +++ b/build.gradle @@ -85,6 +85,12 @@ subprojects { apply plugin: 'jacoco' apply plugin: 'java' + plugins.withId('org.springframework.boot') { + tasks.named('bootRun') { + systemProperties System.properties + } + } + jacoco { toolVersion = "0.8.10" } diff --git a/docs/INDEX.md b/docs/INDEX.md new file mode 100644 index 00000000..87e35d27 --- /dev/null +++ b/docs/INDEX.md @@ -0,0 +1,46 @@ +# EtendoRX — Technical Documentation Index + +EtendoRX is a reactive microservices platform built on **Spring Boot 
3.1.4 / Spring Cloud 2022.0.4 / Java 17** for Etendo ERP integrations. It provides a composable runtime for data access, authentication, API routing, asynchronous task processing, and synchronization workflows. + +--- + +## Service Catalog + +| Service | Port | Purpose | +|---|---|---| +| Config Server | 8888 | Spring Cloud Config — centralized configuration for all services | +| Auth | 8094 | JWT authentication — token issuance and validation | +| DAS | 8092 | Data Access Service — REST API layer over the Etendo database | +| Edge | 8096 | API Gateway — routing, load balancing, and request filtering | +| AsyncProcess | 8099 | Async task processing — background job execution and scheduling | +| OBConnector Server | 8101 | Sync REST API — orchestration endpoint for synchronization flows | +| OBConnector Worker | 8102 | Sync execution engine — worker that executes sync pipeline steps | + +--- + +## Platform Documentation + +| Document | Description | +|---|---| +| [architecture.md](./architecture.md) | Platform architecture, service dependency graph, and service catalog with roles and responsibilities | +| [getting-started.md](./getting-started.md) | Prerequisites, local installation steps, environment setup, and first run walkthrough | +| [makefile-reference.md](./makefile-reference.md) | All build and deployment commands exposed through the project Makefile | +| [configuration.md](./configuration.md) | Configuration files, Spring Cloud Config properties, per-service overrides, and environment variables | +| [infrastructure.md](./infrastructure.md) | Docker Compose setup, Kafka and Redpanda message broker configuration, and Jaeger distributed tracing | + +--- + +## Module Documentation + +| Module | Index | +|---|---| +| OBConnector | [com.etendorx.integration.obconnector — Documentation Index](../modules/com.etendorx.integration.obconnector/docs/INDEX.md) | + +--- + +## Diagrams + +| File | Description | +|---|---| +| 
[00-async-process.plantuml](./00-async-process.plantuml) | Sequence diagram of the async process flow — job submission, worker pickup, and result handling | +| [01-async-deployment-diagram.plantuml](./01-async-deployment-diagram.plantuml) | Deployment diagram showing async infrastructure components and their relationships | diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 00000000..eb12f33b --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,515 @@ +# EtendoRX Platform Architecture + +## Table of Contents + +1. [Platform Overview](#1-platform-overview) +2. [Service Catalog](#2-service-catalog) +3. [Architecture Diagram](#3-architecture-diagram) +4. [Module Categories](#4-module-categories) +5. [Dependency Graph](#5-dependency-graph) +6. [Technology Stack](#6-technology-stack) +7. [Key Architectural Patterns](#7-key-architectural-patterns) + +--- + +## 1. Platform Overview + +EtendoRX is a reactive microservices platform built on top of the Etendo ERP system. It exposes Etendo's business data via REST APIs, enables event-driven processing through Apache Kafka, and provides bidirectional synchronization with external systems (e.g., Openbravo POS, third-party ERPs). + +### Core capabilities + +- **REST API layer**: The Data Access Service (DAS) exposes JPA-backed REST endpoints over the Etendo PostgreSQL database, with fine-grained read/write mapping per entity. +- **Event-driven sync**: Change Data Capture (CDC) via Debezium streams PostgreSQL WAL events into Kafka. Workers consume these events and push data to external systems. +- **Bidirectional integration**: The OBConnector module handles both directions — receiving from external systems into Etendo (via DAS), and sending from Etendo to external systems (via Debezium CDC). +- **Centralized configuration**: All services fetch their configuration from a Spring Cloud Config Server at startup. No service carries its own YAML in production. 
+- **Unified authentication**: A dedicated Auth service issues EC-key-signed JWTs. All inter-service and client calls are validated by the Edge gateway or by individual services using the shared `utils.auth` library. + +### Build system + +| Attribute | Value | +|---|---| +| Build tool | Gradle 8.3 | +| Language | Java 17 | +| Spring Boot | 3.1.4 | +| Spring Cloud | 2022.0.4 | +| Platform version | 2.3.4 (defined in `settings.gradle`) | +| Custom Gradle plugin | `com.etendorx.gradlepluginrx:2.1.0` | + +The root project is a Gradle multi-project build named `etendorx`. It dynamically discovers subprojects by scanning five top-level directories (`libs`, `modules_core`, `modules_gen`, `modules_test`, `modules`) for `build.gradle` files. + +--- + +## 2. Service Catalog + +### Deployable services (Spring Boot applications) + +| Service Name | Spring App Name | Port | Module Path | Purpose | +|---|---|---|---|---| +| Config Server | `configserver` | 8888 | `modules_core/com.etendorx.configserver` | Spring Cloud Config Server. Central YAML distribution point for all services. All other services fetch their configuration from this service at startup. | +| Auth | `auth` | 8094 | `modules_core/com.etendorx.auth` | JWT authentication service. Issues and validates EC-signed tokens. Integrates with Spring Security OAuth2 and Spring Cloud OpenFeign. Exposes Swagger UI. | +| DAS | `das` | 8092 | `modules_core/com.etendorx.das` | Data Access Service. JPA-based REST API over the Etendo PostgreSQL database. Supports optional gRPC transport. Dynamically loads entity definitions from the code-generated `entities` module. | +| Edge | `edge` | 8096 | `modules_core/com.etendorx.edge` | Spring Cloud Gateway (reactive). Single ingress point for all external API traffic. Routes requests to DAS, Auth, and other services. Validates JWTs using `utils.auth`. | +| AsyncProcess | `asyncprocess` | 8099 | `modules_core/com.etendorx.asyncprocess` | Asynchronous task processing service. 
Kafka consumer for workflow status events. Exposes REST endpoints for polling async operation results. Uses Spring Cloud Stream + Reactor Kafka. | +| OBConnector Server | `obconnsrv` | 8101 | `modules/com.etendorx.integration.obconnector/com.etendorx.integration.obconn.server` | REST API entrypoint for triggering sync operations. Exposes `PUT /api/sync/{modelName}/{entityId}`, `POST /api/sync/{modelName}`, and `GET /api/sync/status/{workflowId}`. Authenticates via `X-TOKEN` header. | +| OBConnector Worker | `worker` | 8102 | `modules/com.etendorx.integration.obconnector/com.etendorx.integration.obconn.worker` | Kafka consumer that executes the full sync workflow pipeline (MAP → PRE_LOGIC → SYNC → POST_LOGIC → PROCESS_DATA → POST_ACTION). Handles both receive (external → Etendo) and send (Etendo → external) directions. | + +### Non-deployable services (library modules, not standalone applications) + +| Module | Path | Role | +|---|---|---| +| WebFlux | `modules_core/com.etendorx.webflux` | Reactive WebFlux base module with Thymeleaf and JPA. Not a standalone service. | +| Auth Client | `modules/com.etendorx.auth.client` | Client-side auth utilities. No standalone application. | +| To Openbravo (mapping) | `modules/com.etendorx.integration.to_openbravo/com.etendorx.integration.to_openbravo.mapping` | DTO mapper components for Etendo-Openbravo field transformations. Loaded by DAS at runtime (`includeInDasDependencies = true`). | +| To Openbravo (worker) | `modules/com.etendorx.integration.to_openbravo/com.etendorx.integration.to_openbravo.worker` | SPI adapters for HTTP method binding and response parsing specific to Openbravo POS. Loaded by OBConnector Worker at runtime. | + +--- + +## 3. 
Architecture Diagram + +### Full system view + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ CONFIGURATION PLANE │ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────────┐ │ +│ │ Config Server (port 8888) [configserver] │ │ +│ │ Spring Cloud Config — serves YAML from /rxconfig │ │ +│ └─────────┬──────────┬──────────┬──────────┬──────────┬───────────────────┘ │ +│ │ │ │ │ │ (all services │ +│ │ │ │ │ │ fetch config │ +│ v v v v v at startup) │ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ RECEIVE DIRECTION │ +│ (External System → Etendo) │ +│ │ +│ ┌──────────────┐ ┌─────────────────┐ ┌──────────────────────────────┐ │ +│ │ External │ │ OBConnector │ │ Kafka / Redpanda │ │ +│ │ System │────>│ Server │────>│ │ │ +│ │ │ PUT │ (port 8101) │ │ topic: sync.receive.* │ │ +│ │ e.g. │ POST│ [obconnsrv] │ │ │ │ +│ │ Openbravo │ │ │ └──────────────┬───────────────┘ │ +│ │ POS │ │ REST API │ │ │ +│ └──────────────┘ │ /api/sync/* │ │ consume │ +│ └─────────────────┘ v │ +│ ┌──────────────────────────────┐│ +│ │ OBConnector Worker ││ +│ │ (port 8102) [worker] ││ +│ │ ││ +│ │ MAP → PRE_LOGIC → SYNC → ││ +│ │ POST_LOGIC → PROCESS_DATA ││ +│ │ → POST_ACTION ││ +│ └──────────────┬───────────────┘│ +│ │ HTTP (OkHttp3) │ +│ v │ +│ ┌──────────────────────────────┐│ +│ │ DAS (port 8092) ││ +│ │ [das] ││ +│ │ ││ +│ │ JPA REST API ││ +│ │ /api/ ││ +│ └──────────────┬───────────────┘│ +│ │ JDBC │ +│ v │ +│ ┌──────────────────────────────┐│ +│ │ PostgreSQL (Etendo DB) ││ +│ │ port 5432 ││ +│ └──────────────────────────────┘│ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ SEND DIRECTION │ +│ (Etendo → External System) │ +│ │ +│ ┌──────────────────────────────┐ │ +│ │ 
PostgreSQL (Etendo DB) │ │ +│ │ WAL (Write-Ahead Log) │ │ +│ └──────────────┬───────────────┘ │ +│ │ CDC │ +│ v │ +│ ┌──────────────────────────────┐ ┌──────────────────────────────────────┐ │ +│ │ Debezium Kafka Connect │────>│ Kafka / Redpanda │ │ +│ │ (port 8083) │ │ │ │ +│ │ Captures INSERT/UPDATE/ │ │ topic: dbz. │ │ +│ │ DELETE events │ └──────────────┬───────────────────────┘ │ +│ └──────────────────────────────┘ │ DbzListener consumes │ +│ v │ +│ ┌──────────────────────────────────────┐ │ +│ │ OBConnector Worker │ │ +│ │ (port 8102) [worker] │ │ +│ │ │ │ +│ │ SendWorkflowImpl: │ │ +│ │ MAP → PRE_LOGIC → SYNC → │ │ +│ │ POST_LOGIC → PROCESS_DATA │ │ +│ │ → POST_ACTION │ │ +│ └──────────────┬───────────────────────┘ │ +│ │ HTTP (OkHttp3) │ +│ v │ +│ ┌──────────────────────────────────────┐ │ +│ │ External System │ │ +│ │ e.g. Openbravo POS │ │ +│ └──────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ AUTHENTICATION & ROUTING PLANE │ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────────┐ │ +│ │ Edge Gateway (port 8096) [edge] │ │ +│ │ Spring Cloud Gateway — reactive │ │ +│ │ JWT validation via utils.auth │ │ +│ └──────────────┬──────────────────────┬────────────────────────────────────┘ │ +│ │ │ │ +│ v v │ +│ ┌──────────────────────┐ ┌──────────────────────────────────────────────┐ │ +│ │ Auth (port 8094) │ │ DAS / AsyncProcess / other │ │ +│ │ [auth] │ │ internal services │ │ +│ │ EC-signed JWTs │ └──────────────────────────────────────────────┘ │ +│ │ OAuth2 client │ │ +│ │ Feign + OkHttp │ │ +│ └──────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ ASYNC WORKFLOW STATUS PLANE │ +│ │ +│ 
┌──────────────────────────────────────────────────────────────────────────┐ │ +│ │ AsyncProcess (port 8099) [asyncprocess] │ │ +│ │ Spring Cloud Stream + Reactor Kafka │ │ +│ │ Consumes workflow status events from Kafka │ │ +│ │ REST API for polling async operation results │ │ +│ └──────────────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## 4. Module Categories + +The Gradle multi-project build organizes all modules into five top-level directories. Each directory has a distinct responsibility scope. + +### 4.1 libs/ — Shared Libraries (10 modules) + +These are plain Java library JARs (no `bootJar`). They are consumed by both `modules_core` and `modules` services. They do not start standalone. + +| Module | Artifact ID | Purpose | +|---|---|---| +| `com.etendorx.clientrest_core` | `clientrest_core` | Base classes for generated Feign REST clients. Provides Jackson and HATEOAS integration for REST client code. Used by `auth` and `modules_gen/clientrest`. | +| `com.etendorx.das_core` | `das_core` | Core DAS abstractions: base JPA repository wrappers, JSONPath-based field access, Swagger model annotations, and Bean Validation support. Used by DAS, entities, and mapping modules. | +| `com.etendorx.generate_entities` | `generate-entities` | Standalone Spring Boot CLI application (not a service). Reads the Etendo database schema and generates Java source code for JPA entities, JPA repositories, REST projections, and client stubs into `modules_gen/`. Depends on `generate_entities.core` and `generate_entities.extradomaintype`. | +| `com.etendorx.generate_entities.core` | `core` | Core schema-reading infrastructure for entity generation: Hibernate metadata introspection, JDBC-based DB schema access, Jettison JSON, and JSONPath. Pulled in by `generate_entities`. 
| +| `com.etendorx.generate_entities.extradomaintype` | `extra-domain-type` | Extension point for registering custom Hibernate domain types during entity generation. Depends on `generate_entities.core`. | +| `com.etendorx.lib.kafka` | _(no artifact, group: `com.etendorx.lib.kafka`)_ | Thin Kafka wrapper: message envelope POJOs, Kafka Streams configuration helpers. Used by `asyncprocess`, `lib.asyncprocess`, and OBConnector lib. | +| `com.etendorx.lib.asyncprocess` | _(no artifact, group: `com.etendorx.lib`)_ | Shared async processing abstractions and Kafka integration helpers. Depends on `lib.kafka`. Used by `asyncprocess` service. | +| `com.etendorx.utils.auth` | `utils.auth` | JWT utility library. Parsing and validation using `jjwt` 0.12.x and `nimbus-jose-jwt`. EC key support. Auth0 `java-jwt` for SWS compatibility. Used by almost every service. | +| `com.etendorx.utils.common` | `utils.common` | Minimal shared utilities: SLF4J logging helpers, JSONPath support. Zero Spring dependencies. | +| `com.etendorx.lib.asyncprocess` | _(group: `com.etendorx.lib`)_ | Async task processing base with Kafka Streams integration. Depends on `lib.kafka`. | + +### 4.2 modules_core/ — Core Platform Services (6 modules, 5 deployable) + +These are the foundational Spring Boot services that every EtendoRX deployment requires. + +| Module | App Name | Deployable | Key Dependencies | Notes | +|---|---|---|---|---| +| `com.etendorx.configserver` | `configserver` | Yes | `spring-cloud-config-server` | Minimal service: serves YAML files. No custom code besides `@EnableConfigServer`. | +| `com.etendorx.auth` | `auth` | Yes | `spring-security`, `jjwt`, `openfeign`, `utils.auth`, `clientrest`, `clientrest_core` | Issues EC-signed JWTs. Feign + OkHttp for downstream calls. SpringDoc OpenAPI UI. 
| +| `com.etendorx.das` | `das` | Yes | `spring-data-jpa`, `das_core`, `entities` (codegen), `utils.auth`, PostgreSQL, Oracle JDBC, gRPC (optional) | Dynamically includes any module with `includeInDasDependencies = true`. JSqlParser for SQL query manipulation. | +| `com.etendorx.edge` | `edge` | Yes | `spring-cloud-starter-gateway`, `utils.auth` | Pure routing. WebFlux-based (reactive). No blocking I/O. | +| `com.etendorx.asyncprocess` | `asyncprocess` | Yes | `spring-cloud-stream-kafka`, `reactor-kafka`, `lib.kafka`, `lib.asyncprocess`, `utils.auth` | Handles async workflow tracking via Kafka topics. | +| `com.etendorx.webflux` | — | No | `spring-boot-starter-webflux`, `thymeleaf`, `spring-data-jpa`, PostgreSQL | Reactive base module. Not standalone. Experimental. | + +### 4.3 modules/ — Custom Integration Modules (5 deployable modules across 3 integration packages) + +These are business-logic modules specific to Etendo integrations. They depend on `libs/` and optionally on `modules_core/`. + +#### com.etendorx.integration.obconnector (OBConnector — 5 submodules) + +| Submodule | Artifact | Role | +|---|---|---| +| `com.etendorx.integration.obconn.common` | — (library) | Workflow contract interfaces only: `SyncWorkflow`, `SyncActivities`, `SyncOperation`, `SyncConverters`, `SyncPreLogic`, `SyncProcessData`. No implementations. Jackson annotations for model POJOs. | +| `com.etendorx.integration.obconn.lib` | — (library) | Core sync engine implementation. `SyncWorkflowBase` pipeline runner, Kafka integration (`DbzListener`, `KafkaChangeSend`, `KafkaProducerService`), configuration loading (`ExternalSystemConfiguration`), and all resilience patterns (dedup, DLT replay, HTTP retry, Saga, metrics, tracing). Depends on `obconn.common`, `lib.kafka`, `utils.auth`, `utils.common`. Uses OkHttp3, Micrometer, Thymeleaf, Commons JEXL3. | +| `com.etendorx.integration.obconn.server` | `obconn-srv` | Spring Boot REST server. `ConnectorApplication`. Exposes `/api/sync/*`. 
Auth via `X-TOKEN` header. Depends only on `obconn.common`. OkHttp3 and GSON for HTTP calls. | +| `com.etendorx.integration.obconn.worker` | `obconn-wrk` | Spring Boot Kafka consumer. `SyncWorkerMain`. Executes full sync pipeline. Depends on `obconn.common`, `obconn.lib`, `lib.kafka`, `utils.auth`. Runtime-loads `to_openbravo.worker` via `runtimeOnly`. GSON, JSONPath, OkHttp logging interceptor. | +| `com.etendorx.integration.obconn.loadtest` | — | Load testing module. Not deployed in production. | + +#### com.etendorx.integration.to_openbravo (Openbravo POS adapter — 2 submodules) + +| Submodule | Artifact | Role | +|---|---|---| +| `com.etendorx.integration.to_openbravo.mapping` | `mapping` | ~30 Spring `@Component` DTO mapper beans implementing `DTOReadMapping` / `DTOWriteMapping`. Loaded by DAS at runtime (`includeInDasDependencies = true`). Depends on `entities`, `das_core`, `utils.auth`. | +| `com.etendorx.integration.to_openbravo.worker` | `worker` | SPI adapters implementing `ExternalRequestMethodAdapter` and `ExternalRequestProcessResponseAdapter`. Openbravo-specific HTTP method binding (always POST) and response parsing (extracts entity ID from Openbravo JSON). Loaded by OBConnector Worker at runtime. | + +#### com.etendorx.auth.client (Auth Client — 1 module) + +| Module | Role | +|---|---| +| `com.etendorx.auth.client` | Client-side auth helper. No deployable application. Code-generated client stubs in `src-gen/`. | + +### 4.4 modules_gen/ — Code-Generated Modules (4 modules) + +These modules contain Java source code generated by `libs/com.etendorx.generate_entities` from the live Etendo PostgreSQL schema. They are regenerated when the schema changes. Do not edit manually. + +| Module | Group | Purpose | +|---|---|---| +| `com.etendorx.entities` | `com.etendorx.entities` | Generated JPA entity classes, Spring Data JPA repositories, REST projections, and field mappings. 
Source directories: `src/main/entities`, `src/main/jparepo`, `src/main/projections`, `src/main/mappings`. Consumed by DAS as a `codegen` classpath dependency. | +| `com.etendorx.entitiesModel` | `com.etendorx.entitiesModel` | Generated HATEOAS model classes (DTOs) for REST responses. Depends on `clientrest_core`. | +| `com.etendorx.clientrest` | `com.etendorx.entitiesModel` | Generated Feign REST client interfaces for each entity. Depends on `clientrest_core`. Used by Auth and other services that need to call DAS programmatically. | +| `com.etendorx.grpc.common` | — | Generated gRPC Protobuf stubs. Built with `protobuf-gradle-plugin`. Used by DAS when `grpc.enabled=true`. | + +### 4.5 modules_test/ — Test Modules (2 modules) + +Standalone test applications for integration testing specific infrastructure components. Not deployed in production. + +| Module | Purpose | +|---|---| +| `com.etendorx.test.eventhandler` | Integration tests for Kafka event handler flows. | +| `com.etendorx.test.grpc` | Integration tests for the gRPC transport layer in DAS. | + +--- + +## 5. Dependency Graph + +The dependency flow is strictly layered: generated modules and libs feed upward into core services and integration modules. No circular dependencies exist between layers. 
+ +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ modules_gen/ │ +│ │ +│ ┌──────────────┐ ┌─────────────────┐ ┌──────────────┐ ┌─────────────┐ │ +│ │ entities │ │ entitiesModel │ │ clientrest │ │ grpc.common │ │ +│ └──────┬───────┘ └────────┬────────┘ └──────┬───────┘ └──────┬──────┘ │ +│ │ │ │ │ │ +└─────────┼───────────────────┼───────────────────┼─────────────────┼────────┘ + │ │ │ │ + │ ┌────────────────┼───────────────────┼─────────────────┘ + │ │ │ │ +┌─────────┼──┼────────────────┼───────────────────┼───────────────────────────┐ +│ │ │ │ │ libs/ │ +│ │ │ ┌─────────────┘ ┌────────┘ │ +│ │ │ │ │ │ +│ ┌──────▼──▼──▼──┐ ┌───────────┐ ┌──▼─────────────┐ ┌───────────────┐ │ +│ │ das_core │ │ lib.kafka│ │ clientrest_core│ │ utils.auth │ │ +│ └──────┬────────┘ └─────┬─────┘ └────────────────┘ └───────┬───────┘ │ +│ │ │ │ │ +│ ┌──────▼──────────────────▼─────────┐ ┌──────────────────────┘ │ +│ │ generate_entities.* │ │ utils.common │ +│ │ (code gen tool only) │ └─────────────────────────────────┘│ +│ └───────────────────────────────────┘ │ +│ │ +│ ┌────────────────────────────────┐ │ +│ │ lib.asyncprocess │ │ +│ │ (depends on: lib.kafka) │ │ +│ └────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ │ │ │ │ + v v v v v +┌─────────────────────────────────────────────────────────────────────────────┐ +│ modules_core/ │ +│ │ +│ ┌─────────────┐ ┌──────────────┐ ┌─────────────────────────────────┐ │ +│ │ configserver│ │ auth │ │ das │ │ +│ │ │ │ │ │ │ │ +│ │ (no lib │ │ utils.auth │ │ das_core + entities (codegen) │ │ +│ │ deps) │ │ clientrest │ │ utils.auth + grpc (optional) │ │ +│ │ │ │ clientrest_ │ │ to_openbravo.mapping (runtime) │ │ +│ │ │ │ core │ │ │ │ +│ └─────────────┘ └──────────────┘ └─────────────────────────────────┘ │ +│ │ +│ ┌─────────────────────────────┐ ┌─────────────────────────────────────┐ │ +│ │ edge │ │ asyncprocess │ │ +│ │ │ │ │ │ +│ │ 
utils.auth │ │ lib.kafka + lib.asyncprocess │ │ +│ │ spring-cloud-gateway │ │ utils.auth │ │ +│ └─────────────────────────────┘ └─────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ │ + v v +┌─────────────────────────────────────────────────────────────────────────────┐ +│ modules/ │ +│ │ +│ ┌──────────────────────────────────────────────────────────────────────┐ │ +│ │ com.etendorx.integration.obconnector │ │ +│ │ │ │ +│ │ obconn.common (interfaces only) │ │ +│ │ │ │ │ +│ │ ├── obconn.lib (engine: depends on common, lib.kafka, │ │ +│ │ │ utils.auth, utils.common, Micrometer, JEXL3) │ │ +│ │ │ │ │ +│ │ ├── obconn.server (Spring Boot app: depends on common only) │ │ +│ │ │ │ │ +│ │ └── obconn.worker (Spring Boot app: depends on common, lib, │ │ +│ │ lib.kafka, utils.auth; │ │ +│ │ runtimeOnly: to_openbravo.worker) │ │ +│ └──────────────────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────────────────────────────────┐ │ +│ │ com.etendorx.integration.to_openbravo │ │ +│ │ │ │ +│ │ mapping (depends on: entities, das_core, utils.auth) │ │ +│ │ worker (depends on: obconn.common; SPI for obconn.worker) │ │ +│ └──────────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### Runtime loading via `includeInDasDependencies` + +DAS has a dynamic dependency mechanism: after project evaluation, it scans all `build.gradle` files across all module directories for the property `includeInDasDependencies = true`. Modules that set this property are automatically added as `implementation` dependencies of DAS. Currently this applies to: + +- `com.etendorx.integration.to_openbravo.mapping` +- `com.etendorx.integration.to_openbravo.worker` + +This allows custom integration mappers to be loaded by DAS without modifying the DAS `build.gradle`. + +--- + +## 6. 
Technology Stack + +### Core runtime + +| Component | Version | Usage | +|---|---|---| +| Java | 17 (LTS) | Platform-wide language version. Lombok 1.18.22 is incompatible with JDK 24+. | +| Spring Boot | 3.1.4 | Application framework for all deployable services. | +| Spring Cloud | 2022.0.4 | Config Server, Gateway, OpenFeign, Stream (Kafka binder). | +| Gradle | 8.3 | Build tool. Multi-project with dynamic subproject discovery. | +| Spring Data JPA | (Spring Boot BOM) | ORM layer in DAS and entities module. | +| Spring Security / OAuth2 | (Spring Boot BOM) | Security in Auth service. | +| Spring Cloud Gateway | (Spring Cloud BOM) | Reactive API gateway in Edge. | +| Spring Cloud Stream | (Spring Cloud BOM) | Kafka consumer/producer abstraction in AsyncProcess and OBConnector lib. | + +### Messaging and CDC + +| Component | Version | Usage | +|---|---|---| +| Apache Kafka | 3.6.0 (Streams) | Message broker for all async workflows. | +| Redpanda | Compatible | Kafka-compatible broker used in local dev (see `infraestructure/docker-compose.yml`). | +| Reactor Kafka | (Spring Cloud BOM) | Reactive Kafka consumer in AsyncProcess. | +| Debezium Kafka Connect | (Docker Compose) | CDC connector capturing PostgreSQL WAL events. Port 8083. | +| Kafka UI | (Docker Compose) | Management UI for local dev. Port 9093 (UI), 8002 (Kafka Connect UI). | + +### Database + +| Component | Version | Usage | +|---|---|---| +| PostgreSQL | (runtime) | Primary Etendo ERP database. Port 5432 (prod), 5465 (local Docker). | +| PostgreSQL JDBC | 42.3.8 / 42.6.0 | JDBC driver in DAS and das_core. | +| Oracle JDBC (ojdbc8) | 21.6.0.0.1 | Optional Oracle DB driver available in DAS. | +| H2 | 1.4.200 | In-memory DB for DAS unit tests only. | + +### HTTP and serialization + +| Component | Version | Usage | +|---|---|---| +| OkHttp3 | 4.10.0 | HTTP client for all inter-service calls from OBConnector server and worker. Connection pool: 20 idle connections, 5-minute keepalive. 
30s connect/read/write timeouts. | +| OkHttp Logging Interceptor | (Spring Cloud BOM) | HTTP request/response logging in OBConnector Worker. | +| Jackson | 2.13.x / 2.14.x / 2.17.x | JSON serialization across all modules. Singleton `ObjectMapper` pattern enforced. | +| GSON | 2.8.9 | JSON parsing in OBConnector server and worker (alongside Jackson). | +| Jettison | 1.5.4 | JSON/XML bridge used in Auth and generate_entities.core. | + +### Authentication and JWT + +| Component | Version | Usage | +|---|---|---| +| JJWT (io.jsonwebtoken) | 0.9.1 (legacy) + 0.11.2 / 0.12.2 | JWT creation and parsing. Multiple versions due to legacy usage in Auth service. | +| Nimbus JOSE JWT | 9.47 | EC key handling in `utils.auth`. | +| Auth0 java-jwt | 3.1.0 | SWS compatibility in `utils.auth`. | +| Spring Security OAuth2 Jose | (Spring Boot BOM) | OAuth2 JWT support in Auth service. | + +### Observability and resilience + +| Component | Version | Usage | +|---|---|---| +| Micrometer | 1.11.5 | Metrics collection in OBConnector lib (`SyncMetricsService`). Counters for messages received/processed/errors/deduplicated/DLT; timers for workflow and HTTP duration. | +| Spring Boot Actuator | (Spring Boot BOM) | Health endpoints (`/actuator/health`) in all services. `SyncHealthIndicator` in OBConnector lib reports DOWN after 10+ consecutive errors or 5 minutes without a successful sync. | +| SLF4J MDC | (transitive) | Distributed tracing via MDC fields: `runId`, `workflow`, `entity`, `entityId`. Set in `DbzListener` and workflow runners, cleared in `finally` blocks. | +| Jaeger | (Docker Compose) | Distributed tracing UI for local dev. Port 16686. | +| JaCoCo | 0.8.10 | Code coverage reports. Aggregated at root project via `jacocoRootReport` task. | + +### Code generation and templating + +| Component | Version | Usage | +|---|---|---| +| FreeMarker | 2.3.31 | Template engine for entity code generation in `generate_entities`. 
| +| Thymeleaf | (Spring Boot BOM) | Template engine used in OBConnector lib for workflow configuration rendering. | +| Apache Commons JEXL3 | 3.3 | Expression language for runtime field mapping evaluation in OBConnector lib (`ExternalSystemConfiguration`). | +| Hibernate Core | 5.4.2 | Schema introspection in `generate_entities`. Not used for runtime ORM (that is Spring Data JPA). | +| Google Protobuf | 3.19.4 | gRPC message serialization. Used in DAS gRPC transport and `grpc.common` module. | +| protobuf-gradle-plugin | 0.8.18 | Generates Java classes from `.proto` files for `grpc.common`. | + +### Spring ecosystem extras + +| Component | Version | Usage | +|---|---|---| +| SpringDoc OpenAPI (webmvc) | 2.2.0 | Swagger UI generation in Auth and DAS. | +| SpringDoc OpenAPI (UI, non-starter) | 1.7.0 | Swagger UI in AsyncProcess (older variant). | +| Spring HATEOAS | 1.4.0 / 2.1.2 | Hypermedia links in REST responses (clientrest_core, Auth). | +| Spring Cloud OpenFeign | 4.0.4 | Declarative HTTP clients in Auth and clientrest_core. | +| Feign OkHttp / Feign Jackson | 12.5 | OkHttp transport and Jackson encoder/decoder for Feign clients. | +| JSqlParser | 5.1 | SQL AST parsing used in DAS for dynamic query manipulation. | +| JSONPath (Jayway) | 2.8.0 | JSONPath expression evaluation in das_core, entities, OBConnector worker. | +| json-smart | 2.5.x | JSONPath dependency. Used alongside Jayway JSONPath. | +| Apache Commons Lang3 | 3.12.0 / 3.13.0 | String utilities across multiple modules. | +| Lombok | 1.18.22 / 1.18.30 | Boilerplate reduction (`@Slf4j`, `@Data`, `@Builder`, etc.). Java 17 required. | + +--- + +## 7. Key Architectural Patterns + +### 7.1 Sync workflow pipeline + +The OBConnector uses a fixed six-step sequential pipeline defined in `SyncWorkflowBase`: + +``` +MAP → PRE_LOGIC → SYNC → POST_LOGIC → PROCESS_DATA → POST_ACTION +``` + +Each step is dispatched via `SyncActivities`. 
Two concrete workflow implementations exist, distinguished by Spring `@Qualifier`: + +- `"receive.workflow"` / `"receive.activity"` — Handles external system → Etendo direction. +- `"send.workflow"` / `"send.activity"` — Handles Etendo → external system direction (triggered by Debezium CDC). + +Entity-specific logic is registered via `SyncOperation.appliesTo(entityName)`. The framework routes by entity name, making it straightforward to add new entity handlers without modifying the pipeline. + +### 7.2 Resilience patterns in OBConnector lib + +| Pattern | Class | Behavior | +|---|---|---| +| Message deduplication | `MessageDeduplicationService` | In-memory `ConcurrentHashMap`. Composite key: `entity\|id\|verb\|SHA-256(data)`. Configurable TTL via `dedup.ttl.seconds` (default 300s). | +| Dead Letter Topic replay | `DltReplayService` | Failed messages stored in `ConcurrentLinkedQueue`. `replayAll()` re-publishes to original topics. Max queue via `dlt.max.queue.size` (default 1000). | +| HTTP retry with backoff | `HttpRetryHelper` | 3 retries, 1s initial delay, 2.0x multiplier. Retries on `IOException`. Wraps into `SyncException` after exhaustion. | +| Saga compensation | `SagaManager` | Tracks compensation actions per `runId`. On failure, executes compensations in reverse registration order. | +| Idempotent workflow | `SyncWorkflowBase` | Tracks `lastCompletedStep` on `SynchronizationEntity`. Skips already-completed steps on retry. | +| Kafka offset management | `StreamConfiguration` | `AckMode.RECORD` with auto-commit disabled. At-least-once delivery guarantee. | + +### 7.3 Config server bootstrap + +All services declare `spring.cloud.config.uri` pointing to the Config Server (default `http://localhost:8888`). Config is fetched before the application context starts. Configuration files are stored at `/rxconfig` on the Config Server host (e.g., `worker.yaml`, `obconnector.yaml`, `das.yaml`). 
+ +### 7.4 Dynamic DAS dependency loading + +DAS scans all `build.gradle` files in the multi-project for the `includeInDasDependencies = true` property at configuration time. Matching modules are added to DAS's `implementation` classpath automatically. This enables shipping new entity mappers as separate Gradle subprojects without touching the DAS build file. + +### 7.5 Entity code generation pipeline + +``` +Etendo PostgreSQL schema + │ + │ (run generate_entities CLI tool) + v + FreeMarker templates + │ + v + modules_gen/ + ├── entities/ (JPA entities, repositories, projections, mappings) + ├── entitiesModel/ (HATEOAS DTOs) + └── clientrest/ (Feign client interfaces) + │ + │ (consumed by) + v + modules_core/das/ (loads entities as codegen classpath dependency) + modules_core/auth/ (uses clientrest for DAS calls) +``` + +Generated source is committed to version control. Regeneration is triggered manually when the Etendo DB schema changes by running `./gradlew :com.etendorx.generate_entities:bootRun`. diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 00000000..6f58743c --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,723 @@ +# EtendoRX Configuration Reference + +This document is the authoritative reference for all configuration files, properties, and mechanisms in EtendoRX. It covers the full configuration hierarchy, the template system, every YAML file in `rxconfig/`, key properties, local development overrides, and environment variables. + +--- + +## Table of Contents + +1. [Configuration Hierarchy](#1-configuration-hierarchy) +2. [Template System](#2-template-system) +3. [File-by-File Reference](#3-file-by-file-reference) + - [application.yaml](#applicationyaml) + - [das.yaml](#dasyaml) + - [worker.yaml](#workeryaml) + - [obconnector.yaml](#obconnectoryaml) + - [obconnsrv.yaml](#obconnsrvyaml) + - [auth.yaml](#authyaml) + - [edge.yaml](#edgeyaml) + - [asyncprocess.yaml](#asyncprocessyaml) +4. 
[Key Properties Table](#4-key-properties-table)
+5. [Local Development Overrides](#5-local-development-overrides)
+6. [Environment Variables](#6-environment-variables)
+
+---
+
+## 1. Configuration Hierarchy
+
+EtendoRX uses a layered Spring configuration model. Properties are resolved in the following order (highest precedence first):
+
+```
+application-local.properties (active when spring.profiles.active=local)
+ |
+ v
+rxconfig/<service>.yaml (served by Spring Cloud Config Server on :8888)
+ |
+ v
+rxconfig/application.yaml (global defaults, served to all services)
+ |
+ v
+application.properties (embedded in each service JAR — bootstrap only)
+```
+
+### 1.1 Spring Cloud Config Server (port 8888)
+
+The Config Server (`com.etendorx.configserver`) serves YAML files from the `rxconfig/` directory at the project root. All client services bootstrap with the following in their embedded `application.properties`:
+
+```properties
+config.server.url=http://localhost:8888
+spring.config.import=optional:configserver:${config.server.url}
+spring.application.name=<service-name>
+```
+
+The `spring.application.name` value determines which YAML file the service fetches:
+
+| Service module | `spring.application.name` | Config file fetched |
+|---|---|---|
+| `com.etendorx.das` | `das` | `rxconfig/das.yaml` |
+| `com.etendorx.auth` | `auth` | `rxconfig/auth.yaml` |
+| `com.etendorx.edge` | `edge` | `rxconfig/edge.yaml` |
+| `com.etendorx.asyncprocess` | `asyncprocess` | `rxconfig/asyncprocess.yaml` |
+| `com.etendorx.integration.obconn.worker` | `worker` | `rxconfig/worker.yaml` |
+| `com.etendorx.integration.obconn.server` | `obconnsrv` | `rxconfig/obconnsrv.yaml` |
+
+All services additionally receive `rxconfig/application.yaml` as shared global defaults.
+
+The `optional:` prefix means startup does not fail if the Config Server is unreachable. This is what enables local-profile mode (see section 1.3). 
+ +### 1.2 Service Startup Order + +When running with the Config Server, services must start in the following order: + +1. Config Server (`:8888`) — must be healthy before any other service starts +2. Auth (`:8094`), DAS (`:8092`), Edge (`:8096`) — in parallel +3. OBConnector Server (`:8101`), OBConnector Worker (`:8102`) + +`make up` enforces this order and waits on `/actuator/health` at each step. + +### 1.3 Local Profile Mode + +When `SPRING_PROFILES_ACTIVE=local` is set (passed via `-Dspring.profiles.active=local` in `BOOTRUN_ARGS` in the Makefile), each service loads its `application-local.properties` file from within its own module resources. These files contain self-contained configuration that does not require a running Config Server. + +`make up-local` uses this mode. It starts infrastructure (Redpanda), generates entities, builds and starts DAS, OBConnector Server, OBConnector Worker, and Async Process — all with local profiles, bypassing the Config Server, Auth, and Edge services entirely. + +--- + +## 2. Template System + +### 2.1 Overview + +All files in `rxconfig/` ending in `.yaml.template` are the version-controlled source of truth. The corresponding `.yaml` files (without the `.template` suffix) are **gitignored** (see `.gitignore` line: `/rxconfig/*.yaml`) and must be generated locally before running services. + +### 2.2 The `make config` Command + +Running `make config` performs the following steps: + +1. For each `rxconfig/*.yaml.template`, if the corresponding `.yaml` does not already exist, it copies the template to create the `.yaml`. Existing files are skipped to preserve manual edits. +2. 
After copying, it injects database connection values from `gradle.properties` into `rxconfig/das.yaml` using `sed`: + - `url:` — set to `bbdd.url/bbdd.sid` (e.g., `jdbc:postgresql://localhost:5432/etendo`) + - `username:` — set to `bbdd.user` + - `password:` — set to `bbdd.password` + +```bash +make config +``` + +This is automatically called as a dependency of `make up`, `make up-local`, and `make up-kafka`. + +### 2.3 Gradle Properties as the Source of Truth for Database Config + +The following properties in `gradle.properties` are the canonical source for database configuration injected into `das.yaml`: + +```properties +bbdd.rdbms=POSTGRE +bbdd.driver=org.postgresql.Driver +bbdd.url=jdbc:postgresql://localhost\:5432 +bbdd.sid=etendo +bbdd.systemUser=postgres +bbdd.systemPassword=syspass +bbdd.user=tad +bbdd.password=tad +bbdd.sessionConfig=select update_dateFormat('DD-MM-YYYY') +``` + +Note that `bbdd.url` uses a backslash-escaped colon (`\:`) in `gradle.properties` to prevent Gradle from misinterpreting it; the Makefile strips the backslash when injecting the value into YAML. + +### 2.4 What Templates Do Not Replace + +Most `.yaml.template` files are identical to the target `.yaml` — they act as safe defaults. The only template with active substitution logic is `das.yaml` (DB credentials via `make config`). All other service-specific secrets (JWT tokens, private keys, connector instance UUIDs) must be edited directly in the generated `.yaml` files after `make config` creates them. + +--- + +## 3. File-by-File Reference + +### `application.yaml` + +**Path:** `rxconfig/application.yaml` +**Served to:** All services (global defaults) + +This file provides shared configuration inherited by every service that connects to the Config Server. 
+ +```yaml +classic: + url: http://localhost:8080/etendo + +das: + url: http://localhost:8092 + grpc: + ip: localhost + port: 9090 + +management: + endpoints: + web: + exposure: + include: '*' + +spring: + output: + ansi: + enabled: ALWAYS + +public-key: > + -----BEGIN PUBLIC KEY----- + MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEo0SYGXhAXy35V802Hkhbj0pcARpV + Slw2Nfm2liTNi9BPqNzS8i1hWNao37nUVGPB9wJEqDWNypn0+u4e1nuehQ== + -----END PUBLIC KEY----- +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `classic.url` | `http://localhost:8080/etendo` | Base URL of the Etendo Classic (Openbravo) backend | +| `das.url` | `http://localhost:8092` | Base URL of the Data Access Service | +| `das.grpc.ip` | `localhost` | gRPC host for DAS | +| `das.grpc.port` | `9090` | gRPC port for DAS | +| `management.endpoints.web.exposure.include` | `'*'` | Actuator endpoints exposed. Restrict to `health,metrics` in production | +| `spring.output.ansi.enabled` | `ALWAYS` | ANSI color codes in console output | +| `public-key` | (EC public key, PEM) | ES256 public key used by services to verify JWT tokens issued by Auth | + +**Notes:** + +- `management.endpoints.web.exposure.include: '*'` exposes all Spring Boot Actuator endpoints (health, metrics, env, beans, etc.). For production deployments, restrict this to only the required endpoints. +- `public-key` is the EC (P-256) public key corresponding to the private key in `auth.yaml`. All services use this to verify JWT tokens without calling Auth on every request. + +--- + +### `das.yaml` + +**Path:** `rxconfig/das.yaml` +**Application name:** `das` +**Port:** `8092` + +The Data Access Service (DAS) configuration. DAS is the central data layer that exposes Etendo database entities as REST endpoints. Its database credentials are injected by `make config` from `gradle.properties`. 
+ +```yaml +server: + port: 8092 + +spring: + datasource: + url: jdbc:postgresql://localhost:5432/etendo + username: tad + password: tad + jackson: + serialization: + FAIL_ON_EMPTY_BEANS: false + +scan: + basePackage: + +post-upsert: true +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `server.port` | `8092` | HTTP port DAS listens on | +| `spring.datasource.url` | `jdbc:postgresql://localhost:5432/etendo` | JDBC URL for the Etendo PostgreSQL database. Injected from `bbdd.url/bbdd.sid` | +| `spring.datasource.username` | `tad` | Database user. Injected from `bbdd.user` | +| `spring.datasource.password` | `tad` | Database password. Injected from `bbdd.password` | +| `spring.jackson.serialization.FAIL_ON_EMPTY_BEANS` | `false` | Prevents Jackson serialization errors on entities with no serializable fields | +| `scan.basePackage` | (empty) | Base package for entity scanning. Set by code generation to `com.etendorx.integration.to_openbravo.mapping` when entities are generated | +| `post-upsert` | `true` | When `true`, REST API `POST` requests operate as upsert (insert or update). When `false`, `POST` only inserts | + +**Notes:** + +- The datasource is a direct JDBC connection to the Etendo PostgreSQL database. DAS is the only service with direct DB access; all other services access data through DAS REST or gRPC APIs. +- DAS also starts a Hibernate `CustomInterceptor` (`spring.jpa.properties.hibernate.session_factory.statement_inspector`) configured in `application.properties`, not in `das.yaml`. + +--- + +### `worker.yaml` + +**Path:** `rxconfig/worker.yaml` +**Application name:** `worker` +**Port:** `0` (random, assigned at startup) in the generated yaml; `8102` in the template + +The OBConnector Worker configuration. The worker is the Kafka consumer that executes the sync pipeline (receive and send workflows). 
+ +```yaml +server: + port: 0 + +openbravo: + url: + +classic: + token: + +dashboard: + enabled: true +``` + +Note: The `.yaml.template` version sets `server.port: 8102` and comments out `dashboard`. The generated `worker.yaml` (after `make config`) sets `server.port: 0` by default. In practice, the local profile (`:8102`) takes precedence when running via `make up-local`. + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `server.port` | `0` (template: `8102`) | HTTP port. `0` means a random port is assigned; use local profile to fix it at `8102` | +| `openbravo.url` | (empty) | URL of the Etendo Classic instance. Must be set to the same value as `classic.url` in `application.yaml` | +| `classic.token` | (empty) | JWT token used by the worker for REST calls to Etendo Classic. Must be a valid ES256 token | +| `dashboard.enabled` | `true` | When `true`, enables the worker dashboard UI at `/dashboard`. Set to `false` to disable | + +**Notes:** + +- Kafka bootstrap servers are not explicitly set in `worker.yaml`; they are set via `spring.kafka.bootstrap-servers` in `application-local.properties` for local development, or via the Kafka cluster configuration in production. +- The `classic.token` must correspond to a valid user/role in the Etendo database and is used to authenticate calls to the `/sws/` endpoints via Bearer authorization. + +--- + +### `obconnector.yaml` + +**Path:** `rxconfig/obconnector.yaml` +**Application name:** not directly used as a Spring app name; properties loaded by the worker + +This file carries OBConnector-specific configuration that the worker loads in addition to `worker.yaml`. It defines the connector identity, the token for the async API, and the Kafka consumer group. 
+ +```yaml +token: +connector: + instance: + user: + +async-api-url: http://localhost:8099 + +spring: + kafka: + consumer: + +openbravo: + token: + url: +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `token` | (empty) | JWT token used by the connector for internal EtendoRX service calls | +| `connector.instance` | (empty) | UUID identifying this connector instance in the Etendo database (`etrx_rx_services_access` table). Must match the registered connector record | +| `connector.user` | (empty) | User ID associated with the connector for reactivity operations | +| `async-api-url` | `http://localhost:8099` | Base URL of the Async Process service | +| `spring.kafka.consumer` | (empty block) | Kafka consumer configuration. Consumer group ID and other Kafka consumer settings go here | +| `openbravo.token` | (empty) | Alternative token field for Etendo Classic calls (may overlap with `classic.token` in worker.yaml depending on usage path) | +| `openbravo.url` | (empty) | URL of the Etendo Classic instance used for obconnector-specific calls | + +**Notes:** + +- `connector.instance` is the most critical deployment-specific value. It must be the UUID of the `ETRX_RX_SERVICES_ACCESS` record that grants this connector access to the Etendo instance. An incorrect value will cause silent authorization failures. +- In local development, `connector.instance` is set in `application-local.properties` of the worker module (see Section 5). + +--- + +### `obconnsrv.yaml` + +**Path:** `rxconfig/obconnsrv.yaml` +**Application name:** `obconnsrv` +**Port:** `8101` + +The OBConnector Server configuration. The server is a lightweight REST API that receives sync requests and enqueues them for the worker. 
+ +```yaml +server: + port: 8101 +token: +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `server.port` | `8101` | HTTP port the OBConnector Server listens on | +| `token` | (empty) | JWT token used by the server for internal service-to-service authentication | + +**Notes:** + +- The server is intentionally minimal. It delegates all heavy processing to the worker via Kafka. +- REST endpoints are exposed at `/api/sync/` and require an `X-TOKEN` header for authentication. +- Auth validation can be disabled for local development via `auth.disabled=true` in `application-local.properties`. + +--- + +### `auth.yaml` + +**Path:** `rxconfig/auth.yaml` +**Application name:** `auth` +**Port:** `8094` + +The Auth service configuration. Auth is responsible for issuing and validating JWT tokens using EC (Elliptic Curve, P-256) key pairs. It supports OAuth2 client configuration and exposes a `/api/authenticate` login endpoint. + +```yaml +server: + port: 8094 + +token: + +admin.token: + +management: + endpoint: + restart: + enabled: true + +spring: + security: + oauth2: + client: + registration: + google: + client-id: placeholder + client-secret: placeholder + +private-key: > + -----BEGIN PRIVATE KEY----- + MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgM4dc5BquzL28t/+9 + BEZfjsQdFzdePiCwcNxYQbdi4BGhRANCAATDE+yXNQM6OCJP3ENNckQc2YOyW2FM + zmFsXfNSMMOppqYczKzri9q9QuU/k+9WzMAlSNQXj4AdX5k8F8bjp9We + -----END PRIVATE KEY----- + +logging: + level: + root: INFO +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `server.port` | `8094` | HTTP port Auth listens on | +| `token` | (ES256 JWT) | JWT token used by Auth for calls to Etendo Classic (`/sws/` endpoints). 
The token encodes user, client, role, organization, and warehouse |
+| `admin.token` | (ES256 JWT) | JWT token with elevated privileges (different organization/warehouse context) |
+| `management.endpoint.restart.enabled` | `true` | Enables the `/actuator/restart` endpoint for hot-reloading Auth without a process restart |
+| `spring.security.oauth2.client.registration.google.client-id` | `placeholder` | Google OAuth2 client ID for social login. Replace with a real credential if Google login is needed |
+| `spring.security.oauth2.client.registration.google.client-secret` | `placeholder` | Google OAuth2 client secret |
+| `private-key` | (EC private key, PKCS8 PEM) | ES256 private key used to sign JWT tokens. Must correspond to `public-key` in `application.yaml` |
+| `logging.level.root` | `INFO` | Root log level for the Auth service |
+
+**Notes:**
+
+- The `private-key` / `public-key` pair is an EC P-256 (secp256r1) key pair. The private key is PKCS8-encoded PEM stored in `auth.yaml`; the public key is a raw SubjectPublicKeyInfo PEM stored in `application.yaml`.
+- Tokens (`token`, `admin.token`) are standard ES256 JWTs. Their payload claims include `iss`, `aud`, `user`, `client`, `role`, `organization`, `warehouse`, and `iat` (issued-at). Do not reuse sample tokens with a stale `iat` in production; regenerate them as needed.
+- The `token` value in `auth.yaml` is used by Auth itself to call back into Etendo Classic, not for incoming client authentication.
+
+---
+
+### `edge.yaml`
+
+**Path:** `rxconfig/edge.yaml`
+**Application name:** `edge`
+**Port:** `8096`
+
+The Edge service is a Spring Cloud Gateway that provides a single entry point to the EtendoRX service mesh. It routes incoming HTTP requests to the appropriate backend service. 
+ +```yaml +server: + port: 8096 + +logging: + level: + org: + springframework: + web: DEBUG + hibernate: ERROR + +etendorx: + auth: + url: http://localhost:8094 + zapier: + url: http://localhost:8091 + +spring: + cloud: + gateway: + routes: + - id: login_auth_route + uri: ${etendorx.auth.url} + predicates: + - Method=GET,POST + - Path=/login + filters: + - RewritePath=/login, /api/authenticate + + - id: root-route + uri: no://op + predicates: + - Method=GET,POST + - Path=/ + filters: + - SetStatus=200 +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `server.port` | `8096` | HTTP port the Edge gateway listens on | +| `logging.level.org.springframework.web` | `DEBUG` | Log level for Spring web layer. Set to `INFO` or `WARN` in production to reduce verbosity | +| `logging.level.org.hibernate` | `ERROR` | Hibernate log level (suppress in Edge since it has no DB) | +| `etendorx.auth.url` | `http://localhost:8094` | Base URL of the Auth service. Used as the upstream URI for login route | +| `etendorx.zapier.url` | `http://localhost:8091` | Base URL of the Zapier integration service (if deployed) | +| `spring.cloud.gateway.routes[login_auth_route].uri` | `${etendorx.auth.url}` | Upstream target for `GET/POST /login` — proxied to Auth's `/api/authenticate` | +| `spring.cloud.gateway.routes[root-route].uri` | `no://op` | No-op route that returns HTTP 200 for requests to `/` (health probe compatibility) | + +**Notes:** + +- The Edge service is skipped in `make up-local` (local mode). Services are accessed directly on their individual ports. +- Additional routes for DAS, Auth API, and custom microservices can be added under `spring.cloud.gateway.routes` in this file. +- The `DEBUG` log level for the web layer generates significant output; lower it in any environment with meaningful traffic. 
+ +--- + +### `asyncprocess.yaml` + +**Path:** `rxconfig/asyncprocess.yaml` +**Application name:** `asyncprocess` +**Port:** `8099` + +The Async Process service configuration. This service manages asynchronous processing using Kafka Streams. + +```yaml +bootstrap_servers_config: localhost:9092 +application_id_config: async-process-queries +``` + +**Key properties:** + +| Property | Default | Description | +|---|---|---| +| `bootstrap_servers_config` | `localhost:9092` | Kafka broker address for Kafka Streams. Note: in local development this is overridden to `localhost:29092` (Redpanda) via `application-local.properties` | +| `application_id_config` | `async-process-queries` | Kafka Streams application ID. Used as the consumer group prefix and state store directory name | + +**Notes:** + +- The `asyncprocess.yaml` is minimal; most runtime configuration for this service comes from `application.yaml` (global defaults) and `application-local.properties` (local overrides for Kafka broker, state dir, etc.). +- `application_id_config` (not the standard `spring.kafka.streams.application-id`) is a custom property read directly by the service's Kafka Streams configuration bean. + +--- + +## 4. Key Properties Table + +The following table consolidates all significant properties across the configuration files. 
+ +| Property | Default Value | Description | Used By | +|---|---|---|---| +| `classic.url` | `http://localhost:8080/etendo` | Etendo Classic base URL | All services (via `application.yaml`) | +| `das.url` | `http://localhost:8092` | DAS REST base URL | All services (via `application.yaml`) | +| `das.grpc.ip` | `localhost` | DAS gRPC host | Services using gRPC | +| `das.grpc.port` | `9090` | DAS gRPC port | Services using gRPC | +| `public-key` | EC PEM | ES256 public key for JWT verification | All services | +| `management.endpoints.web.exposure.include` | `'*'` | Actuator endpoint exposure | All services | +| `server.port` (das) | `8092` | DAS HTTP port | DAS | +| `spring.datasource.url` | `jdbc:postgresql://localhost:5432/etendo` | DAS database JDBC URL | DAS | +| `spring.datasource.username` | `tad` | DAS database username | DAS | +| `spring.datasource.password` | `tad` | DAS database password | DAS | +| `post-upsert` | `true` | Enable POST-as-upsert in DAS REST API | DAS | +| `scan.basePackage` | (empty) | Entity scan package for DAS | DAS | +| `server.port` (worker) | `0` / `8102` | Worker HTTP port | Worker | +| `openbravo.url` | (empty) | Etendo Classic URL for worker calls | Worker | +| `classic.token` | (empty) | JWT token for Etendo Classic API | Worker | +| `dashboard.enabled` | `true` | Enable worker dashboard at `/dashboard` | Worker | +| `spring.kafka.bootstrap-servers` | `localhost:29092` | Kafka broker address | Worker, Async Process | +| `connector.instance` | (empty) | UUID of the connector's `ETRX_RX_SERVICES_ACCESS` record | Worker | +| `connector.user` | (empty) | User ID for reactivity operations | Worker | +| `async-api-url` | `http://localhost:8099` | Async Process service URL | Worker, OBConn Server | +| `openbravo.token` | (empty) | JWT token for Etendo Classic API (obconnector path) | Worker | +| `server.port` (obconnsrv) | `8101` | OBConnector Server HTTP port | OBConn Server | +| `token` (obconnsrv) | (empty) | JWT for internal auth | 
OBConn Server | +| `server.port` (auth) | `8094` | Auth service HTTP port | Auth | +| `token` (auth) | ES256 JWT | Token used by Auth to call Etendo Classic | Auth | +| `admin.token` | ES256 JWT | Elevated privilege token | Auth | +| `private-key` | EC PKCS8 PEM | ES256 private key for signing JWTs | Auth | +| `management.endpoint.restart.enabled` | `true` | Enable actuator restart endpoint | Auth | +| `spring.security.oauth2.client.registration.google.client-id` | `placeholder` | Google OAuth2 client ID | Auth | +| `spring.security.oauth2.client.registration.google.client-secret` | `placeholder` | Google OAuth2 client secret | Auth | +| `logging.level.root` (auth) | `INFO` | Auth root log level | Auth | +| `server.port` (edge) | `8096` | Edge gateway HTTP port | Edge | +| `etendorx.auth.url` | `http://localhost:8094` | Auth service URL for gateway routing | Edge | +| `etendorx.zapier.url` | `http://localhost:8091` | Zapier integration service URL | Edge | +| `logging.level.org.springframework.web` | `DEBUG` | Spring web log level in Edge | Edge | +| `bootstrap_servers_config` | `localhost:9092` | Kafka Streams broker (asyncprocess) | Async Process | +| `application_id_config` | `async-process-queries` | Kafka Streams application ID | Async Process | +| `auth.disabled` | (not set) | When `true`, bypasses JWT validation. Development only | Worker, OBConn Server, DAS | +| `dedup.ttl.seconds` | `300` | Message deduplication TTL in seconds | Worker (lib) | +| `dlt.max.queue.size` | `1000` | Max DLT replay queue size | Worker (lib) | + +--- + +## 5. Local Development Overrides + +When `spring.profiles.active=local` is active, each service loads its module-local `application-local.properties`. These files take precedence over Config Server-supplied YAML and allow running without a Config Server. 
+ +### 5.1 Worker: `application-local.properties` + +**Path:** `modules/com.etendorx.integration.obconnector/com.etendorx.integration.obconn.worker/src/main/resources/application-local.properties` + +```properties +server.port=8102 + +# Shared config (normally from application.yaml via Config Server) +classic.url=http://localhost:8080/etendo +das.url=http://localhost:8092 +public-key=MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEwxPslzUDOjgiT9xDTXJEHNmDslthTM5hbF3zUjDDqaamHMys64vavULlP5PvVszAJUjUF4+AHV+ZPBfG46fVng== + +# Worker-specific (normally from worker.yaml via Config Server) +openbravo.url=http://localhost:8080/etendo +classic.token= +token= +connector.instance=AC5535A2F4284094AA72B875769D0B0E +connector.user= + +# Kafka +spring.kafka.bootstrap-servers=localhost:29092 + +# Dashboard +dashboard.enabled=true + +# Disable auth for local development +auth.disabled=true + +# Expose actuator metrics +management.endpoints.web.exposure.include=health,metrics +``` + +**Properties explained:** + +| Property | Value | Notes | +|---|---|---| +| `server.port` | `8102` | Fixes the worker port (overrides `0` in `worker.yaml`) | +| `classic.url` | `http://localhost:8080/etendo` | Replaces `application.yaml` global default | +| `das.url` | `http://localhost:8092` | DAS URL, same as global default | +| `public-key` | (base64 EC key) | Inline DER-encoded public key (not PEM-wrapped). Different encoding from the PEM form in `application.yaml` | +| `openbravo.url` | `http://localhost:8080/etendo` | Etendo Classic URL for worker REST calls | +| `classic.token` | (empty — must be set) | JWT token for Etendo Classic. 
Must be filled in for sync operations to work |
+| `token` | (empty — must be set) | ES256 JWT for internal EtendoRX service calls |
+| `connector.instance` | `AC5535A2F4284094AA72B875769D0B0E` | Local dev connector instance UUID |
+| `connector.user` | (empty) | Optional; user ID for reactivity |
+| `spring.kafka.bootstrap-servers` | `localhost:29092` | Redpanda broker (port `29092`, not the default Kafka `9092`) |
+| `dashboard.enabled` | `true` | Enables the sync dashboard at `http://localhost:8102/dashboard` |
+| `auth.disabled` | `true` | Bypasses JWT verification. **Never set in production** |
+| `management.endpoints.web.exposure.include` | `health,metrics` | Exposes only health and metrics actuator endpoints (narrower than `'*'` in `application.yaml`) |
+
+### 5.2 OBConnector Server: `application-local.properties`
+
+**Path:** `modules/com.etendorx.integration.obconnector/com.etendorx.integration.obconn.server/src/main/resources/application-local.properties`
+
+```properties
+server.port=8101
+
+# Disable auth for local development
+auth.disabled=true
+
+# Expose actuator metrics
+management.endpoints.web.exposure.include=health,metrics
+```
+
+The server local profile is minimal because the server does not directly interact with Kafka or the database. It only disables auth and fixes the port. 
+ +### 5.3 DAS: `application-local.properties` + +**Path:** `modules_core/com.etendorx.das/src/main/resources/application-local.properties` + +```properties +server.port=8092 +spring.datasource.url=jdbc:postgresql://localhost:5432/etendo +spring.datasource.username=tad +spring.datasource.password=tad +spring.jackson.serialization.FAIL_ON_EMPTY_BEANS=false +scan.basePackage=com.etendorx.integration.to_openbravo.mapping +post-upsert=true + +# Disable auth for local development +auth.disabled=true + +# Expose actuator metrics +management.endpoints.web.exposure.include=health,metrics +``` + +Note that `scan.basePackage` is explicitly set here to `com.etendorx.integration.to_openbravo.mapping`, which is the package generated by `./gradlew generate.entities`. This must match the actual generated code package. + +### 5.4 Async Process: `application-local.properties` + +**Path:** `modules_core/com.etendorx.asyncprocess/src/main/resources/application-local.properties` + +```properties +server.port=8099 +bootstrap_server=localhost:29092 +spring.cloud.stream.kafka.binder.brokers=localhost:29092 +kafka.streams.host.info=localhost:8099 +kafka.streams.state.dir=/tmp/kafka-streams/async-process-local +spring.config.import=optional:configserver:http://localhost:8888 +management.endpoints.web.exposure.include=health,metrics +auth.disabled=true +``` + +Note that unlike other services, the async process local profile still declares `spring.config.import` pointing at the Config Server — the `optional:` prefix means it will proceed if the server is unavailable. This is a belt-and-suspenders approach in case the Config Server is running. + +--- + +## 6. Environment Variables + +### `JAVA_HOME` + +Must point to a Java 17 JDK. The Makefile auto-detects it using `/usr/libexec/java_home -v 17` (macOS) or falls back to `~/Library/Java/JavaVirtualMachines/corretto-17.0.18/Contents/Home`. 
+ +```bash +# Set explicitly if auto-detection fails: +export JAVA_HOME=$(/usr/libexec/java_home -v 17) +# or for Amazon Corretto: +export JAVA_HOME=~/Library/Java/JavaVirtualMachines/corretto-17.0.18/Contents/Home +``` + +**Why Java 17 is required:** Lombok 1.18.22 (used across all modules) is incompatible with JDK 21+ due to changes in the compiler API for annotation processing. All Gradle builds and `bootRun` tasks must use Java 17. + +You can verify the configured Java version with: + +```bash +make check-java +``` + +### `SPRING_PROFILES_ACTIVE` + +When set to `local`, activates the `application-local.properties` profile in each service module, bypassing the Config Server. + +```bash +export SPRING_PROFILES_ACTIVE=local +``` + +The Makefile passes this via the Gradle JVM argument flag: + +```makefile +BOOTRUN_ARGS := -Dspring.profiles.active=local +``` + +This is appended to all `bootRun` invocations (e.g., `$(GRADLE) :com.etendorx.das:bootRun $(BOOTRUN_ARGS)`). + +### Summary of Environment Variables + +| Variable | Required | Default | Description | +|---|---|---|---| +| `JAVA_HOME` | Yes | Auto-detected (macOS `/usr/libexec/java_home -v 17`) | Path to Java 17 JDK home directory | +| `SPRING_PROFILES_ACTIVE` | No | (unset) | Set to `local` to use local profile overrides instead of Config Server | + +--- + +## Appendix: Service Port Reference + +| Service | Port | Config file | Application name | +|---|---|---|---| +| Spring Cloud Config Server | `8888` | (embedded) | `configserver` | +| Auth | `8094` | `rxconfig/auth.yaml` | `auth` | +| DAS | `8092` | `rxconfig/das.yaml` | `das` | +| Edge Gateway | `8096` | `rxconfig/edge.yaml` | `edge` | +| Async Process | `8099` | `rxconfig/asyncprocess.yaml` | `asyncprocess` | +| OBConnector Server | `8101` | `rxconfig/obconnsrv.yaml` | `obconnsrv` | +| OBConnector Worker | `8102` | `rxconfig/worker.yaml` | `worker` | +| Mock Receiver (loadtest) | `8090` | (in-module profile) | — | +| Redpanda Broker | `29092` | 
(docker-compose) | — | +| Redpanda Console | `9093` | (docker-compose) | — | +| Kafka Connect API | `8083` | (docker-compose) | — | +| Jaeger UI | `16686` | (docker-compose) | — | diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 00000000..1da9e056 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,497 @@ +# EtendoRX OBConnector — Getting Started + +This guide covers everything needed to clone, configure, build, and run the EtendoRX OBConnector locally. All commands assume a Unix-like shell (bash/zsh/fish) on macOS or Linux. + +--- + +## 1. Prerequisites + +### Java 17 + +Java 17 is required. **Amazon Corretto 17 is the recommended distribution.** + +> **Important:** GraalVM is NOT compatible. It causes Lombok annotation processing failures at compile time. If your `JAVA_HOME` points to a GraalVM installation, the build will fail with cryptic errors about missing generated classes. + +To verify your Java version: + +```bash +java -version +# Must output: openjdk version "17.x.x" ... +``` + +To install Amazon Corretto 17 on macOS via Homebrew: + +```bash +brew install --cask corretto17 +export JAVA_HOME=$(/usr/libexec/java_home -v 17) +``` + +The Makefile auto-detects Java 17 using `/usr/libexec/java_home -v 17` on macOS, falling back to `$HOME/Library/Java/JavaVirtualMachines/corretto-17.0.18/Contents/Home`. You can override it explicitly: + +```bash +export JAVA_HOME=/path/to/corretto-17 +``` + +### Docker and Docker Compose + +Required for running the message broker infrastructure (Redpanda or Kafka) and related services. + +```bash +docker --version # Docker 20.x or later +docker compose version # Docker Compose v2.x or later +``` + +### Git + +Required for cloning and managing submodules. + +```bash +git --version +``` + +### PostgreSQL with Etendo ERP + +The services connect to a running Etendo Classic (OpenBravo-based) PostgreSQL database. 
Default connection parameters: + +| Parameter | Default value | +|-----------|--------------| +| Host | localhost | +| Port | 5432 | +| Database | etendo | +| User | tad | +| Password | tad | + +The database must have Etendo ERP installed and initialized. The DAS service generates entity classes from the database schema at startup — a missing or empty database will cause the build to fail. + +You can verify connectivity at any time after configuring `gradle.properties` with: + +```bash +make check-db +``` + +### GitHub Personal Access Token + +The Gradle build pulls dependencies from the private Maven registry at `maven.pkg.github.com/etendosoftware`. You need a GitHub account with access to the Etendo organization and a Personal Access Token (PAT) with at least `read:packages` scope. + +Generate a token at: https://github.com/settings/tokens + +--- + +## 2. Clone and Setup + +```bash +git clone git@github.com:etendosoftware/etendo_rx.git +cd etendo_rx +git submodule update --init --recursive +``` + +The `--recursive` flag is required. Several core services (DAS, Auth, Edge, Config Server, Async Process) live in submodules under the project root. Skipping this step results in empty module directories and a broken build. + +--- + +## 3. Configure gradle.properties + +Copy the example file and edit it: + +```bash +cp gradle.properties.example gradle.properties +``` + +Open `gradle.properties` and set the following values: + +```properties +# --- Database (Etendo Classic) --- +bbdd.url=jdbc:postgresql://localhost:5432 +bbdd.sid=etendo +bbdd.user=tad +bbdd.password=tad + +# --- GitHub Maven registry --- +githubUser=your-github-username +githubToken=ghp_your_personal_access_token + +# --- Context name (used as Docker Compose project name) --- +context.name=etendo_conn +``` + +**Notes:** + +- `bbdd.url` must NOT include the database name — that comes from `bbdd.sid`. The Makefile constructs the full JDBC URL as `${bbdd.url}/${bbdd.sid}`. 
+- `context.name` is used as the Docker Compose project prefix. Containers will be named `etendo_conn-obconn-*`. Changing this value after the first run requires stopping and removing the old containers manually. +- The `githubToken` value is sensitive. Do not commit `gradle.properties` to version control (it is listed in `.gitignore`). + +--- + +## 4. Generate Configuration + +```bash +make config +``` + +This command does two things: + +1. Copies every `rxconfig/*.yaml.template` file to `rxconfig/*.yaml` (skipping files that already exist, so it is safe to re-run). +2. Injects the database connection parameters from `gradle.properties` into `rxconfig/das.yaml`, updating the `url`, `username`, and `password` fields under the datasource section. + +The generated YAML files in `rxconfig/` are the runtime configuration consumed by each service when running with the `local` Spring profile (i.e., without a Config Server). + +Run `make config` again any time you change database credentials in `gradle.properties`. + +--- + +## 5. Start Infrastructure + +The OBConnector requires a Kafka-compatible message broker. Two options are available: + +### Option A: Redpanda (default, recommended) + +```bash +make infra +``` + +Redpanda is a Kafka-compatible broker implemented in C++. It is significantly lighter than the full Confluent Kafka stack and starts in a few seconds. This is the default choice for local development. 
+ +After startup the following endpoints are available: + +| Service | URL | +|---------|-----| +| Redpanda Broker (Kafka protocol) | `localhost:29092` | +| Redpanda Console (web UI) | http://localhost:9093 | +| Kafka Connect API | http://localhost:8083 | +| Jaeger UI (distributed tracing) | http://localhost:16686 | + +### Option B: Full Confluent Kafka Stack + +```bash +make infra-kafka +``` + +This starts the full Confluent platform, which includes a ZooKeeper-dependent Kafka broker, Kafka UI, Kafka Connect UI, and a dedicated PostgreSQL instance for Debezium. Use this option if you need to test the real Debezium CDC connector behavior. + +Additional endpoints (beyond what Redpanda provides): + +| Service | URL | +|---------|-----| +| Kafka Connect UI | http://localhost:8002 | +| PostgreSQL for Debezium | `localhost:5465` | + +### Verify Infrastructure + +```bash +make infra-ps +``` + +Shows the status of all running infrastructure containers. All containers should show `Up` status before proceeding. + +To tail infrastructure logs: + +```bash +make infra-logs +``` + +> **Timing note:** Redpanda typically takes 3-5 seconds to be ready. The full Kafka stack can take 30-60 seconds. If services fail to connect to the broker on first startup, wait and retry. + +--- + +## 6. Build + +```bash +make build +``` + +This compiles the four OBConnector modules in order: + +1. `com.etendorx.integration.obconn.common` — shared domain model and utilities +2. `com.etendorx.integration.obconn.lib` — core business logic (mapping, transformation) +3. `com.etendorx.integration.obconn.server` — HTTP inbound API (port 8101) +4. 
`com.etendorx.integration.obconn.worker` — Kafka consumer/producer (port 8102) + +Individual modules can be built separately if needed: + +```bash +make build-lib # lib module only +make build-server # server module only +make build-worker # worker module only +``` + +**If the worker build fails on entity generation**, you can skip its compilation to unblock other modules: + +```bash +./gradlew :com.etendorx.integration.obconn.server:build \ + -x :com.etendorx.integration.to_openbravo.worker:compileJava +``` + +The first build will be slow (5-15 minutes) because it downloads all dependencies. Subsequent builds use the Gradle cache and are significantly faster. + +--- + +## 7. Run + +### Fastest: Local Mode (no Config Server) + +```bash +make up-local +``` + +This is the recommended mode for day-to-day development. It skips the Config Server and Auth/Edge gateway services, using local YAML files from `rxconfig/` directly via `-Dspring.profiles.active=local`. + +What `make up-local` does, in order: + +1. Runs `check-java` and `check-db` preflight checks. +2. Starts infrastructure (Redpanda) via `make infra`. +3. Runs `make config` to ensure YAML files are up to date. +4. Checks if DAS is already running on `:8092`; if not, generates entities from the database schema, compiles DAS, and starts it. +5. Starts OBConnector Server (`:8101`), OBConnector Worker (`:8102`), Async Process (`:8099`), and Mock Receiver (`:8090`) in background processes. +6. Waits for each service to respond on its `/actuator/health` endpoint (timeout: 120 seconds). +7. Prints a summary of all running endpoints. + +All processes run in the background. 
PIDs and logs are tracked under `.run/`: + +``` +.run/ + das.pid + das.log + obconn-server.pid + obconn-server.log + obconn-worker.pid + obconn-worker.log + async.pid + async.log + mock-receiver.pid + mock-receiver.log +``` + +### Full Mode (with Config Server) + +```bash +make up +``` + +This starts the complete EtendoRX stack including Config Server (`:8888`), Auth Service (`:8094`), DAS (`:8092`), and Edge Gateway (`:8096`), in addition to the OBConnector Server and Worker. Config Server must be fully ready before the other services are launched (the Makefile polls `/actuator/health` on `:8888`). + +Use this mode when you need to test JWT authentication flows or the edge gateway routing behavior. + +### Kafka Mode (full stack + Confluent Kafka) + +```bash +make up-kafka +``` + +Same as `make up` but uses the full Confluent Kafka infrastructure instead of Redpanda. Useful for testing production-equivalent Debezium CDC connector behavior. + +--- + +## 8. Verify + +### Service Status + +```bash +make status +``` + +Shows: +- Docker Compose container status (infrastructure). +- For each `.run/*.pid` file: whether the process is `RUNNING` or `STOPPED`. + +### Health Endpoints + +Each service exposes a Spring Boot Actuator health endpoint: + +| Service | Health URL | +|---------|-----------| +| Config Server | http://localhost:8888/actuator/health | +| Auth | http://localhost:8094/actuator/health | +| DAS | http://localhost:8092/actuator/health | +| Edge | http://localhost:8096/actuator/health | +| OBConn Server | http://localhost:8101/actuator/health | +| OBConn Worker | http://localhost:8102/actuator/health | +| Async Process | http://localhost:8099/actuator/health | + +A healthy response looks like: + +```json +{"status":"UP"} +``` + +### Dev Portal + +```bash +make portal +``` + +Opens a lightweight static HTML service browser at http://localhost:8199. Lists all services, their ports, and quick links. 
+ +### Dashboard + +If `dashboard.enabled=true` is set in the worker configuration: + +``` +http://localhost:8102/dashboard +``` + +The dashboard shows real-time sync job status, retry queue depth, dead-letter topic contents, and throughput metrics. + +### Tail Logs + +```bash +make logs +``` + +Tails all `.run/*.log` files simultaneously. Use `Ctrl+C` to stop. + +--- + +## 9. First Sync Test + +Once all services are running, use the load test targets to verify end-to-end behavior. + +### Receive Workflow (external system → Etendo) + +```bash +make loadtest.receive +``` + +Sends a series of BusinessPartner JSON payloads via HTTP POST to the OBConnector Server at `http://localhost:8101/api/sync/`. The server publishes them to the `obconnector.receive` Kafka topic. The Worker picks them up, transforms them, and calls DAS to persist them in the Etendo database. + +Expected output: the command exits after sending 5 messages per thread (default: 1 thread). Check the Worker logs or dashboard for processing results. + +### Send Workflow (Etendo → external system) + +```bash +make loadtest.send +``` + +Simulates a Debezium CDC event on the `default.public.c_bpartner` Kafka topic, as if a record changed in the Etendo database. The Worker consumes the event, transforms it, and calls the Mock Receiver at `http://localhost:8090` to simulate delivery to an external system. + +Expected output: the Mock Receiver logs the received payload. Check the Worker logs for the full transformation and delivery trace. + +### Run Both + +```bash +make loadtest +``` + +Runs `loadtest.send` followed by `loadtest.receive` sequentially. + +--- + +## 10. Stopping + +```bash +make down +``` + +This kills all background service processes tracked in `.run/*.pid` and then stops all Docker infrastructure containers via `make infra-down`. It handles both Redpanda and Kafka compose files. 
+ +To stop only the infrastructure containers without touching the services: + +```bash +make infra-down +``` + +--- + +## 11. Troubleshooting + +### JAVA_HOME points to wrong JVM + +**Symptom:** Build fails with errors like `cannot find symbol` on Lombok-generated methods, or `Fatal error compiling: invalid target release: 17`. + +**Fix:** Ensure `JAVA_HOME` points to a Java 17 installation, not GraalVM or any other version. + +```bash +make check-java +# Should print: OK — Java 17 (/path/to/corretto-17) + +export JAVA_HOME=$(/usr/libexec/java_home -v 17) +``` + +If you have multiple JDKs installed, use `/usr/libexec/java_home -V` to list them all and identify the correct path. + +### Database connection fails + +**Symptom:** `make check-db` prints `FAIL — Cannot connect to PostgreSQL`, or DAS crashes at startup with a `Connection refused` or authentication error. + +**Fix:** + +1. Verify `gradle.properties` has the correct `bbdd.*` values. +2. Verify the database is running: `pg_isready -h localhost -p 5432`. +3. Verify the user and password: `psql -h localhost -U tad -d etendo`. +4. Run `make config` after any credential change to re-inject them into `rxconfig/das.yaml`. + +### Kafka broker not ready + +**Symptom:** OBConnector Worker fails to start with `org.apache.kafka.common.errors.TimeoutException: Topic not available`, or services print repeated `WARN` messages about broker connection refused. + +**Fix:** + +1. Wait 10-15 seconds after `make infra` before starting services. Redpanda needs a moment to initialize its internal topics. +2. Check container status: `make infra-ps`. +3. Tail container logs: `make infra-logs`. +4. If using Confluent Kafka (`make infra-kafka`), wait up to 60 seconds for all components to be ready. + +### Port already in use + +**Symptom:** A service fails to bind its port with `Address already in use`. + +**Fix:** + +1. Check what is running: `make status`. +2. 
If a stale process is listed as `STOPPED` but the port is occupied, find and kill it: + ```bash + lsof -ti:8101 | xargs kill -9 # example for port 8101 + ``` +3. Re-run `make up-local` — it checks each port before starting and skips already-running services. + +### Build fails on to_openbravo.worker + +**Symptom:** The build exits with a compilation error inside `com.etendorx.integration.to_openbravo.worker`. + +**Fix:** Exclude that subproject from compilation while working on the OBConnector: + +```bash +./gradlew :com.etendorx.integration.obconn.common:build \ + :com.etendorx.integration.obconn.lib:build \ + :com.etendorx.integration.obconn.server:build \ + :com.etendorx.integration.obconn.worker:build \ + -x :com.etendorx.integration.to_openbravo.worker:compileJava +``` + +### Topic messages are stale from a previous run + +**Symptom:** The Worker processes old or duplicate events from a previous test session. + +**Fix:** Purge all OBConnector Kafka topics and let them be auto-recreated: + +```bash +make purge +``` + +This deletes the following topics: `obconnector.send`, `obconnector.receive`, their DLT variants, all retry topics (`-retry-10000`, `-retry-20000`, `-retry-40000`, `-retry-60000`), and the Debezium source topic `default.public.c_bpartner`. Topics are automatically recreated when the producer or consumer reconnects. 
+ +--- + +## Quick Reference + +| Command | Description | +|---------|-------------| +| `make up-local` | Fastest startup: Redpanda + services, no Config Server | +| `make up` | Full stack: Redpanda + Config Server + all services | +| `make up-kafka` | Full stack with Confluent Kafka instead of Redpanda | +| `make down` | Stop all services and infrastructure | +| `make status` | Show running services and containers | +| `make logs` | Tail all service logs | +| `make infra` | Start Redpanda only | +| `make infra-kafka` | Start Confluent Kafka stack only | +| `make infra-down` | Stop infrastructure containers | +| `make infra-ps` | Show infrastructure container status | +| `make infra-logs` | Tail infrastructure container logs | +| `make config` | Generate YAML config from templates | +| `make build` | Compile all modules | +| `make test` | Run all unit tests | +| `make check-java` | Verify Java 17 is available | +| `make check-db` | Test PostgreSQL connectivity | +| `make loadtest.receive` | Send test payloads via HTTP (Receive workflow) | +| `make loadtest.send` | Simulate Debezium CDC event (Send workflow) | +| `make purge` | Delete and reset all OBConnector Kafka topics | +| `make portal` | Open Dev Portal at http://localhost:8199 | diff --git a/docs/infrastructure.md b/docs/infrastructure.md new file mode 100644 index 00000000..2612aec4 --- /dev/null +++ b/docs/infrastructure.md @@ -0,0 +1,358 @@ +# OBConnector Infrastructure + +This document describes the local development infrastructure for the OBConnector module. Two compose stacks are provided, selectable via `make` targets. Both expose the same external port assignments so that application configuration does not change between modes. 
+ +--- + +## Two Infrastructure Modes + +| Mode | Make Target | Compose File | Use Case | +|------|-------------|--------------|----------| +| Redpanda (default) | `make infra` | `docker-compose.redpanda.yml` | Development, CI, lightweight environments | +| Full Kafka (Confluent) | `make infra-kafka` | `docker-compose.yml` | Production parity, Confluent feature testing | + +The Redpanda stack is the recommended default for development. It eliminates the Zookeeper dependency, uses significantly less memory (512 MB for the broker), and starts faster. Both stacks provide Kafka-compatible endpoints on the same ports, so no application config changes are required when switching modes. + +--- + +## Redpanda Stack (Default) + +File: `modules/com.etendorx.integration.obconnector/infraestructure/docker-compose.redpanda.yml` + +### Services + +#### Redpanda Broker + +```yaml +image: docker.redpanda.com/redpandadata/redpanda:v24.1.1 +``` + +Redpanda is a Kafka-compatible streaming platform written in C++. It does not require Zookeeper — coordination is handled internally via Raft consensus. This single-node configuration is suitable for local development. + +| Parameter | Value | +|-----------|-------| +| Image | `redpandadata/redpanda:v24.1.1` | +| External port (Kafka) | `29092` | +| Internal port (Kafka) | `9092` (container-to-container) | +| Pandaproxy (HTTP) | `18082` (external), `8082` (internal) | +| Schema Registry | `18081` (external), `8081` (internal) | +| Memory limit | `512M` | +| CPU threads | `1` (`--smp 1`) | + +The broker advertises two listener addresses: +- `internal://redpanda:9092` — used by other containers (Kafka Connect, etc.) +- `external://localhost:29092` — used by host-machine clients (application code, `rpk` CLI) + +A health check using `rpk cluster health --exit-when-healthy` gates dependent services; Kafka Connect and the console will not start until the broker is healthy. 
+ +#### Redpanda Console + +```yaml +image: docker.redpanda.com/redpandadata/console:v2.6.0 +``` + +A web UI for browsing topics, inspecting messages, monitoring consumer group lag, and viewing partition assignments. + +| Parameter | Value | +|-----------|-------| +| Image | `redpandadata/console:v2.6.0` | +| Host port | `9093` (mapped to container port `8080`) | +| Config | Mounted from `./redpanda-console-config.yml` | + +The console depends on the broker being healthy before starting. Configuration is provided via a mounted config file at `/tmp/config.yml` inside the container. + +Access: `http://localhost:9093` + +#### Kafka Connect (Debezium) + +```yaml +image: quay.io/debezium/connect:2.3.0.Final +``` + +Kafka Connect running the Debezium connector plugins. Debezium implements Change Data Capture (CDC) — it monitors the PostgreSQL Write-Ahead Log (WAL) and publishes row-level change events (INSERT, UPDATE, DELETE) to Kafka topics. + +| Parameter | Value | +|-----------|-------| +| Image | `quay.io/debezium/connect:2.3.0.Final` | +| Host port | `8083` | +| Bootstrap servers | `redpanda:9092` (internal listener) | +| Connector group ID | `1` | +| Config storage topic | `my_connect_configs` | +| Offset storage topic | `my_connect_offsets` | +| Status storage topic | `my_connect_statuses` | + +Connectors are configured at runtime via the REST API at `http://localhost:8083`. The `CONNECT_REST_ADVERTISED_HOST_NAME` is set to `kafka-connect` to allow inter-container communication. + +#### Jaeger + +```yaml +image: jaegertracing/all-in-one:latest +``` + +Jaeger is an end-to-end distributed tracing system. The OBConnector propagates `runId`, `workflow`, `entity`, and `entityId` through SLF4J MDC and OpenTelemetry trace headers, enabling full request tracing across the server and worker. 
+ +| Parameter | Value | +|-----------|-------| +| Image | `jaegertracing/all-in-one:latest` | +| UI port | `16686` | +| gRPC (OTLP) | `4317` | +| Model port | `14250` | +| OTLP enabled | `true` | + +Access: `http://localhost:16686` + +--- + +## Full Kafka Stack (Confluent) + +File: `modules/com.etendorx.integration.obconnector/infraestructure/docker-compose.yml` + +This stack uses the official Confluent images and includes a dedicated PostgreSQL instance pre-configured for Debezium (logical replication enabled). It adds Zookeeper as the Kafka coordination layer and includes a Kafka Connect UI for visual connector management. + +### Services + +#### Zookeeper + +```yaml +image: confluentinc/cp-zookeeper:latest +``` + +Apache ZooKeeper provides distributed coordination for Kafka brokers. Required by the Confluent Kafka image. + +| Parameter | Value | +|-----------|-------| +| Host port | `22181` (mapped to container port `2181`) | +| Client port | `2181` | +| Tick time | `2000ms` | + +#### Kafka Broker + +```yaml +image: confluentinc/cp-kafka:latest +``` + +Apache Kafka broker from Confluent. Depends on Zookeeper for metadata and leader election. + +| Parameter | Value | +|-----------|-------| +| Host port (external) | `29092` | +| Host port (internal) | `9092` | +| Broker ID | `1` | +| Zookeeper connection | `zookeeper:2181` | +| Offsets replication factor | `1` (single-node, no replication) | + +Listeners: +- `PLAINTEXT://kafka:9092` — inter-container communication +- `PLAINTEXT_HOST://localhost:29092` — host-machine client access + +#### Kafka UI + +```yaml +image: provectuslabs/kafka-ui:latest +``` + +Web UI for managing Kafka topics, consumer groups, and messages. Equivalent to Redpanda Console in the Redpanda stack. 
+ +| Parameter | Value | +|-----------|-------| +| Host port | `9093` (mapped to container port `8080`) | +| Cluster name | `local` | +| Bootstrap servers | `kafka:9092` | +| Metrics port | `9997` | + +Access: `http://localhost:9093` + +#### Debezium PostgreSQL + +```yaml +image: quay.io/debezium/example-postgres:2.3.0.Final +``` + +A PostgreSQL instance pre-configured with logical replication enabled (`wal_level=logical`), which is required for Debezium CDC. This is an isolated database for integration testing and is separate from the main Etendo database. + +| Parameter | Value | +|-----------|-------| +| Host port | `5465` (mapped to container port `5432`) | +| User | `postgres` | +| Password | `syspass` | + +#### Kafka Connect (Debezium) + +```yaml +image: quay.io/debezium/connect:2.3.0.Final +``` + +Same Debezium Connect image as in the Redpanda stack, but connected to the Confluent Kafka broker instead. + +| Parameter | Value | +|-----------|-------| +| Host port | `8083` | +| Bootstrap servers | `kafka:9092` | +| Group ID | `1` | + +#### Kafka Connect UI + +```yaml +image: landoop/kafka-connect-ui +``` + +A visual interface for managing Kafka Connect connectors. Allows creating, updating, and monitoring connector configurations without using the REST API directly. + +| Parameter | Value | +|-----------|-------| +| Host port | `8002` (mapped to container port `8000`) | +| Connect URL | `http://kafka-connect:8083/` | + +Access: `http://localhost:8002` + +#### Jaeger + +Same image and configuration as in the Redpanda stack. See the Redpanda section above. + +--- + +## Kafka Topics + +The following topics are used by the OBConnector module. They are created automatically on first use or via the `make purge` target. + +| Topic | Direction | Description | +|-------|-----------|-------------| +| `obconnector.send` | Etendo → External | CDC events captured from Etendo's PostgreSQL WAL. 
`DbzListener` consumes these events and triggers the Send workflow (`MAP → PRE_LOGIC → SYNC → POST_LOGIC → PROCESS_DATA → POST_ACTION`). |
| `obconnector.receive` | External → Etendo | Messages pushed by an external system into Etendo. The Receive workflow processes these into Etendo entities via the DAS. |
| `obconnector.send.DLT` | Dead Letter | Failed messages from the Send workflow. Stored by `@DltHandler` in `DltReplayService`'s in-memory queue. Replayed via `replayAll()`. |
| `obconnector.receive.DLT` | Dead Letter | Failed messages from the Receive workflow. Same DLT handling as the Send DLT. |
| `async-process-execution` | Internal | Workflow step status updates published by the AsyncProcess service. Tracks step progression and completion state for idempotent retry. |

### Dead Letter Topic (DLT) Behavior

When a message fails all retry attempts, Spring Cloud Stream routes it to the corresponding `.DLT` topic. The `DltReplayService` stores the failed message in a `ConcurrentLinkedQueue` (max size configurable via `dlt.max.queue.size`, default 1000). Failed messages can be replayed to their original topics via `replayAll()`, which re-publishes each queued message.

---

## Debezium CDC (Change Data Capture)

Debezium monitors the PostgreSQL Write-Ahead Log (WAL) in real time. When a row is inserted, updated, or deleted in a monitored table, Debezium reads the change from the WAL and publishes a structured event to the corresponding Kafka topic.

### How It Works

1. PostgreSQL must have `wal_level=logical` configured (the Debezium example image has this pre-configured; the main Etendo database requires manual configuration).
2. A Debezium PostgreSQL connector is registered via the REST API at `http://localhost:8083/connectors`.
3. The connector reads the WAL via the PostgreSQL logical replication protocol.
4. Change events are serialized in Debezium JSON format and published to Kafka topics (by default, one topic per table: `<topic.prefix>.<schema>.<table>
`). +5. The OBConnector's `DbzListener` subscribes to the `obconnector.send` topic and processes each event through the Send workflow. + +### Message Deduplication + +Before forwarding a CDC event to the workflow, `DbzListener` checks `MessageDeduplicationService`. The dedup key is a composite of `entity|id|verb|SHA-256(data)`. Duplicate messages within the TTL window (default 300 seconds, configurable via `dedup.ttl.seconds`) are silently dropped to prevent double-processing caused by Kafka at-least-once delivery. + +### Connector Configuration via REST + +Register a connector: + +```bash +curl -X POST http://localhost:8083/connectors \ + -H "Content-Type: application/json" \ + -d '{ + "name": "obconnector-source", + "config": { + "connector.class": "io.debezium.connector.postgresql.PostgresConnector", + "database.hostname": "host.docker.internal", + "database.port": "5432", + "database.user": "etendo", + "database.password": "etendo", + "database.dbname": "etendo", + "topic.prefix": "obconnector", + "table.include.list": "public.c_order,public.m_product" + } + }' +``` + +List registered connectors: + +```bash +curl http://localhost:8083/connectors +``` + +Check connector status: + +```bash +curl http://localhost:8083/connectors/obconnector-source/status +``` + +--- + +## Topic Management + +### Purge (Clean State) + +```bash +make purge +``` + +Deletes and recreates all OBConnector topics. Use this to reset the message state during development without restarting the entire stack. + +For the Redpanda stack, this uses `rpk topic delete` followed by `rpk topic create`. For the Kafka stack, this uses `kafka-topics.sh --delete` followed by `kafka-topics.sh --create`. 
+ +Topics recreated by `make purge`: +- `obconnector.send` +- `obconnector.receive` +- `obconnector.send.DLT` +- `obconnector.receive.DLT` +- `async-process-execution` + +--- + +## Complete Ports Reference + +### Infrastructure Services + +| Service | Port | Stack | Purpose | +|---------|------|-------|---------| +| Kafka / Redpanda Broker | `29092` | Both | External Kafka-compatible message broker endpoint | +| Kafka / Redpanda Broker | `9092` | Both | Internal broker endpoint (container-to-container) | +| Redpanda Console | `9093` | Redpanda | Topic browser, consumer groups, message viewer | +| Kafka UI | `9093` | Kafka | Topic browser, consumer groups, message viewer | +| Redpanda Pandaproxy | `18082` | Redpanda | HTTP/REST Kafka proxy | +| Redpanda Schema Registry | `18081` | Redpanda | Avro schema registry | +| Kafka Connect | `8083` | Both | CDC connector management REST API | +| Kafka Connect UI | `8002` | Kafka only | Visual connector management | +| Zookeeper | `22181` | Kafka only | Kafka broker coordination | +| Debezium PostgreSQL | `5465` | Kafka only | Example PostgreSQL with logical replication | +| Jaeger UI | `16686` | Both | Distributed tracing UI | +| Jaeger OTLP gRPC | `4317` | Both | OpenTelemetry trace ingestion | +| Jaeger model | `14250` | Both | Jaeger internal model port | + +### Application Services + +| Service | Port | Purpose | +|---------|------|---------| +| Config Server | `8888` | Central configuration server (Spring Cloud Config) | +| Auth | `8094` | Authentication service, issues JWT tokens | +| DAS | `8092` | Data Access Service — Etendo entity read/write | +| Edge | `8096` | API Gateway — routes external requests to services | +| AsyncProcess | `8099` | Async task processing, publishes to `async-process-execution` | +| OBConnector Server | `8101` | Sync REST API (`PUT /api/sync/{model}/{id}`, `POST /api/sync/{model}`) | +| OBConnector Worker | `8102` | Sync workflow execution (Receive and Send runners) | +| Dev Portal | `8199` 
| Development portal | +| Mock Receiver | `8090` | Test external system, simulates the remote endpoint | + +--- + +## Startup Order + +### Redpanda Stack + +1. **Redpanda** starts and passes the health check (`rpk cluster health`). +2. **Redpanda Console** starts after Redpanda is healthy. +3. **Kafka Connect** starts after Redpanda is healthy. +4. **Jaeger** starts independently (no dependencies). + +### Full Kafka Stack + +1. **Zookeeper** starts. +2. **Kafka** depends on Zookeeper. +3. **Kafka UI**, **Kafka Connect**, and **PostgreSQL** depend on Kafka (implicit via links/environment). +4. **Kafka Connect UI** depends on Kafka Connect. +5. **Jaeger** starts independently. + +Application services (Config Server, Auth, DAS, Edge, etc.) must be started separately after the infrastructure stack is running. diff --git a/docs/makefile-reference.md b/docs/makefile-reference.md new file mode 100644 index 00000000..59a86acb --- /dev/null +++ b/docs/makefile-reference.md @@ -0,0 +1,975 @@ +# Makefile Reference — EtendoRX OBConnector + +## 1. Overview + +The Makefile at the root of `etendo_rx/` orchestrates the full development lifecycle of the OBConnector module: preflight validation, infrastructure provisioning, configuration generation, compilation, and orchestrated startup/shutdown of all microservices. + +**Shell:** GNU Make with `SHELL := /bin/bash`. All recipes run in bash, enabling features like `&&`, process substitution, ANSI color codes (`\033[…m`), and `printf`. + +**Default goal:** `help` — running `make` with no arguments prints the target list. + +**Key design decisions:** + +- Services are started as background Gradle `bootRun` processes. PIDs are stored in `.run/*.pid` and logs in `.run/*.log`, enabling `make status`, `make logs`, and `make down` to manage them without an external process manager. +- The `wait_for` function polls `/actuator/health` via `curl` up to `MAX_WAIT` seconds before proceeding to the next service group, ensuring ordered startup. 
+- `up-local` is the fastest path: it skips Config Server, Auth, and Edge entirely and also skips already-running services (idempotent restarts). +- DB credentials are read from `gradle.properties` at Makefile parse time (shell expansion), so no manual secret management is required in shell sessions. + +--- + +## 2. Variables Reference + +| Variable | Default / Source | Description | +|---|---|---| +| `ROOT` | `$(shell pwd)` | Absolute path to the repo root. Used as the base for all other paths. | +| `JAVA_HOME` | `/usr/libexec/java_home -v 17` (macOS), fallback to `~/Library/Java/JavaVirtualMachines/corretto-17.0.18/Contents/Home` | Path to a Java 17 JDK. Overridable via environment: `JAVA_HOME=/path make up`. | +| `GRADLE` | `JAVA_HOME=$(JAVA_HOME) ./gradlew` | The Gradle wrapper invocation, with `JAVA_HOME` injected so all Gradle subprocesses use the correct JDK. | +| `INFRA` | `$(ROOT)/modules/com.etendorx.integration.obconnector/infraestructure` | Directory containing the docker-compose files for infrastructure services (Redpanda/Kafka, Kafka Connect, Jaeger, etc.). | +| `RXCONFIG` | `$(ROOT)/rxconfig` | Directory where service YAML configuration files live. Templates (`*.yaml.template`) are copied here and then patched with DB credentials by `make config`. | +| `PROPS` | `$(ROOT)/gradle.properties` | Source of truth for DB connection parameters. Read at parse time via `grep`/`cut`/`sed`. | +| `DB_URL` | Parsed from `bbdd.url` in `gradle.properties` | JDBC URL, with escaped colons (`\:`) unescaped to `:`. Example: `jdbc:postgresql://localhost:5432`. | +| `DB_SID` | Parsed from `bbdd.sid` in `gradle.properties` | Database name/SID. Example: `etendo`. | +| `DB_USER` | Parsed from `bbdd.user` in `gradle.properties` | PostgreSQL username. | +| `DB_PASS` | Parsed from `bbdd.password` in `gradle.properties` | PostgreSQL password. | +| `CTX_NAME` | Parsed from `context.name` in `gradle.properties`; defaults to `etendo` if absent | Docker Compose project name prefix. 
The actual project name is `$(CTX_NAME)-obconn`, which namespaces all containers. | +| `COMPOSE` | `docker-compose -p $(CTX_NAME)-obconn` | Docker Compose invocation with project name set, ensuring containers are grouped under one project. | +| `BOOTRUN_ARGS` | `-Dspring.profiles.active=local` | JVM system property passed to every `bootRun` invocation. Activates the `local` Spring profile, which reads YAML files from `rxconfig/`. | +| `PIDS_DIR` | `$(ROOT)/.run` | Directory where `.pid` and `.log` files are stored for background services. Created on demand by `make up` / `make up-local`. | +| `MAX_WAIT` | `120` (seconds) | Maximum time `wait_for` polls a service health endpoint before aborting with a timeout error. | +| `REDPANDA_CTR` | Detected at parse time via `docker ps` matching `redpanda-1$` | Name of the running Redpanda container. Used by `make purge` to exec `rpk` inside the container. Falls back to `"redpanda"` if not found. | +| `PURGE_TOPICS` | Hardcoded list (see Utilities section) | All OBConnector Kafka topic names that `make purge` deletes and allows to auto-recreate. | +| `CYAN`, `GREEN`, `YELLOW`, `DIM`, `RESET` | ANSI escape sequences | Terminal color codes used in `echo -e` calls for readable output. Not configurable. | + +--- + +## 3. Target Categories + +### 3.1 Preflight Checks + +These targets validate the environment before attempting builds or service startups. They are automatically invoked as dependencies by `make up`, `make up-local`, and `make up-kafka`. + +--- + +#### `check-db` + +**Description:** Tests that the PostgreSQL database defined in `gradle.properties` is reachable and accepting connections. + +**Usage:** +```bash +make check-db +``` + +**What it does under the hood:** + +1. Reads `DB_URL`, `DB_SID`, `DB_USER`, `DB_PASS` from `gradle.properties` (already parsed at Makefile load time). +2. Extracts host and port from the JDBC URL by stripping the `jdbc:postgresql://` prefix with `sed`, then splitting on `:` with `cut`. +3. 
Runs `psql -h <host> -p <port> -U <user> -d <db> -c "SELECT 1"` with `PGPASSWORD` set in the environment (avoids interactive password prompt).
4. Prints `OK` in green on success, `FAIL` in yellow on failure and exits with code 1.

**Dependencies:** None.

**Failure hint:** If this fails, verify that:
- `gradle.properties` has correct `bbdd.*` entries.
- Etendo Classic's PostgreSQL is running and accessible from localhost.
- The user has login privileges on the target database.

---

#### `check-java`

**Description:** Verifies that Java 17 is available at the resolved `JAVA_HOME`.

**Usage:**
```bash
make check-java
```

**What it does under the hood:**

Runs `$(JAVA_HOME)/bin/java -version 2>&1 | grep -q "17\."`. If the pattern matches, prints `OK` with the resolved `JAVA_HOME`. Otherwise prints `FAIL` and exits with code 1.

**Dependencies:** None.

**Failure hint:** If `JAVA_HOME` auto-detection fails (non-macOS system or missing `java_home` utility), set it explicitly:
```bash
JAVA_HOME=/path/to/jdk17 make up-local
```

---

### 3.2 Infrastructure

Infrastructure targets manage the Docker Compose stack that provides the message broker (Redpanda or Kafka), Kafka Connect (Debezium), and Jaeger tracing. All Docker operations use the project name `$(CTX_NAME)-obconn` to avoid conflicts with other Compose stacks.

---

#### `infra`

**Description:** Starts the lightweight Redpanda-based infrastructure stack. Redpanda is the default and recommended option for local development due to its lower resource consumption compared to a full Kafka deployment.

**Usage:**
```bash
make infra
```

**What it does under the hood:**

```bash
cd $(INFRA) && docker-compose -p $(CTX_NAME)-obconn -f docker-compose.redpanda.yml up -d
```

Starts containers defined in `docker-compose.redpanda.yml` in detached mode. 
After the command returns, it prints the endpoint summary: + +| Service | Address | +|---|---| +| Redpanda Broker | `localhost:29092` | +| Redpanda Console UI | `http://localhost:9093` | +| Kafka Connect API | `http://localhost:8083` | +| Jaeger UI | `http://localhost:16686` | + +**Dependencies:** None (but implicitly requires Docker daemon to be running). + +--- + +#### `infra-kafka` + +**Description:** Starts the heavier Kafka-based infrastructure stack. Use this when you need full Kafka semantics or are debugging Kafka-specific behavior. Includes an additional PostgreSQL instance used by Debezium. + +**Usage:** +```bash +make infra-kafka +``` + +**What it does under the hood:** + +```bash +cd $(INFRA) && docker-compose -p $(CTX_NAME)-obconn up -d +``` + +Uses the default `docker-compose.yml` (Kafka, not Redpanda). After startup prints: + +| Service | Address | +|---|---| +| Kafka Broker | `localhost:29092` | +| Kafka UI | `http://localhost:9093` | +| Kafka Connect API | `http://localhost:8083` | +| Kafka Connect UI | `http://localhost:8002` | +| Jaeger UI | `http://localhost:16686` | +| PostgreSQL (Debezium) | `localhost:5465` | + +**Dependencies:** None. + +--- + +#### `infra-down` + +**Description:** Stops and removes all infrastructure containers for both Compose files. + +**Usage:** +```bash +make infra-down +``` + +**What it does under the hood:** + +Runs `docker-compose down` for both `docker-compose.yml` and `docker-compose.redpanda.yml`, suppressing errors from whichever is not running. Both commands are attempted regardless of failures (`; true` at the end). + +**Dependencies:** None. Also called as a dependency by `make down`. + +--- + +#### `infra-logs` + +**Description:** Tails the Docker Compose logs for whichever infrastructure stack is running. 
+ +**Usage:** +```bash +make infra-logs +``` + +**What it does under the hood:** + +```bash +cd $(INFRA) && docker-compose -p $(CTX_NAME)-obconn logs -f --tail=50 +``` + +Follows log output from all containers in the Compose project, starting from the last 50 lines. Uses the default `docker-compose.yml`; if using Redpanda, this may show no containers. Use `make infra-ps` first to confirm which stack is active. + +**Dependencies:** None. + +--- + +#### `infra-ps` + +**Description:** Shows the running state of all infrastructure containers across both Compose files. + +**Usage:** +```bash +make infra-ps +``` + +**What it does under the hood:** + +Runs `docker-compose ps` for both `docker-compose.yml` and `docker-compose.redpanda.yml` sequentially, ignoring errors (`; true`). Useful for quickly verifying which containers are up without switching to the Docker CLI. + +**Dependencies:** None. + +--- + +### 3.3 Configuration + +#### `config` + +**Description:** Generates YAML configuration files for all EtendoRX services from their `.yaml.template` counterparts in `rxconfig/`, then injects database credentials into `das.yaml` directly from `gradle.properties`. + +**Usage:** +```bash +make config +``` + +**What it does under the hood:** + +1. Iterates over all `$(RXCONFIG)/*.yaml.template` files. +2. For each template, copies it to the same path without the `.template` suffix **only if the target does not already exist** (skips existing files to preserve local edits). +3. After the copy loop, patches `das.yaml` in-place using `sed -i.bak` with three substitutions: + - Replaces the `url:` line containing a JDBC URL with `url: $(DB_URL)/$(DB_SID)`. + - Replaces the `username:` line with `username: $(DB_USER)`. + - Replaces the `password:` line with `password: $(DB_PASS)`. +4. Removes the `.bak` backup file created by `sed -i`. 
+ +**Important:** `config` is idempotent for file creation (skips existing files) but always re-patches `das.yaml` with the current `gradle.properties` values. If you have manually customized `das.yaml` beyond the DB credentials, re-running `make config` will overwrite those specific lines. + +**Dependencies:** None. Called automatically by `make up`, `make up-local`, and `make up-kafka`. + +--- + +### 3.4 Build + +Build targets invoke Gradle to compile and package the OBConnector modules. They use the `build` Gradle task (which includes `compileJava`, resources processing, and JAR assembly, but respects `-x test` if needed). The modules are: + +| Gradle Project ID | Role | +|---|---| +| `com.etendorx.integration.obconn.common` | Shared domain model and utilities | +| `com.etendorx.integration.obconn.lib` | Core business logic, tested independently | +| `com.etendorx.integration.obconn.server` | HTTP API service (port 8101) | +| `com.etendorx.integration.obconn.worker` | Kafka consumer/processor service (port 8102) | + +--- + +#### `build` + +**Description:** Builds all four OBConnector modules in a single Gradle invocation. + +**Usage:** +```bash +make build +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew \ + :com.etendorx.integration.obconn.common:build \ + :com.etendorx.integration.obconn.lib:build \ + :com.etendorx.integration.obconn.server:build \ + :com.etendorx.integration.obconn.worker:build +``` + +Gradle executes the tasks in dependency order. Since `server` and `worker` depend on `common` and `lib`, Gradle ensures correct compilation order. + +**Dependencies:** None (Makefile-level). Gradle resolves inter-module dependencies internally. + +--- + +#### `build-lib` + +**Description:** Builds only the `lib` module. + +**Usage:** +```bash +make build-lib +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.lib:build +``` + +**Dependencies:** None. 
+ +--- + +#### `build-server` + +**Description:** Builds only the `server` module. + +**Usage:** +```bash +make build-server +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.server:build +``` + +**Dependencies:** None (Makefile-level). + +--- + +#### `build-worker` + +**Description:** Builds only the `worker` module. + +**Usage:** +```bash +make build-worker +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.worker:build +``` + +**Dependencies:** None (Makefile-level). + +--- + +#### `test` + +**Description:** Runs unit tests for `lib`, `server`, and `worker` modules. + +**Usage:** +```bash +make test +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew \ + :com.etendorx.integration.obconn.lib:test \ + :com.etendorx.integration.obconn.server:test \ + :com.etendorx.integration.obconn.worker:test +``` + +Test reports are generated by Gradle in each module's `build/reports/tests/` directory. + +**Dependencies:** None. + +--- + +#### `test-lib` + +**Description:** Runs unit tests for the `lib` module only. + +**Usage:** +```bash +make test-lib +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.lib:test +``` + +**Dependencies:** None. + +--- + +### 3.5 Individual Services + +These targets start a single service in the **foreground** of the current terminal. They are intended for development and debugging of individual services. For running the full stack, use `make up` or `make up-local` instead. + +All individual service targets use `$(BOOTRUN_ARGS)` (`-Dspring.profiles.active=local`), which instructs Spring Boot to load configuration from `rxconfig/` via the `local` profile. The exception is `run-config`, which does not use `BOOTRUN_ARGS` because the Config Server reads its own bootstrap configuration directly. 
+ +--- + +#### `run-config` + +**Description:** Starts the Spring Cloud Config Server on port **8888**. This service must be started before any other EtendoRX service when operating in full (non-local) mode, because other services fetch their configuration from it on startup. + +**Usage:** +```bash +make run-config +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.configserver:bootRun +``` + +Note: no `$(BOOTRUN_ARGS)` — the Config Server uses its own bootstrap configuration. + +**Dependencies:** None. + +--- + +#### `run-auth` + +**Description:** Starts the Authentication Service on port **8094**. Handles JWT issuance and validation for the EtendoRX API gateway. + +**Usage:** +```bash +make run-auth +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.auth:bootRun -Dspring.profiles.active=local +``` + +**Dependencies:** None (Makefile-level). In practice, requires Config Server if not using `local` profile. + +--- + +#### `run-das` + +**Description:** Starts the Data Access Service (DAS) on port **8092**. DAS is the EtendoRX persistence layer that maps Etendo Classic entities to REST endpoints via generated JPA repositories. + +**Usage:** +```bash +make run-das +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.das:bootRun -Dspring.profiles.active=local +``` + +**Dependencies:** None (Makefile-level). Requires a running PostgreSQL with the Etendo Classic schema. + +--- + +#### `run-edge` + +**Description:** Starts the Edge Gateway on port **8096**. Acts as the API gateway, routing and authenticating requests to backend services. + +**Usage:** +```bash +make run-edge +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.edge:bootRun -Dspring.profiles.active=local +``` + +**Dependencies:** None (Makefile-level). 
+
+---
+
+#### `run-server`
+
+**Description:** Starts the OBConnector Server on port **8101**. Exposes the HTTP API at `/api/sync/` that receives integration payloads and publishes them to the `obconnector.receive` Kafka topic.
+
+**Usage:**
+```bash
+make run-server
+```
+
+**What it does under the hood:**
+
+```bash
+JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.server:bootRun -Dspring.profiles.active=local
+```
+
+**Dependencies:** None (Makefile-level). Requires Kafka/Redpanda broker to be running.
+
+---
+
+#### `run-worker`
+
+**Description:** Starts the OBConnector Worker on port **8102**. Consumes messages from `obconnector.receive` and `obconnector.send` Kafka topics, processes them (calls Etendo Classic or external system), and handles retry/DLT logic.
+
+**Usage:**
+```bash
+make run-worker
+```
+
+**What it does under the hood:**
+
+```bash
+JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.worker:bootRun -Dspring.profiles.active=local
+```
+
+An optional dashboard is available at `http://localhost:8102/dashboard` when `dashboard.enabled=true` is set in the worker configuration.
+
+**Dependencies:** None (Makefile-level). Requires Kafka/Redpanda and DAS to be running.
+
+---
+
+#### `run-async`
+
+**Description:** Starts the Async Process service on port **8099**. Handles asynchronous background processing tasks within the EtendoRX platform.
+
+**Usage:**
+```bash
+make run-async
+```
+
+**What it does under the hood:**
+
+```bash
+JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.asyncprocess:bootRun -Dspring.profiles.active=local
+```
+
+**Dependencies:** None (Makefile-level).
+
+---
+
+### 3.6 Orchestrated Startup
+
+These targets start the entire stack as background processes, managing PIDs and logs in `$(ROOT)/.run/`. They use the `wait_for` macro to enforce startup ordering.
+
+**The `wait_for` macro** polls `http://localhost:<port>/actuator/health` every 2 seconds.
If the service does not respond within `MAX_WAIT` (120) seconds, it prints `TIMEOUT`, shows the last 20 lines of the service's log file, and exits with code 1. This prevents dependent services from starting against an unhealthy dependency. + +--- + +#### `up` + +**Description:** Full orchestrated startup with Config Server. Runs `check-java`, `check-db`, `infra`, and `config` as prerequisites, then starts all services in the correct order with health-gate waits between groups. + +**Usage:** +```bash +make up +``` + +**What it does under the hood (in order):** + +1. Runs `check-java`, `check-db`, `infra`, `config` as Make prerequisites. +2. Creates `$(PIDS_DIR)` and clears any existing `.log` and `.pid` files. +3. Starts Config Server in background, writes PID to `configserver.pid`, logs to `configserver.log`. +4. Waits for Config Server on `:8888`. +5. Starts Auth, DAS, and Edge in background (in parallel — three `&` commands), writing individual PIDs and logs. +6. Waits for Auth on `:8094`, DAS on `:8092`, Edge on `:8096` sequentially. +7. Starts OBConnector Server and Worker in background. +8. Waits for OBConn Server on `:8101`, OBConn Worker on `:8102`. +9. Calls `_banner` (internal target) which prints the full endpoint summary. + +**Dependencies:** `check-java`, `check-db`, `infra`, `config`. + +--- + +#### `up-local` + +**Description:** Fastest orchestrated startup. Skips Config Server, Auth, and Edge. Checks if each service is already running (by hitting its `/actuator/health`) and skips it if so, making this target idempotent. Also starts the Mock Receiver automatically. + +**Usage:** +```bash +make up-local +``` + +**What it does under the hood (in order):** + +1. Runs `check-java`, `check-db`, `infra`, `config` as Make prerequisites. +2. Creates `$(PIDS_DIR)`. +3. **DAS:** If already healthy on `:8092`, prints "already running". Otherwise: + a. Runs `generate.entities -x test` to generate JPA entity sources from the Etendo Classic schema. + b. 
Runs `:com.etendorx.das:build -x test` to compile DAS with the generated entities. + c. Starts DAS in background. +4. Waits for DAS on `:8092`. +5. **OBConn Server:** If already healthy on `:8101`, skips. Otherwise starts in background. +6. **OBConn Worker:** If already healthy on `:8102`, skips. Otherwise starts in background. +7. **Async Process:** If already healthy on `:8099`, skips. Otherwise starts in background. +8. **Mock Receiver:** If already responding on `:8090`, skips. Otherwise starts `loadtest:bootRun --spring.profiles.active=mock` in background. +9. Waits for OBConn Server, OBConn Worker, and Async Process. +10. Waits 2 seconds then prints Mock Receiver status. +11. Calls `_banner` with local-mode labels (Config Server / Auth / Edge shown as "skipped"). + +The `generate.entities` step is only performed when DAS is not already running, preventing redundant entity regeneration on subsequent `make up-local` calls. + +**Dependencies:** `check-java`, `check-db`, `infra`, `config`. + +--- + +#### `up-kafka` + +**Description:** Full orchestrated startup using Kafka instead of Redpanda. Identical flow to `make up` except it depends on `infra-kafka` instead of `infra`. + +**Usage:** +```bash +make up-kafka +``` + +**What it does under the hood:** + +Identical to `make up` with the following differences: +- Depends on `infra-kafka` (starts `docker-compose.yml` instead of `docker-compose.redpanda.yml`). +- Resets `.pid` and `.log` files before starting. +- Starts Config Server, waits, then Auth/DAS/Edge, waits, then OBConn Server/Worker, waits, then prints banner. + +**Dependencies:** `check-java`, `check-db`, `infra-kafka`, `config`. + +--- + +#### `down` + +**Description:** Stops all background services started by `make up` / `make up-local` / `make up-kafka`, then stops the infrastructure containers. + +**Usage:** +```bash +make down +``` + +**What it does under the hood:** + +1. Iterates over all `$(PIDS_DIR)/*.pid` files. +2. 
For each PID file, reads the PID, checks if the process is alive with `kill -0`, and sends `SIGTERM` (`kill <PID>`) if so. Removes the `.pid` file.
+3. Calls `make infra-down` to stop Docker Compose containers.
+4. Prints "All stopped."
+
+**Note:** This sends `SIGTERM` to the Gradle daemon wrapper process. The JVM hosting the Spring Boot application may take several seconds to shut down gracefully. If processes do not stop, use `kill -9 <PID>` manually or restart your terminal.
+
+**Dependencies:** `infra-down` (called as a Make dependency internally).
+
+---
+
+#### `status`
+
+**Description:** Shows the current state of all infrastructure containers and all background services tracked by the Makefile.
+
+**Usage:**
+```bash
+make status
+```
+
+**What it does under the hood:**
+
+1. Runs `docker-compose ps` (tries both Compose files, ignores errors).
+2. Iterates over all `$(PIDS_DIR)/*.pid` files and checks each PID with `kill -0`. Prints `RUNNING` (green) or `STOPPED` (yellow) with the service name and PID.
+
+**Dependencies:** None.
+
+---
+
+#### `logs`
+
+**Description:** Tails all service log files in `$(PIDS_DIR)` simultaneously.
+
+**Usage:**
+```bash
+make logs
+```
+
+**What it does under the hood:**
+
+```bash
+exec tail -f $(PIDS_DIR)/*.log
+```
+
+Uses `exec` to replace the Make subprocess with `tail`, so the process exits cleanly when interrupted with Ctrl+C. All log files from all services are interleaved in a single stream.
+
+**Dependencies:** None. Requires services to have been started via `make up` or `make up-local`.
+
+---
+
+### 3.7 Utilities
+
+#### `portal`
+
+**Description:** Starts a Python HTTP server on port **8199** serving the `portal/` directory. The portal is a local developer UI for browsing services and documentation.
+
+**Usage:**
+```bash
+make portal
+```
+
+**What it does under the hood:**
+
+```bash
+cd $(ROOT)/portal && exec python3 -m http.server 8199
+```
+
+Uses `exec` so the Python process replaces the Make subprocess.
Access at `http://localhost:8199`. + +**Dependencies:** None. Requires Python 3 in `PATH`. + +--- + +#### `loadtest` + +**Description:** Runs both the Send and Receive load tests sequentially. + +**Usage:** +```bash +make loadtest +``` + +**What it does under the hood:** + +Calls `loadtest.send` followed by `loadtest.receive` as Make dependencies (in that order). + +**Dependencies:** `loadtest.send`, `loadtest.receive`. + +--- + +#### `loadtest.send` + +**Description:** Runs the Send load test, which simulates Debezium CDC events being published to the `obconnector.send` Kafka topic (the "Etendo Classic to external system" direction). + +**Usage:** +```bash +make loadtest.send +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.loadtest:bootRun -Dspring.profiles.active=local +``` + +Starts the `loadtest` module in its default mode (send). The test produces synthetic change events that the Worker consumes. + +**Dependencies:** None (Makefile-level). Requires Worker and Kafka/Redpanda to be running. + +--- + +#### `loadtest.receive` + +**Description:** Runs the Receive load test, which sends HTTP POST payloads to the OBConnector Server (`/api/sync/`) to simulate an external system pushing data into Etendo Classic. + +**Usage:** +```bash +make loadtest.receive +``` + +**What it does under the hood:** + +```bash +JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.loadtest:bootRun \ + --args='--loadtest.mode=receive --loadtest.enabled=false --loadtest.threads=1 \ + --loadtest.messages-per-thread=5 --loadtest.poll-status=true' +``` + +Key parameters: +- `loadtest.mode=receive` — activates the receive (HTTP ingest) test path. +- `loadtest.threads=1` — single-threaded execution. +- `loadtest.messages-per-thread=5` — sends 5 messages. +- `loadtest.poll-status=true` — polls the async status endpoint after each send to verify end-to-end processing. + +**Dependencies:** None (Makefile-level). 
Requires OBConnector Server and Worker to be running.
+
+---
+
+#### `mock`
+
+**Description:** Starts a mock HTTP receiver on port **8090** that simulates an external system accepting payloads from the OBConnector Worker (Send workflow). Used for testing the outbound integration path without a real external endpoint.
+
+**Usage:**
+```bash
+make mock
+```
+
+**What it does under the hood:**
+
+```bash
+JAVA_HOME=$(JAVA_HOME) ./gradlew :com.etendorx.integration.obconn.loadtest:bootRun \
+    --args='--spring.profiles.active=mock'
+```
+
+The `loadtest` module, when started with `spring.profiles.active=mock`, activates a minimal Spring Boot HTTP server that logs all incoming requests and returns `200 OK`. This mock is also started automatically by `make up-local`.
+
+**Dependencies:** None.
+
+---
+
+#### `purge`
+
+**Description:** Deletes all OBConnector-related Kafka topics from Redpanda, allowing them to auto-recreate when producers and consumers reconnect. Use this to reset the message queue state without restarting the broker.
+
+**Usage:**
+```bash
+make purge
+```
+
+**What it does under the hood:**
+
+1. Resolves the Redpanda container name at Makefile parse time via `docker ps | grep redpanda-1$`. Falls back to `"redpanda"`.
+2. Iterates over the full list of topics defined in `PURGE_TOPICS`:
+   - `obconnector.send`
+   - `obconnector.send-dlt`
+   - `obconnector.send-retry-10000`
+   - `obconnector.send-retry-20000`
+   - `obconnector.send-retry-40000`
+   - `obconnector.send-retry-60000`
+   - `obconnector.receive`
+   - `obconnector.receive-dlt`
+   - `obconnector.receive-retry-10000`
+   - `obconnector.receive-retry-20000`
+   - `obconnector.receive-retry-40000`
+   - `obconnector.receive-retry-60000`
+   - `default.public.c_bpartner` (Debezium CDC source topic)
+3. For each topic, runs `docker exec <container> rpk topic describe <topic>` to check existence. If found, runs `rpk topic delete <topic>`. Prints `Deleted`, `Error`, or `Skip (not found)` per topic.
+ +**Note:** Topics auto-recreate the next time a producer publishes or a consumer subscribes to them. The retry and DLT topics are created by Spring Kafka's retry configuration on first use. + +**Dependencies:** None. Requires Redpanda container to be running. + +--- + +#### `help` + +**Description:** Prints a formatted list of all documented targets (those with `## comment` annotations) and a quick-start reference. + +**Usage:** +```bash +make help +# or simply: +make +``` + +**What it does under the hood:** + +```bash +grep -E '^[a-zA-Z_-]+:.*?## .*$' $(MAKEFILE_LIST) | \ + awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $1, $2}' +``` + +Extracts all targets annotated with `## ` using a regex, then formats them as a two-column table with `awk`. Internal targets prefixed with `_` (such as `_banner`) are excluded by design since they do not match `^[a-zA-Z_-]+:`. + +**Dependencies:** None. This is the `.DEFAULT_GOAL`. + +--- + +## 4. Startup Order Diagram + +``` +make config + | + v +make infra (or infra-kafka) + | + v +make up ─────────────────────────────────────────────────────┐ + | | + ├── run-config :8888 ← wait_for (120s) │ (make up / make up-kafka only) + | │ + ├── run-auth :8094 ─┐ | + ├── run-das :8092 ─┤ (parallel) ← wait_for each │ + ├── run-edge :8096 ─┘ | + | | + ├── run-server :8101 ─┐ | + └── run-worker :8102 ─┘ (parallel) ← wait_for each | + │ +make up-local ────────────────────────────────────────────────┘ + | (skips Config Server, Auth, Edge) + | + ├── generate.entities (only if DAS not running) + ├── das:build (only if DAS not running) + ├── run-das :8092 ← wait_for + ├── run-server :8101 ← wait_for + ├── run-worker :8102 ← wait_for + ├── run-async :8099 ← wait_for + └── mock :8090 (loadtest module in mock profile) +``` + +**Port summary:** + +| Port | Service | Started by | +|---|---|---| +| 8888 | Config Server | `up`, `up-kafka` | +| 8094 | Auth Service | `up`, `up-kafka` | +| 8092 | DAS | all `up*` targets | +| 8096 | Edge Gateway | 
`up`, `up-kafka` | +| 8099 | Async Process | `up-local` | +| 8090 | Mock Receiver | `up-local`, `mock` | +| 8101 | OBConn Server | all `up*` targets | +| 8102 | OBConn Worker | all `up*` targets | +| 29092 | Redpanda/Kafka Broker | `infra`, `infra-kafka` | +| 9093 | Redpanda Console / Kafka UI | `infra`, `infra-kafka` | +| 8083 | Kafka Connect API | `infra`, `infra-kafka` | +| 8002 | Kafka Connect UI | `infra-kafka` only | +| 16686 | Jaeger UI | `infra`, `infra-kafka` | +| 5465 | PostgreSQL (Debezium) | `infra-kafka` only | +| 8199 | Dev Portal | `portal` | + +--- + +## 5. Common Workflows + +### Fresh start from scratch + +```bash +make config && make infra && make build && make up-local +``` + +Steps: +1. `make config` — generates `rxconfig/*.yaml` from templates and injects DB credentials. +2. `make infra` — starts Redpanda and Kafka Connect via Docker Compose. +3. `make build` — compiles all four OBConnector modules. +4. `make up-local` — starts DAS (with entity generation), OBConn Server, OBConn Worker, Async Process, and Mock Receiver. + +### Rebuild and restart a single service + +```bash +make build-worker && make run-worker +``` + +Rebuilds the worker JAR and starts it in the foreground. Use this during active development of the worker to get fast feedback. Run in a dedicated terminal. + +```bash +make build-server && make run-server +``` + +Same pattern for the server module. + +### Run the receive load test + +```bash +make loadtest.receive +``` + +Requires OBConnector Server (`:8101`) and Worker (`:8102`) to already be running. Sends 5 HTTP POST messages to the server and polls the async status endpoint for each. + +### Run the send load test with the mock receiver + +```bash +# Terminal 1 — start the mock receiver +make mock + +# Terminal 2 — run the send test +make loadtest.send +``` + +The mock receiver on `:8090` simulates the external system. The send test publishes CDC events that the Worker picks up and forwards to the mock. 
+ +### Clean reset (purge topics and restart) + +```bash +make down && make purge && make infra && make up-local +``` + +Steps: +1. `make down` — stops all services and Docker Compose containers. +2. `make purge` — deletes all OBConnector Kafka topics so no stale messages carry over. +3. `make infra` — brings infrastructure back up with a clean Redpanda state. +4. `make up-local` — starts all services fresh. + +### Verify environment before first run + +```bash +make check-java && make check-db +``` + +Run these before any `up*` target to confirm the JDK and database are correctly configured. Both are also called automatically as prerequisites by `make up`, `make up-local`, and `make up-kafka`. + +### Monitor running services + +```bash +# In one terminal +make logs + +# In another terminal +make status +``` + +`make logs` tails all `.run/*.log` files interleaved. `make status` shows PID liveness and Docker container state at a point in time. diff --git a/docs/plans/2026-02-27-technical-documentation-design.md b/docs/plans/2026-02-27-technical-documentation-design.md new file mode 100644 index 00000000..37e207f6 --- /dev/null +++ b/docs/plans/2026-02-27-technical-documentation-design.md @@ -0,0 +1,43 @@ +# Technical Documentation Design + +**Date:** 2026-02-27 +**Audience:** Internal developers + External integrators +**Structure:** Two-level (platform + connector module) + +## Overview + +Create comprehensive English technical documentation for EtendoRX platform and the OBConnector integration module. Documentation lives in two locations matching the repo structure. 
+ +## Location 1: EtendoRX Platform (`etendo_rx/docs/`) + +| File | Description | +|------|-------------| +| `INDEX.md` | Master table of contents with links to all platform and connector docs | +| `architecture.md` | Platform services (DAS, Auth, Edge, Config, AsyncProcess), ports, interaction diagram | +| `getting-started.md` | Prerequisites, clone, configure, build, run, verify | +| `makefile-reference.md` | All 33 Makefile targets with descriptions, args, examples, dependency order | +| `configuration.md` | rxconfig/ YAML files, gradle.properties, template system, env vars | +| `infrastructure.md` | Docker Compose (Redpanda vs Kafka), Jaeger, Debezium, ports reference | + +## Location 2: OBConnector Module (`modules/com.etendorx.integration.obconnector/docs/`) + +| File | Description | +|------|-------------| +| `INDEX.md` | Connector-specific TOC | +| `architecture.md` | Module structure (common/lib/server/worker/loadtest), dependency graph, class hierarchy | +| `workflows.md` | Send & Receive pipelines: each step (MAP→POST_ACTION) with HTTP calls, data flow, step messages | +| `api-reference.md` | Server REST API: POST/PUT/GET endpoints with request/response examples | +| `dashboard.md` | Setup, access, HTMX polling, step detail messages, endpoints | +| `worker.md` | Filters, converters, operations, FK resolution (subEntityMap), DAS HTTP calls | +| `loadtest.md` | Send vs Receive load tests, config options, Makefile targets, mock receiver | +| `resilience.md` | Dedup, DLT replay, HTTP retry, saga compensation, idempotent steps, health indicator | +| `extending.md` | Adding new entities, custom post-actions, custom converters | +| `configuration.md` | Worker/Server properties, auth bypass, token setup, connector instance | + +## Principles + +- Technical and detailed — include class names, file paths, config keys, HTTP examples +- Each file is self-contained but links to related docs +- Code examples where useful (config snippets, curl commands, Java snippets) 
+- No screenshots — describe UI elements textually +- Architecture diagrams in ASCII/text format diff --git a/docs/plans/2026-02-27-technical-documentation.md b/docs/plans/2026-02-27-technical-documentation.md new file mode 100644 index 00000000..a173be56 --- /dev/null +++ b/docs/plans/2026-02-27-technical-documentation.md @@ -0,0 +1,558 @@ +# Technical Documentation Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Create comprehensive English technical documentation for EtendoRX platform and OBConnector module. + +**Architecture:** Two-level docs — platform docs in `etendo_rx/docs/` (6 files), connector docs in `modules/com.etendorx.integration.obconnector/docs/` (10 files). Each location has an INDEX.md linking all topics. Files are self-contained markdown with cross-references. + +**Tech Stack:** Markdown, ASCII diagrams, curl examples, Java code snippets + +--- + +## Wave 1: Platform Docs (6 files, parallelizable) + +All files in `/Users/sebastianbarrozo/Documents/work/epic/obconnector/etendo_rx/docs/` + +### Task 1: Platform INDEX.md + +**Files:** +- Create: `docs/INDEX.md` + +**Step 1: Write the index file** + +Master table of contents for the entire project. Must include: +- Project overview (EtendoRX = reactive microservices platform for Etendo ERP) +- Links to all 5 platform doc files +- Links to OBConnector docs at `../modules/com.etendorx.integration.obconnector/docs/INDEX.md` +- Quick reference: services table (name, port, purpose) + +**Step 2: Verify links are correct** + +Check that all referenced file paths exist or will exist after plan completion. 
+ +--- + +### Task 2: Platform Architecture (`docs/architecture.md`) + +**Files:** +- Create: `docs/architecture.md` +- Read: `settings.gradle`, `build.gradle`, all module dirs + +**Step 1: Write architecture doc** + +Must cover: +- **Platform overview** — EtendoRX as Spring Boot microservices platform +- **Service catalog** — table with: service name, module path, port, purpose, Spring Boot app name + - Config Server (8888) — Spring Cloud Config, central configuration + - Auth (8094) — JWT authentication/authorization + - DAS (8092) — Data Access Service, REST API to Etendo DB + - Edge (8096) — API Gateway + - AsyncProcess (8099) — Async task processing, Kafka consumer +- **ASCII architecture diagram** — show services, Kafka, DB, external systems +- **Dependency graph** — libs/, modules_core/, modules/, modules_gen/ relationships +- **Module categories** — explain each dir (libs, modules_core, modules, modules_gen, modules_test) +- **Technology stack** — Spring Boot 3.1.4, Spring Cloud 2022.0.4, Java 17, Gradle 8.3, Kafka/Redpanda, PostgreSQL + +--- + +### Task 3: Getting Started (`docs/getting-started.md`) + +**Files:** +- Create: `docs/getting-started.md` +- Read: `Makefile`, `gradle.properties` + +**Step 1: Write getting started guide** + +Must cover: +- **Prerequisites** — Java 17 (Corretto recommended), Docker, Docker Compose, Git, PostgreSQL with Etendo installed +- **Clone** — git clone + submodule init +- **Configure** — `gradle.properties` setup (DB credentials, GitHub tokens), `make config` to generate YAML from templates +- **Infrastructure** — `make infra` (Redpanda) or `make infra-kafka` (full Kafka) +- **Build** — `make build` (compiles all modules) +- **Run** — `make up` (with Config Server) or `make up-local` (without, fastest) +- **Verify** — `make status`, access Dev Portal at :8199, check each service health endpoint +- **First sync** — brief overview of how to trigger a Send or Receive workflow +- **Troubleshooting** — common issues (JAVA_HOME, 
DB connection, Kafka not ready) + +--- + +### Task 4: Makefile Reference (`docs/makefile-reference.md`) + +**Files:** +- Create: `docs/makefile-reference.md` +- Read: `Makefile` (complete) + +**Step 1: Write Makefile reference** + +Must cover every target organized by category: + +- **Preflight Checks** — `check-db`, `check-java` with what they verify +- **Infrastructure** — `infra`, `infra-kafka`, `infra-down`, `infra-logs`, `infra-ps` with Docker Compose details +- **Configuration** — `config` with template system explanation, variable injection from gradle.properties +- **Build** — `build`, `build-lib`, `build-server`, `build-worker`, `test`, `test-lib` with Gradle commands underneath +- **Individual Services** — `run-config`, `run-auth`, `run-das`, `run-edge`, `run-server`, `run-worker`, `run-async` with ports and startup order +- **Orchestrated Startup** — `up`, `up-local`, `up-kafka`, `down`, `status`, `logs` with PID management, background processes +- **Utilities** — `portal`, `loadtest`, `loadtest.send`, `loadtest.receive`, `mock`, `purge`, `help` + +For each target: description, usage example, dependencies, key environment variables. 
+ +Include **dependency order diagram**: +``` +make config → make infra → make up + ├── run-config (8888, if not up-local) + ├── run-auth (8094) + ├── run-das (8092) + ├── run-edge (8096) + ├── run-async (8099) + ├── run-server (8101) + └── run-worker (8102) +``` + +--- + +### Task 5: Configuration Reference (`docs/configuration.md`) + +**Files:** +- Create: `docs/configuration.md` +- Read: `rxconfig/*.yaml`, `rxconfig/*.yaml.template`, `gradle.properties` + +**Step 1: Write configuration reference** + +Must cover: +- **Configuration hierarchy** — Spring Cloud Config Server → application YAML → application-local.properties +- **Template system** — `.yaml.template` files, `make config` variable injection, `__VARIABLE__` placeholders +- **File-by-file reference:** + - `application.yaml` — global Spring properties + - `das.yaml` — DAS database connection (injected from gradle.properties) + - `worker.yaml` — Kafka bootstrap, dashboard toggle + - `obconnector.yaml` — connector instance, tokens, async-api-url + - `obconnsrv.yaml` — server config + - `auth.yaml` — JWT keys, token config + - `edge.yaml` — gateway routes + - `asyncprocess.yaml` — async processor config +- **Key properties table** — property name, default, description, which service uses it +- **Local overrides** — `application-local.properties` in each module, `spring.profiles.active=local` +- **Environment variables** — JAVA_HOME, SPRING_PROFILES_ACTIVE, etc. 
+ +--- + +### Task 6: Infrastructure (`docs/infrastructure.md`) + +**Files:** +- Create: `docs/infrastructure.md` +- Read: `modules/com.etendorx.integration.obconnector/infraestructure/docker-compose*.yml` + +**Step 1: Write infrastructure doc** + +Must cover: +- **Two infrastructure modes:** + - Redpanda (default, lightweight) — `docker-compose.redpanda.yml` + - Full Kafka — `docker-compose.yml` +- **Services table** per compose file — container, image, port, purpose +- **Redpanda stack:** + - Redpanda broker (:29092) — Kafka-compatible, single node + - Redpanda Console (:9093) — Topic browser, consumer groups + - Kafka Connect (:8083) — Debezium CDC connectors + - Jaeger (:16686) — Distributed tracing +- **Full Kafka stack:** + - Zookeeper (:22181), Kafka (:29092), Kafka UI (:9093) + - Debezium Postgres (:5465), Kafka Connect (:8083), Connect UI (:8002) + - Jaeger (:16686) +- **Kafka topics** — `obconnector.send`, `obconnector.receive`, DLT topics, async process topics +- **Debezium CDC** — how Change Data Capture works with PostgreSQL WAL +- **Topic management** — `make purge` to reset topics +- **Ports reference table** — all ports used by infra + services + +--- + +## Wave 2: Connector Docs — Core (5 files, parallelizable) + +All files in `modules/com.etendorx.integration.obconnector/docs/` + +### Task 7: Connector INDEX.md + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/INDEX.md` + +**Step 1: Write connector index** + +- OBConnector overview — bidirectional sync between Etendo ERP and external systems +- Links to all 9 connector doc files +- Link back to platform docs at `../../../docs/INDEX.md` +- Quick reference: sub-project table (name, purpose, key classes) + +--- + +### Task 8: Connector Architecture (`docs/architecture.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/architecture.md` +- Read: All `build.gradle` files in sub-projects + +**Step 1: Write connector architecture doc** + +Must cover: 
+- **Module structure** — 5 sub-projects with purpose: + - `common` — Interfaces and models (SyncWorkflow, SyncActivities, SynchronizationEntity) + - `lib` — Core engine (workflow runners, Kafka integration, dashboard, resilience) + - `server` — REST API entry point (SyncController, port 8101) + - `worker` — Sync execution (converters, operations, filters, DAS/external HTTP) + - `loadtest` — Performance testing (Send/Receive modes) +- **Dependency diagram:** + ``` + common ← lib ← worker + ← server (common only) + ← loadtest + ``` +- **Class hierarchy** — key interfaces and their implementations +- **Pipeline pattern** — MAP → PRE_LOGIC → SYNC → POST_LOGIC → PROCESS_DATA → POST_ACTION +- **Two workflow directions:** + - Send: Etendo DB change → Debezium CDC → Kafka → Worker → External System + - Receive: External System → HTTP POST → Server → Kafka → Worker → DAS → Etendo DB +- **ASCII data flow diagram** for each direction + +--- + +### Task 9: Workflows (`docs/workflows.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/workflows.md` +- Read: `SyncWorkflowBase.java`, `ReceiveWorkflowRunner.java`, `SendWorkflowRunner.java`, all step implementations + +**Step 1: Write workflows doc** + +Must cover for **each workflow** (Send and Receive): + +**Receive Workflow (External → Etendo):** +- Entry: `POST /api/sync/{modelName}` → SyncController → SyncServices → AsyncApi (Kafka) +- Worker: KafkaListener on `obconnector.receive` → ReceiveWorkflowRunner +- Filters: ReceiveLogicFilter → ReceiveOrganizationFilter → ReceiveClientFilter +- Pipeline steps: + - **MAP** (DasConverter): Lookup external→internal ID via `GET /connector/ETRX_instance_externalid/...`, determine POST/PUT, resolve FK fields via `subEntityMap()` + - **PRE_LOGIC** (DasPreLogic): No-op (extensible) + - **SYNC** (DasOperation): `POST/PUT {dasUrl}/{projection}/{entity}` to DAS, returns created/updated entity + - **POST_LOGIC**: No-op (extensible) + - **PROCESS_DATA** 
(DasProcessData): Store ID mapping via `POST /connector/ETRX_instance_externalid` + - **POST_ACTION**: Execute registered WorkerActions +- Step messages: What `DashboardEventRecorder.setStepDetail()` reports at each step +- Error handling: catch(Exception) → dashboard event → compensate → DLT + +**Send Workflow (Etendo → External):** +- Entry: Debezium CDC → Kafka topic → SendWorkflowRunner +- Filters: SendLogicFilter → SendOrganizationFilter → SendClientFilter +- Pipeline steps: + - **MAP** (SendDasConverter): Extract internal ID, lookup external ID, fetch entity from DAS, resolve FK fields + - **PRE_LOGIC** (SendDasPreLogic): No-op (extensible) + - **SYNC** (SendDasOperation): `POST/PUT` to external system via ExternalRequestService + - **POST_LOGIC**: No-op (extensible) + - **PROCESS_DATA** (SendDasProcessData): Store ID mapping + - **POST_ACTION**: Execute registered WorkerActions +- Step messages at each step + +--- + +### Task 10: API Reference (`docs/api-reference.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/api-reference.md` +- Read: `SyncController.java`, `SyncServices.java` + +**Step 1: Write API reference** + +Must cover: +- **Base URL:** `http://localhost:8101/api/sync` +- **Authentication:** `X-TOKEN` header (JWT) + +**Endpoints:** + +1. `POST /api/sync/{modelName}` — Create entity + - Path params: modelName (must match tableName in connector config, e.g., "BusinessPartner") + - Headers: X-TOKEN, Content-Type: application/json + - Body: `{ "data": { "id": "external-id", "name": "...", ... } }` + - Response: `{ "workflowId": "uuid", "status": "ACCEPTED" }` + - curl example + +2. `PUT /api/sync/{modelName}/{entityId}` — Update entity + - Path params: modelName, entityId (external ID) + - Same headers/body structure + - curl example + +3. `GET /api/sync/status/{workflowId}` — Check workflow status + - Response: `{ "status": "DONE|STARTED|ERROR|RETRY", "steps": [...] 
}` + - curl example + +- **Error responses** — 400, 401, 404, 500 with examples +- **Model name resolution** — how modelName maps to connector instance tableName in ExternalSystemConfiguration + +--- + +### Task 11: Dashboard (`docs/dashboard.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/dashboard.md` +- Read: `DashboardController.java`, `DashboardEventRecorder.java`, `SyncEventStore.java`, all templates + +**Step 1: Write dashboard doc** + +Must cover: +- **Enable:** `dashboard.enabled=true` in worker application properties +- **Access:** `http://localhost:8102/dashboard` +- **Technology:** Thymeleaf + HTMX (polling every 2-5s) +- **Sections:** + - **Metrics panel** — Total messages, processed, errors, filtered, in-progress counts + - **Resources panel** — CPU/memory usage of the worker JVM + - **Kafka Lag panel** — Consumer group lag per topic/partition, total lag + - **Events feed** — Live workflow events, expandable rows showing step-by-step progress + - **DLT panel** — Dead letter messages with replay button +- **REST endpoints:** + - `GET /dashboard` — Main page + - `GET /dashboard/metrics` — Metrics fragment + - `GET /dashboard/resources` — Resources fragment + - `GET /dashboard/lag` — Kafka lag fragment + - `GET /dashboard/events` — Events fragment + - `GET /dashboard/dlt` — DLT fragment +- **Step detail messages** — What each step reports: + - MAP: entity, extId, verb, FK resolution + - SYNC: HTTP method, URL, status code, internalId + - PROCESS_DATA: ID map stored/skipped, IDs +- **Status badges:** STARTED, PROCESSING, DONE, ERROR, RETRY, FILTERED, DLT +- **DashboardEventRecorder API:** + - `setStepDetail(String)` — ThreadLocal, set from step implementations + - `getStepDetail()` / `clearStepDetail()` — Lifecycle + - `recordStepComplete(runId, workflow, entity, step, syncEntity)` — Called by SyncWorkflowBase + - `recordWorkflowError(runId, workflow, entity, errorMessage)` — Called on exception + +--- + +## Wave 3: 
Connector Docs — Advanced (5 files, parallelizable) + +### Task 12: Worker Internals (`docs/worker.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/worker.md` +- Read: Worker classes (filters, converters, operations, DasRequestService, ExternalRequestService) + +**Step 1: Write worker doc** + +Must cover: +- **Worker overview** — Spring Boot app on :8102, Kafka consumer, executes sync workflows +- **Filters** — Chain of filters before workflow execution: + - `SendLogicFilter` / `ReceiveLogicFilter` — Check model name matches connector config + - `SendOrganizationFilter` / `ReceiveOrganizationFilter` — Organization-based filtering + - `SendClientFilter` / `ReceiveClientFilter` — Client-based filtering +- **Converters** — MAP step implementations: + - `DasConverter` (Receive) — External→Etendo format, FK resolution + - `SendDasConverter` (Send) — Etendo→External format, FK resolution +- **Operations** — SYNC step implementations: + - `DasOperation` (Receive) — POST/PUT to DAS API + - `SendDasOperation` (Send) — POST/PUT to external system +- **DasRequestService** — All HTTP calls to DAS: + - `getEtendoEntity()` — Lookup external→internal ID mapping + - `getExternalEntity()` — Lookup internal→external ID mapping + - `getExternalEntityMap()` — Get full entity representation + - `insertEntity()` — POST to DAS + - `updateEntity()` — PUT to DAS + - `addEtendoIdMap()` — Store ID mapping + - `getProjectionName()` — Resolve projection from connector config +- **ExternalRequestService** — HTTP calls to external system +- **SubEntitySynchronizationUtils** — FK field resolution: + - How `subEntityMap()` works + - JSONPath expressions for FK fields + - External ID → Internal ID lookup for each FK + - Fallback behavior when mapping not found +- **WorkerConfigService** — Configuration caching, role/org extraction from JWT + +--- + +### Task 13: Load Testing (`docs/loadtest.md`) + +**Files:** +- Create: 
`modules/com.etendorx.integration.obconnector/docs/loadtest.md` +- Read: `LoadTestRunner.java`, `ReceiveLoadTestRunner.java`, `MessageGenerator.java`, `MockReceiverServer.java` + +**Step 1: Write loadtest doc** + +Must cover: +- **Two modes:** + - `loadtest.send` — Simulates Debezium CDC messages to Kafka + - `loadtest.receive` — Sends HTTP POST to Server endpoint +- **Send load test** (LoadTestRunner): + - Publishes Debezium-format messages to Kafka topic + - Configurable: threads, messages-per-thread, topic + - MessageGenerator: creates realistic CDC payloads +- **Receive load test** (ReceiveLoadTestRunner): + - HTTP POST to `http://localhost:8101/api/sync/BusinessPartner` + - Generates BusinessPartner payloads with all required fields + - Optional status polling after completion +- **Configuration properties:** + - `loadtest.mode` — send/receive + - `loadtest.enabled` — enable/disable (default true for send) + - `loadtest.threads` — concurrent thread count + - `loadtest.messages-per-thread` — messages per thread + - `loadtest.delay-ms` — delay between messages + - `loadtest.poll-status` — poll workflow status (receive only) + - `loadtest.server.url` — server URL (receive only) + - `loadtest.server.token` — auth token (receive only) + - `loadtest.model` — entity model name (receive only) +- **Makefile targets:** + - `make loadtest` — Run both sequentially + - `make loadtest.send` — Send only + - `make loadtest.receive` — Receive only +- **Mock receiver:** + - `make mock` — Start MockReceiverServer on :8090 + - Logs all incoming requests + - Useful for testing Send workflow without real external system + +--- + +### Task 14: Resilience (`docs/resilience.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/resilience.md` +- Read: Resilience classes in lib + +**Step 1: Write resilience doc** + +Must cover: +- **Message deduplication** (MessageDeduplicationService): + - In-memory ConcurrentHashMap with TTL (300s default) + - Dedup key: 
message ID or composite key + - Prevents re-processing of same message on Kafka rebalance +- **Dead Letter Topic (DLT):** + - Failed messages after retry exhaustion go to DLT + - `DltReplayService` — replay individual or all DLT messages + - Dashboard DLT panel for manual replay +- **HTTP retry** (HttpRetryHelper): + - 3 attempts, 1s initial delay, 2x exponential backoff + - Wraps OkHttp and java.net.http calls + - `withRetry(operationName, callable)` — static method +- **Saga compensation** (SagaManager): + - Register compensation actions per runId + - On failure: execute compensations in reverse order + - Tracked in-memory per workflow execution +- **Idempotent step execution:** + - `lastCompletedStep` on SynchronizationEntity + - On retry, steps up to lastCompletedStep are skipped + - Prevents re-executing successful steps +- **Health indicator** (SyncHealthIndicator): + - DOWN after 10+ consecutive errors + - DOWN after 5min without successful processing + - Exposed via `/actuator/health` +- **Metrics** (SyncMetricsService): + - Micrometer counters: messages received, processed, errors, filtered + - Exposed via `/actuator/metrics` +- **Distributed tracing:** + - SLF4J MDC: runId, workflow, entity propagated across threads + - CorrelationHeaders for cross-service tracing + +--- + +### Task 15: Extending the Connector (`docs/extending.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/extending.md` + +**Step 1: Write extending guide** + +Must cover: +- **Adding a new entity type:** + 1. Configure in Etendo AD: ETRX_Instance_Connector mappings + 2. Set tableName, integrationDirection, projectionName + 3. 
Worker auto-discovers via WorkerConfigService +- **Custom converters:** + - Implement `SyncConverters` interface + - Use `@Qualifier("send.converter")` or `@Qualifier("receive.converter")` + - Use `@Order(N)` for priority + - `appliesTo(entityName)` — return true for your entity + - `convert(data, entityName)` — transform data +- **Custom operations:** + - Implement `SyncOperation` interface + - `appliesTo(entityName)` — entity routing + - `operation(data)` — execute sync +- **Custom pre/post logic:** + - Implement `SyncPreLogic` or `SyncProcessData` + - Register with appropriate qualifier +- **Custom post-actions:** + - Implement `WorkerAction` interface + - `@Qualifier("send.post.action")` or `@Qualifier("receive.post.action")` + - Example: `BookOrder` — books an order after sync +- **Custom filters:** + - Implement `KafkaChangeFilter` + - `isValid(kafkaChange)` — return false to skip message + +--- + +### Task 16: Connector Configuration (`docs/configuration.md`) + +**Files:** +- Create: `modules/com.etendorx.integration.obconnector/docs/configuration.md` +- Read: All `application*.properties` in server and worker + +**Step 1: Write connector configuration doc** + +Must cover: +- **Server configuration** (`application-local.properties` in server): + - `server.port` (default 8101) + - `auth.disabled` — bypass auth for development + - `async-api-url` — AsyncProcess service URL +- **Worker configuration** (`application-local.properties` in worker): + - `server.port` (default 8102) + - `das.url` — DAS service URL + - `classic.url` / `openbravo.url` — Etendo Classic URL + - `token` — JWT token for DAS authentication + - `public-key` — EC public key for JWT validation + - `classic.token` — Token for Classic API + - `connector.instance` — UUID of the ETRX_Instance_Connector + - `connector.user` — User ID for operations + - `spring.kafka.bootstrap-servers` — Kafka broker + - `dashboard.enabled` — Enable dashboard UI + - `auth.disabled` — Bypass incoming HTTP auth +- 
**Connector instance setup:** + - How to find your connector instance UUID + - ETRX_Instance_Connector table structure + - Table mappings: tableName, integrationDirection, projectionName +- **Token setup:** + - Generate JWT from auth.yaml private key + - Derive public key: `openssl ec -pubout` + - Set in worker properties +- **External system configuration:** + - ExternalSystemConfiguration loaded from DAS + - Cached in WorkerConfigService + - Mappings define which entities sync in which direction + +--- + +## Wave 4: Commit + +### Task 17: Final commit and push + +**Step 1: Review all files exist** + +Verify all 16 markdown files are created. + +**Step 2: Commit platform docs** + +```bash +cd /Users/sebastianbarrozo/Documents/work/epic/obconnector/etendo_rx +git add docs/INDEX.md docs/architecture.md docs/getting-started.md docs/makefile-reference.md docs/configuration.md docs/infrastructure.md +git commit -m "Add EtendoRX platform technical documentation" +``` + +**Step 3: Commit connector docs** + +```bash +cd modules/com.etendorx.integration.obconnector +git add docs/ +git commit -m "Add OBConnector technical documentation" +git push origin feature/ETP-3459 +``` + +**Step 4: Push parent** + +```bash +cd /Users/sebastianbarrozo/Documents/work/epic/obconnector/etendo_rx +git add . 
+git commit -m "Update submodule reference for documentation" +git push origin feature/ETP-3459 +``` diff --git a/libs/com.etendorx.das_core/src/main/java/com/etendorx/entities/mapper/lib/BindedRestController.java b/libs/com.etendorx.das_core/src/main/java/com/etendorx/entities/mapper/lib/BindedRestController.java index 90f4c1df..59939d1b 100644 --- a/libs/com.etendorx.das_core/src/main/java/com/etendorx/entities/mapper/lib/BindedRestController.java +++ b/libs/com.etendorx.das_core/src/main/java/com/etendorx/entities/mapper/lib/BindedRestController.java @@ -112,7 +112,8 @@ public ResponseEntity get(@PathVariable("id") String id) { if (entity != null) { return new ResponseEntity<>(entity, HttpStatus.OK); } - throw new ResponseStatusException(HttpStatus.NOT_FOUND, "Record not found"); + throw new ResponseStatusException(HttpStatus.NOT_FOUND, + "Record not found: entity=" + repository.getClass().getSimpleName() + " id=" + id); } /** diff --git a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/KafkaMessageUtil.java b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/KafkaMessageUtil.java index 44810f99..8bb09673 100644 --- a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/KafkaMessageUtil.java +++ b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/KafkaMessageUtil.java @@ -61,10 +61,11 @@ private static String toJson(AsyncProcessExecution asyncProcessExecution) { public void saveProcessExecution(Object bodyChanges, String mid, String description, AsyncProcessState state) { + String rawParams = bodyChanges != null ? bodyChanges.toString() : ""; AsyncProcessExecution process = AsyncProcessExecution.builder() .asyncProcessId(mid) .description(description) - .params(bodyChanges != null ? bodyChanges.toString() : "") + .params(rawParams.length() > 1000 ? rawParams.substring(0, 1000) + "..." 
: rawParams) .time(new Date()) .state(state) .build(); diff --git a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/model/AsyncProcess.java b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/model/AsyncProcess.java index e34e14fb..b542601a 100644 --- a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/model/AsyncProcess.java +++ b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/model/AsyncProcess.java @@ -45,7 +45,12 @@ public AsyncProcess process(AsyncProcessExecution asyncProcessExecution) { return this; } + private static final int MAX_EXECUTIONS = 50; + private void addExecution(AsyncProcessExecution transactionClone) { executions.add(transactionClone); + while (executions.size() > MAX_EXECUTIONS) { + executions.pollLast(); + } } } diff --git a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/topology/AsyncProcessTopology.java b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/topology/AsyncProcessTopology.java index 8820e03a..2c145282 100644 --- a/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/topology/AsyncProcessTopology.java +++ b/libs/com.etendorx.lib.kafka/src/main/java/com/etendorx/lib/kafka/topology/AsyncProcessTopology.java @@ -20,6 +20,7 @@ import com.etendorx.lib.kafka.model.AsyncProcessExecution; import com.etendorx.lib.kafka.model.AsyncProcessState; import com.etendorx.lib.kafka.model.JsonSerde; +import lombok.extern.slf4j.Slf4j; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Bytes; @@ -31,6 +32,7 @@ import org.apache.kafka.streams.kstream.Produced; import org.apache.kafka.streams.state.KeyValueStore; +@Slf4j public class AsyncProcessTopology { public static final String ASYNC_PROCESS_EXECUTION = "async-process-execution"; @@ -45,6 +47,14 @@ public static void buildTopology(StreamsBuilder streamsBuilder) { KStream asyncProcessExecutionStream 
= streamsBuilder.stream( ASYNC_PROCESS_EXECUTION, Consumed.with(Serdes.String(), asyncProcessExecutionSerdes)) + .filter((key, value) -> { + if (key == null || value == null) { + log.warn("Skipping async-process-execution record with null key={} or null value. " + + "This usually means a malformed message was published to the topic.", key); + return false; + } + return true; + }) .groupByKey() .aggregate(AsyncProcess::new, (key, value, aggregate) -> aggregate.process(value), Materialized.>as(ASYNC_PROCESS_STORE) diff --git a/libs/com.etendorx.utils.auth/src/main/java/com/etendorx/utils/auth/key/context/FilterContext.java b/libs/com.etendorx.utils.auth/src/main/java/com/etendorx/utils/auth/key/context/FilterContext.java index b4d5888c..297500e0 100644 --- a/libs/com.etendorx.utils.auth/src/main/java/com/etendorx/utils/auth/key/context/FilterContext.java +++ b/libs/com.etendorx.utils.auth/src/main/java/com/etendorx/utils/auth/key/context/FilterContext.java @@ -53,12 +53,25 @@ public class FilterContext extends OncePerRequestFilter { String publicKey; @Value("${auth.token:}") String tokenYaml; + @Value("${auth.disabled:false}") + boolean authDisabled; @Autowired(required = false) private JwtClassicConfig jwtClassicConfig; @Override protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException { + if (authDisabled) { + userContext.setClientId("0"); + userContext.setOrganizationId("0"); + userContext.setUserId("100"); + userContext.setRoleId("0"); + userContext.setRestMethod(request.getMethod()); + userContext.setRestUri(request.getRequestURI()); + AppContext.setCurrentUser(userContext); + filterChain.doFilter(request, response); + return; + } String token = request.getHeader(HEADER_TOKEN); if (StringUtils.isEmpty(token)) { String authHeader = request.getHeader(HEADER_AUTHORIZATION); diff --git a/modules_core/com.etendorx.asyncprocess/build.gradle 
b/modules_core/com.etendorx.asyncprocess/build.gradle index 96cfb1fb..80cc2c7f 100644 --- a/modules_core/com.etendorx.asyncprocess/build.gradle +++ b/modules_core/com.etendorx.asyncprocess/build.gradle @@ -71,6 +71,15 @@ bootBuildImage { } } +bootRun { + debugOptions { + enabled = Boolean.parseBoolean((findProperty('debugEnabled') ?: enabled.get()) as String) + suspend = Boolean.parseBoolean((findProperty('debugSuspend') ?: suspend.get()) as String) + server = Boolean.parseBoolean((findProperty('debugServer') ?: server.get()) as String) + port = Integer.valueOf((findProperty('debugPort') ?: port.get()) as String) + } +} + ext { set('springCloudVersion', "2022.0.4") } diff --git a/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/config/StreamConfiguration.java b/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/config/StreamConfiguration.java index 2efaad49..dea21186 100644 --- a/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/config/StreamConfiguration.java +++ b/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/config/StreamConfiguration.java @@ -66,7 +66,7 @@ public class StreamConfiguration { private String kafkaStreamsStateDir; // The bootstrap servers for Kafka. 
- @Value("${bootstrap_server:kafka:9092}") + @Value("${spring.kafka.bootstrap-servers:${bootstrap_server:localhost:29092}}") private String bootstrapServer; /** @@ -83,7 +83,7 @@ public Properties kafkaStreamsConfiguration() { properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); properties.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "true"); - properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0"); + properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "10485760"); properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, kafkaStreamsHostInfo); properties.put(StreamsConfig.STATE_DIR_CONFIG, kafkaStreamsStateDir); return properties; diff --git a/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/controller/AsyncProcessController.java b/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/controller/AsyncProcessController.java index 271767aa..ba7e2475 100644 --- a/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/controller/AsyncProcessController.java +++ b/modules_core/com.etendorx.asyncprocess/src/main/java/com/etendorx/asyncprocess/controller/AsyncProcessController.java @@ -98,7 +98,7 @@ public AsyncProcess broadcastMessage(@Payload AsyncProcess textMessageDTO) { @Operation(summary = "Create the async process") @PostMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseStatus(HttpStatus.ACCEPTED) - public Map index(@RequestBody Map> bodyChanges, + public Map index(@RequestBody Map bodyChanges, @RequestParam(required = false, name = "process") String processName, @RequestParam(required = false, name = "run_id") String runId) throws Exception { Map ret = new HashMap<>(); diff --git a/modules_core/com.etendorx.asyncprocess/src/main/resources/application-local.properties 
b/modules_core/com.etendorx.asyncprocess/src/main/resources/application-local.properties new file mode 100644 index 00000000..59d5abd9 --- /dev/null +++ b/modules_core/com.etendorx.asyncprocess/src/main/resources/application-local.properties @@ -0,0 +1,9 @@ +server.port=8099 +bootstrap_server=localhost:29092 +spring.cloud.stream.kafka.binder.brokers=localhost:29092 +kafka.streams.host.info=localhost:8099 +kafka.streams.state.dir=/tmp/kafka-streams/async-process-local +spring.config.import=optional:configserver:http://localhost:8888 +management.endpoints.web.exposure.include=health,metrics +auth.disabled=true +spring.kafka.bootstrap-servers=localhost:29092 diff --git a/modules_core/com.etendorx.asyncprocess/src/main/resources/application.properties b/modules_core/com.etendorx.asyncprocess/src/main/resources/application.properties index 83d2d0e4..8870c1c6 100644 --- a/modules_core/com.etendorx.asyncprocess/src/main/resources/application.properties +++ b/modules_core/com.etendorx.asyncprocess/src/main/resources/application.properties @@ -1,4 +1,4 @@ config.server.url=http://localhost:8888 -spring.config.import=configserver:${config.server.url} +spring.config.import=optional:configserver:${config.server.url} spring.application.name=asyncprocess server.port=8099 diff --git a/modules_core/com.etendorx.auth/build.gradle b/modules_core/com.etendorx.auth/build.gradle index 1ff0dbdc..73fbb8a0 100644 --- a/modules_core/com.etendorx.auth/build.gradle +++ b/modules_core/com.etendorx.auth/build.gradle @@ -71,6 +71,16 @@ dependencies { // Swagger implementation 'org.springdoc:springdoc-openapi-starter-webmvc-ui:2.2.0' + + // Google oAuth + implementation(platform("com.google.http-client:google-http-client-bom:2.0.0")) + implementation("com.google.http-client:google-http-client-gson") + + implementation(platform("com.google.api-client:google-api-client-bom:2.8.1")) + implementation("com.google.api-client:google-api-client") + + 
implementation("com.google.apis:google-api-services-sheets:v4-rev20250616-2.0.0") + implementation("com.google.apis:google-api-services-drive:v3-rev20250717-2.0.0") } ext { @@ -86,11 +96,14 @@ sourceSets { } /** - * To debug run with --debug-jvm -PdebugPort= - * ./gradlew com.etendorx.auth:bootRun --info --debug-jvm -PdebugPort=9994 + * To debug run with -PdebugEnabled=true -PdebugSuspend=false -PdebugPort= + * ./gradlew com.etendorx.auth:bootRun --info -PdebugEnabled=true -PdebugSuspend=false -PdebugPort=9994 */ bootRun { debugOptions { + enabled = Boolean.parseBoolean((findProperty('debugEnabled') ?: enabled.get()) as String) + suspend = Boolean.parseBoolean((findProperty('debugSuspend') ?: suspend.get()) as String) + server = Boolean.parseBoolean((findProperty('debugServer') ?: server.get()) as String) port = Integer.valueOf((findProperty('debugPort') ?: port.get()) as String) } } diff --git a/modules_core/com.etendorx.auth/src/main/resources/application-local.properties b/modules_core/com.etendorx.auth/src/main/resources/application-local.properties new file mode 100644 index 00000000..e8c3f3ab --- /dev/null +++ b/modules_core/com.etendorx.auth/src/main/resources/application-local.properties @@ -0,0 +1,8 @@ +das.url=http://localhost:8092 +classic.url=http://localhost:8080 +token=eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMCIsImNsaWVudCI6IjAiLCJyb2xlIjoiMCIsIm9yZ2FuaXphdGlvbiI6IjAiLCJ3YXJlaG91c2UiOiI0RDQ1RkU0QzUxNTA0MTcwOTA0N0Y1MUQxMzlBMjFBQyIsImlhdCI6MTc3MzA5OTM4OX0.9Gm51jBvRquDvzr-M3UKJJz3unh6DgSKVpasoVcMmBmN4pyX4Y6yV-Eknt8--h2t6hR8nh4iw3wBVQWAU_sHiQ 
+admin.token=eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMTAwIiwiY2xpZW50IjoiMjNDNTk1NzVCOUNGNDY3Qzk2MjA3NjBFQjI1NUIzODkiLCJyb2xlIjoiNDJEMEVFQjFDNjZGNDk3QTkwREQ1MjZEQzU5N0U2RjAiLCJvcmdhbml6YXRpb24iOiI3QkFCQTVGRjgwNDk0Q0FGQTU0REVCRDIyRUM0NkYwMSIsIndhcmVob3VzZSI6IjlDRjk4QTE4QkM3NTRCOTk5OThFNDIxRjkxQzVGRTEyIiwiaWF0IjoxNzczMDk5Mzg5fQ.QPNzs1Xr3xKYJmQz3WjOwyYIdyfkYzv3FJ4PrES2U3-j3PabIaN4SwHQ-5b2-lFAOQ5zV9-bzYjvDDykWxcWew +private-key=-----BEGIN PRIVATE KEY-----MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQghw7e6vE9io6Y6HTHgHD6RZ7o74ZRHmfVpQIEVE4qo6ihRANCAARi7S13nL4F2GSFm4KhRiEV/TP/6K23be2/ZLJmh1tihweopvKjMTGnwPOP2n5xgSQo68LJP2FzwUzZ2kQNrTEh-----END PRIVATE KEY----- +public-key=MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYu0td5y+BdhkhZuCoUYhFf0z/+itt23tv2SyZodbYocHqKbyozExp8Dzj9p+cYEkKOvCyT9hc8FM2dpEDa0xIQ== +config.server.url=http://localhost:8888 +spring.config.import=optional:configserver:${config.server.url} diff --git a/modules_core/com.etendorx.auth/src/main/resources/application.properties b/modules_core/com.etendorx.auth/src/main/resources/application.properties index 216ccb10..b5679790 100644 --- a/modules_core/com.etendorx.auth/src/main/resources/application.properties +++ b/modules_core/com.etendorx.auth/src/main/resources/application.properties @@ -1,3 +1,3 @@ spring.application.name=auth #config.server.url=http://localhost:8080/etendo/buildConfig -spring.config.import=configserver:${config.server.url} +spring.config.import=optional:configserver:${config.server.url} diff --git a/modules_core/com.etendorx.auth/src/test/resources/application.properties b/modules_core/com.etendorx.auth/src/test/resources/application.properties index fa816a88..7113db91 100644 --- a/modules_core/com.etendorx.auth/src/test/resources/application.properties +++ b/modules_core/com.etendorx.auth/src/test/resources/application.properties @@ -10,5 +10,5 @@ logging.level.root=DEBUG private.key=-----BEGIN PRIVATE 
KEY-----\\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgoP6uMq5AuRjREe3o\\nUUeuh6LJTYWTnPvr7Ds8+mstk5+hRANCAASjRJgZeEBfLflXzTYeSFuPSlwBGlVK\\nXDY1+baWJM2L0E+o3NLyLWFY1qjfudRUY8H3AkSoNY3KmfT67h7We56F\\n-----END PRIVATE KEY----- public.key=-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEo0SYGXhAXy35V802Hkhbj0pcARpV\\nSlw2Nfm2liTNi9BPqNzS8i1hWNao37nUVGPB9wJEqDWNypn0+u4e1nuehQ==\\n-----END PUBLIC KEY----- das.url:http://localhost:8092 -token:eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMTAwIiwiY2xpZW50IjoiMjNDNTk1NzVCOUNGNDY3Qzk2MjA3NjBFQjI1NUIzODkiLCJyb2xlIjoiNDJEMEVFQjFDNjZGNDk3QTkwREQ1MjZEQzU5N0U2RjAiLCJvcmdhbml6YXRpb24iOiIwIiwid2FyZWhvdXNlIjoiNEQ0NUZFNEM1MTUwNDE3MDkwNDdGNTFEMTM5QTIxQUMiLCJpYXQiOjE3NDQ4OTYxOTB9.A3tNu2NZc7u-zXhSSbp0_E-S1qX0Z1rZALoCe5c1E1g4ahXNisk6K9w8-mbdb1ZjJmHx0OXkbRFxC-GMQnBEog -admin.token=eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMTAwIiwiY2xpZW50IjoiMjNDNTk1NzVCOUNGNDY3Qzk2MjA3NjBFQjI1NUIzODkiLCJyb2xlIjoiNDJEMEVFQjFDNjZGNDk3QTkwREQ1MjZEQzU5N0U2RjAiLCJvcmdhbml6YXRpb24iOiI3QkFCQTVGRjgwNDk0Q0FGQTU0REVCRDIyRUM0NkYwMSIsIndhcmVob3VzZSI6IjlDRjk4QTE4QkM3NTRCOTk5OThFNDIxRjkxQzVGRTEyIiwiaWF0IjoxNzUxNjM1ODEzfQ.hYMT9R-s2Ot6qa1RxF8kzaudAHwthCN-m3OVuTVmNaHOBpBvwOzD0z4LfkOAGwZRvs09PP7en266koNMSJGi_Q +token:eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMTAwIiwiY2xpZW50IjoiMCIsInJvbGUiOiIwIiwib3JnYW5pemF0aW9uIjoiMCIsIndhcmVob3VzZSI6IjRENDVGRTRDNTE1MDQxNzA5MDQ3RjUxRDEzOUEyMUFDIiwiaWF0IjoxNzczMTY5MjMzfQ.foUEPnfhkxstq84ndRAQeCqWBdnJ0wvQ_kyzP0-VJNMxYKiDvSMm8jpiflemzU96dOXb3TmgcBdoUEfakTWElQ 
+admin.token=eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzd3MiLCJhdWQiOiJzd3MiLCJ1c2VyIjoiMTAwIiwiY2xpZW50IjoiMzkzNjNCMDkyMUJCNDI5M0I0ODM4Mzg0NDMyNUU4NEMiLCJyb2xlIjoiRTcxN0Y5MDJDNDRDNDU1NzkzNDYzNDUwNDk1RkYzNkIiLCJvcmdhbml6YXRpb24iOiIwIiwid2FyZWhvdXNlIjoiNUE2MUYzOEQ1N0YxNDdBM0EyOEU0QzYwNkI2NkRDNDciLCJpYXQiOjE3NzMxNjkzNDV9.MkSsjo-2sXFzO20EeIOogyqc_kr4_VmZ5rZN1b0Xdzx64CYHPb6yvznBblpNwrKsfIcFbFjESLHEiIZiwO2k8Q diff --git a/modules_core/com.etendorx.configserver/build.gradle b/modules_core/com.etendorx.configserver/build.gradle index 5a373ac9..53b508e0 100644 --- a/modules_core/com.etendorx.configserver/build.gradle +++ b/modules_core/com.etendorx.configserver/build.gradle @@ -42,6 +42,15 @@ dependencies { testImplementation 'org.springframework.boot:spring-boot-starter-test' } +bootRun { + debugOptions { + enabled = Boolean.parseBoolean((findProperty('debugEnabled') ?: enabled.get()) as String) + suspend = Boolean.parseBoolean((findProperty('debugSuspend') ?: suspend.get()) as String) + server = Boolean.parseBoolean((findProperty('debugServer') ?: server.get()) as String) + port = Integer.valueOf((findProperty('debugPort') ?: port.get()) as String) + } +} + dependencyManagement { imports { mavenBom "org.springframework.cloud:spring-cloud-dependencies:${springCloudVersion}" diff --git a/modules_core/com.etendorx.configserver/src/main/resources/application.properties b/modules_core/com.etendorx.configserver/src/main/resources/application.properties index c4e658f9..ffdcef3c 100644 --- a/modules_core/com.etendorx.configserver/src/main/resources/application.properties +++ b/modules_core/com.etendorx.configserver/src/main/resources/application.properties @@ -1 +1,5 @@ server.port=8888 + +# Use local config files instead of Git when running in this workspace. 
+spring.profiles.active=native +spring.cloud.config.server.native.searchLocations=file:../../rxconfig diff --git a/modules_core/com.etendorx.das/build.gradle b/modules_core/com.etendorx.das/build.gradle index ba77fc5f..9b530f1a 100644 --- a/modules_core/com.etendorx.das/build.gradle +++ b/modules_core/com.etendorx.das/build.gradle @@ -166,6 +166,9 @@ bootJar { bootRun { classpath += configurations.codegen debugOptions { + enabled = Boolean.parseBoolean((findProperty('debugEnabled') ?: enabled.get()) as String) + suspend = Boolean.parseBoolean((findProperty('debugSuspend') ?: suspend.get()) as String) + server = Boolean.parseBoolean((findProperty('debugServer') ?: server.get()) as String) port = Integer.valueOf((findProperty('debugPort') ?: port.get()) as String) } } diff --git a/modules_core/com.etendorx.das/src/main/java/com/etendorx/das/utils/DefaultFilters.java b/modules_core/com.etendorx.das/src/main/java/com/etendorx/das/utils/DefaultFilters.java index 822ba8ea..53003113 100644 --- a/modules_core/com.etendorx.das/src/main/java/com/etendorx/das/utils/DefaultFilters.java +++ b/modules_core/com.etendorx.das/src/main/java/com/etendorx/das/utils/DefaultFilters.java @@ -72,6 +72,13 @@ public static String addFilters(String sql, String userId, String clientId, Stri return sql; } + if (restMethod == null) { + log.error("[ addFilters ] restMethod is null — UserContext.restMethod was not set. " + + "Check FilterContext auth-disabled block or token parsing. sql={}", sql); + throw new IllegalArgumentException( + "restMethod is null in UserContext. 
Ensure FilterContext sets restMethod on the request."); + } + switch (restMethod) { case GET_METHOD: return replaceInQuery(sql, clientId, roleId, isActive); diff --git a/modules_core/com.etendorx.das/src/main/resources/application-local.properties b/modules_core/com.etendorx.das/src/main/resources/application-local.properties new file mode 100644 index 00000000..0fbc81b4 --- /dev/null +++ b/modules_core/com.etendorx.das/src/main/resources/application-local.properties @@ -0,0 +1,13 @@ +server.port=8092 +spring.datasource.url=jdbc:postgresql://localhost:5432/etendo +spring.datasource.username=tad +spring.datasource.password=tad +spring.jackson.serialization.FAIL_ON_EMPTY_BEANS=false +scan.basePackage=com.etendorx.integration.to_openbravo.mapping +post-upsert=true + +# Disable auth for local development +auth.disabled=true + +# Expose actuator metrics +management.endpoints.web.exposure.include=health,metrics diff --git a/modules_core/com.etendorx.edge/build.gradle b/modules_core/com.etendorx.edge/build.gradle index 0297b0a7..5eca6dcd 100644 --- a/modules_core/com.etendorx.edge/build.gradle +++ b/modules_core/com.etendorx.edge/build.gradle @@ -56,6 +56,9 @@ dependencyManagement { bootRun { debugOptions { + enabled = Boolean.parseBoolean((findProperty('debugEnabled') ?: enabled.get()) as String) + suspend = Boolean.parseBoolean((findProperty('debugSuspend') ?: suspend.get()) as String) + server = Boolean.parseBoolean((findProperty('debugServer') ?: server.get()) as String) port = Integer.valueOf((findProperty('debugPort') ?: port.get()) as String) } } diff --git a/modules_core/com.etendorx.edge/src/main/resources/application-local.properties b/modules_core/com.etendorx.edge/src/main/resources/application-local.properties new file mode 100644 index 00000000..0d3dee9a --- /dev/null +++ b/modules_core/com.etendorx.edge/src/main/resources/application-local.properties @@ -0,0 +1,4 @@ +obconnector.url=http://localhost:8101 +subapp.url=http://localhost:3000 
+classic.context.name=etendo_conn +etendorx.auth.url=http://localhost:8094 \ No newline at end of file diff --git a/modules_core/com.etendorx.edge/src/main/resources/application.properties b/modules_core/com.etendorx.edge/src/main/resources/application.properties index deaef6ea..9e76dbd2 100644 --- a/modules_core/com.etendorx.edge/src/main/resources/application.properties +++ b/modules_core/com.etendorx.edge/src/main/resources/application.properties @@ -1,3 +1,3 @@ config.server.url=http://localhost:8888 -spring.config.import=configserver:${config.server.url} +spring.config.import=optional:configserver:${config.server.url} spring.application.name=edge diff --git a/modules_core/com.etendorx.webflux/src/main/resources/application.properties b/modules_core/com.etendorx.webflux/src/main/resources/application.properties index 7543ca0b..c82ac8ff 100644 --- a/modules_core/com.etendorx.webflux/src/main/resources/application.properties +++ b/modules_core/com.etendorx.webflux/src/main/resources/application.properties @@ -1,3 +1,3 @@ -spring.application.name=webflux config.server.url=http://localhost:8888 -spring.config.import=configserver:${config.server.url} +spring.config.import=optional:configserver:${config.server.url} +spring.application.name=webflux diff --git a/portal/index.html b/portal/index.html new file mode 100644 index 00000000..75b26bc3 --- /dev/null +++ b/portal/index.html @@ -0,0 +1,128 @@ + + + + +EtendoRX Dev Portal + + + + + + + + +
+
+

Select a service

+ +
+
+

Dev Portal

+

Click a service on the left to open it here.

+
+ +
+ + + + + diff --git a/rxconfig/edge.yaml.template b/rxconfig/edge.yaml.template index 204e0b3e..99c688fc 100644 --- a/rxconfig/edge.yaml.template +++ b/rxconfig/edge.yaml.template @@ -26,6 +26,47 @@ spring: filters: - RewritePath=/login, /api/authenticate + # Classic path + - id: classic_path_route + uri: ${classic.url} + predicates: + - Method=GET, POST, DELETE, HEAD, PATCH + - Path=/${classic.context.name}/** + filters: + - RewritePath=/${classic.context.name}/(?.*), /${classic.context.name}/$\{segment} + - RemoveResponseHeader=Location + + # Das path + - id: das_path_route + uri: ${das.url} + predicates: + - Method=GET, PUT, POST, DELETE, HEAD, PATCH + - Path=/das/** + filters: + #- JwtAuthenticationFilter + - RewritePath=/das/(?.*), /$\{segment} + - RemoveResponseHeader=Location + + # Subapp debug mode path + - id: subapp_debug_mode + uri: ${subapp.url} + predicates: + - Method=GET, POST, DELETE, HEAD, PATCH + - Path=/subapp/** + filters: + #- JwtAuthenticationFilter + - RewritePath=/subapp/(?.*), /$\{segment} + - RemoveResponseHeader=Location + + # CUSTOM (API Connector) + - id: obconnector + uri: ${obconnector.url} + predicates: + - Method=GET, POST, PUT, DELETE + - Path=/api/sync/** + filters: + - JwtAuthenticationFilter + # Root route - id: root-route uri: no://op diff --git a/rxconfig/server.yaml.template b/rxconfig/obconnsrv.yaml.template similarity index 75% rename from rxconfig/server.yaml.template rename to rxconfig/obconnsrv.yaml.template index 3da767e8..29cc68f1 100644 --- a/rxconfig/server.yaml.template +++ b/rxconfig/obconnsrv.yaml.template @@ -1,3 +1,3 @@ server: port: 8101 -token: \ No newline at end of file +token: diff --git a/rxconfig/worker.yaml.template b/rxconfig/worker.yaml.template index 99079f66..c8164519 100644 --- a/rxconfig/worker.yaml.template +++ b/rxconfig/worker.yaml.template @@ -1,7 +1,11 @@ server: - port: 0 + port: 8102 openbravo: url: classic: token: + +# Sync dashboard (optional) +# dashboard: +# enabled: true