From cfee03142a2bdc90fe55f41a8a78467f7429c7eb Mon Sep 17 00:00:00 2001
From: Roy Le
Date: Thu, 12 Mar 2026 13:10:08 +0700
Subject: [PATCH] [MISC] odoo/service: scale up by one worker when all workers
 are busy, the total number of workers is limited to 1.5x

---
 odoo/service/server.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/odoo/service/server.py b/odoo/service/server.py
index b11fe731c9f24..8752f2def8571 100644
--- a/odoo/service/server.py
+++ b/odoo/service/server.py
@@ -4,10 +4,12 @@
 import datetime
 import errno
 import logging
+import multiprocessing
 import os
 import os.path
 import platform
 import random
+import re
 import select
 import signal
 import socket
@@ -67,6 +69,7 @@
 _logger = logging.getLogger(__name__)
 
 SLEEP_INTERVAL = 60  # 1 min
+WORKER_BUSY = multiprocessing.Value('i', 0)
 
 def memory_info(process):
     """
@@ -943,6 +946,26 @@ def process_spawn(self):
         if config['http_enable']:
             while len(self.workers_http) < self.population:
                 self.worker_spawn(WorkerHTTP, self.workers_http)
+        # Scale up by at most one additional worker when all current workers
+        # are busy. The total number of workers is capped at 1.5x the configured
+        # population to avoid uncontrolled worker growth under heavy load.
+        # NOTE: In our current deployment, all instances are restarted daily,
+        # so any extra workers created during traffic spikes will be reset
+        # back to the base population after restart.
+        # TODO:
+        # Implement automatic scale-down for idle workers in the future
+        # (e.g., terminate extra workers after being idle for a certain time)
+        # to avoid keeping unnecessary workers alive.
+        if (WORKER_BUSY.value and len(self.workers_http) == WORKER_BUSY.value):
+            if len(self.workers_http) < self.population + self.population // 2:
+                worker = self.worker_spawn(WorkerHTTP, self.workers_http)
+                _logger.info(
+                    f"Scaling up {worker.__class__.__name__} by 1 (busy={WORKER_BUSY.value}, pid={worker.pid})"
+                )
+            else:
+                _logger.info(
+                    f"{WorkerHTTP.__name__} scale-up skipped: max workers reached ({self.population + self.population // 2})"
+                )
         if not self.long_polling_pid:
             self.long_polling_spawn()
         while len(self.workers_cron) < config['max_cron_threads']:
@@ -1203,7 +1226,14 @@ def process_request(self, client, addr):
         # tolerate broken pipe when the http client closes the socket before
         # receiving the full reply
+        with WORKER_BUSY.get_lock():
+            WORKER_BUSY.value += 1
         try:
             self.server.process_request(client, addr)
         except IOError as e:
             if e.errno != errno.EPIPE:
                 raise
+        finally:
+            # Always release the busy counter, even when process_request
+            # raises; otherwise the counter leaks and scale-up misfires.
+            with WORKER_BUSY.get_lock():
+                WORKER_BUSY.value -= 1