diff --git a/_public/static/admin/js/config.js b/_public/static/admin/js/config.js
index 84c7111e6..9fbe18540 100644
--- a/_public/static/admin/js/config.js
+++ b/_public/static/admin/js/config.js
@@ -14,6 +14,8 @@ const NUMERIC_FIELDS = new Set([
'limit_mb',
'save_delay_ms',
'usage_flush_interval_sec',
+ 'on_demand_refresh_min_interval_sec',
+ 'on_demand_refresh_max_tokens',
'upload_concurrent',
'upload_timeout',
'download_concurrent',
@@ -32,7 +34,10 @@ const NUMERIC_FIELDS = new Set([
'medium_min_bytes',
'blocked_parallel_attempts',
'concurrent',
- 'batch_size'
+ 'batch_size',
+ 'max_file_size_mb',
+ 'max_files',
+ 'request_slow_ms'
]);
const LOCALE_MAP = {
@@ -150,7 +155,18 @@ const LOCALE_MAP = {
"fail_threshold": { title: "失败阈值", desc: "单个 Token 连续失败多少次后被标记为不可用。" },
"save_delay_ms": { title: "保存延迟", desc: "Token 变更合并写入的延迟(毫秒)。" },
"usage_flush_interval_sec": { title: "用量落库间隔", desc: "用量类字段写入数据库的最小间隔(秒)。" },
- "reload_interval_sec": { title: "同步间隔", desc: "多 worker 场景下 Token 状态刷新间隔(秒)。" }
+ "reload_interval_sec": { title: "同步间隔", desc: "多 worker 场景下 Token 状态刷新间隔(秒)。" },
+ "on_demand_refresh_enabled": { title: "按需刷新", desc: "当请求拿不到可用 Token 时,是否允许触发受限的按需刷新。" },
+ "on_demand_refresh_min_interval_sec": { title: "按需刷新最小间隔", desc: "请求侧按需刷新之间的最小间隔(秒),用于避免刷新风暴。" },
+ "on_demand_refresh_max_tokens": { title: "按需刷新最大数量", desc: "单次请求侧按需刷新最多检查多少个 cooling Token。" }
+ },
+
+ "log": {
+ "label": "日志配置",
+ "max_file_size_mb": { title: "单文件上限", desc: "单个日志文件大小上限(MB),超过后自动轮转;设置为 0 或负数表示不按大小轮转。" },
+ "max_files": { title: "保留文件数", desc: "最多保留多少个日志文件;设置为 0 或负数表示不限制数量。" },
+ "log_all_requests": { title: "记录全部请求", desc: "开启后记录所有请求;关闭时仅记录慢请求、异常请求和错误请求。" },
+ "request_slow_ms": { title: "慢请求阈值", desc: "请求耗时超过该值(毫秒)时会写入日志。" }
},
diff --git a/app/api/v1/admin/config.py b/app/api/v1/admin/config.py
index d719242da..037c3a489 100644
--- a/app/api/v1/admin/config.py
+++ b/app/api/v1/admin/config.py
@@ -5,8 +5,13 @@
from app.core.auth import verify_app_key
from app.core.config import config
-from app.core.storage import get_storage as resolve_storage, LocalStorage, RedisStorage, SQLStorage
-from app.core.logger import logger
+from app.core.logger import logger, reload_logging_from_config
+from app.core.storage import (
+ LocalStorage,
+ RedisStorage,
+ SQLStorage,
+ get_storage as resolve_storage,
+)
router = APIRouter()
@@ -99,6 +104,10 @@ async def update_config(data: dict):
"""更新配置"""
try:
await config.update(_sanitize_proxy_config_payload(data))
+ reload_logging_from_config(
+ default_level=os.getenv("LOG_LEVEL", "INFO"),
+ json_console=False,
+ )
return {"status": "success", "message": "配置已更新"}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
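The endpoint now rebuilds the logging sinks right after a successful config write, so new log.* values take effect without a restart. A minimal sketch of that flow, assuming config.update accepts a nested dict (the payload shape here is hypothetical):

    import asyncio
    import os

    from app.core.config import config
    from app.core.logger import reload_logging_from_config

    async def demo():
        # Persist a new slow-request threshold, then rebuild the sinks so the
        # stored level/rotation/retention apply immediately.
        await config.update({"log": {"request_slow_ms": 1500}})
        reload_logging_from_config(
            default_level=os.getenv("LOG_LEVEL", "INFO"),
            json_console=False,
        )

    asyncio.run(demo())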
diff --git a/app/core/logger.py b/app/core/logger.py
index 0b0290f77..d4242dd58 100644
--- a/app/core/logger.py
+++ b/app/core/logger.py
@@ -2,20 +2,20 @@
结构化 JSON 日志 - 极简格式
"""
-import sys
-import os
import json
+import os
+import sys
import traceback
from pathlib import Path
-from loguru import logger
+from typing import Any
-# Provide logging.Logger compatibility for legacy calls
-if not hasattr(logger, "isEnabledFor"):
- logger.isEnabledFor = lambda _level: True
+from loguru import logger
# 日志目录
DEFAULT_LOG_DIR = Path(__file__).parent.parent.parent / "logs"
LOG_DIR = Path(os.getenv("LOG_DIR", str(DEFAULT_LOG_DIR)))
+DEFAULT_LOG_MAX_FILE_SIZE_MB = 100
+DEFAULT_LOG_MAX_FILES = 7
_LOG_DIR_READY = False
@@ -70,7 +70,13 @@ def _format_json(record) -> str:
)
)
- return json.dumps(log_entry, ensure_ascii=False)
+ return json.dumps(log_entry, ensure_ascii=False, default=str)
+
+
+# Provide logging.Logger compatibility for legacy calls
+if not hasattr(logger, "isEnabledFor"):
+ logger.isEnabledFor = lambda _level: True
+
def _env_flag(name: str, default: bool) -> bool:
raw = os.getenv(name)
@@ -79,41 +85,54 @@ def _env_flag(name: str, default: bool) -> bool:
return raw.strip().lower() in ("1", "true", "yes", "on", "y")
-def _make_json_sink(output):
- """创建 JSON sink"""
-
- def sink(message):
- json_str = _format_json(message.record)
- print(json_str, file=output, flush=True)
-
- return sink
+def _env_int(name: str, default: int) -> int:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ try:
+ return int(raw.strip())
+ except (TypeError, ValueError):
+ return default
-def _file_json_sink(message):
- """写入日志文件"""
- record = message.record
- json_str = _format_json(record)
- log_file = LOG_DIR / f"app_{record['time'].strftime('%Y-%m-%d')}.log"
- with open(log_file, "a", encoding="utf-8") as f:
- f.write(json_str + "\n")
+def _patch_json_record(record) -> None:
+ """为全局 Loguru 记录补充序列化后的 JSON 文本。"""
+ record["extra"]["_json_line"] = _format_json(record)
def setup_logging(
level: str = "DEBUG",
json_console: bool = True,
file_logging: bool = True,
+ file_rotation_size_mb: int | None = None,
+ file_retention_count: int | None = None,
):
"""设置日志配置"""
+ logger.configure(patcher=_patch_json_record)
logger.remove()
file_logging = _env_flag("LOG_FILE_ENABLED", file_logging)
+ rotation_size_mb = _env_int(
+ "LOG_MAX_FILE_SIZE_MB",
+ DEFAULT_LOG_MAX_FILE_SIZE_MB
+ if file_rotation_size_mb is None
+ else int(file_rotation_size_mb),
+ )
+ retention_count = _env_int(
+ "LOG_MAX_FILES",
+ DEFAULT_LOG_MAX_FILES
+ if file_retention_count is None
+ else int(file_retention_count),
+ )
# 控制台输出
if json_console:
logger.add(
- _make_json_sink(sys.stdout),
+ sys.stdout,
level=level,
- format="{message}",
+ format="{extra[_json_line]}",
colorize=False,
+ backtrace=False,
+ diagnose=False,
)
else:
logger.add(
@@ -121,16 +140,29 @@ def setup_logging(
level=level,
format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {file.name}:{line} - {message}",
colorize=True,
+ backtrace=False,
+ diagnose=False,
)
# 文件输出
if file_logging:
if _prepare_log_dir():
+ file_kwargs: dict[str, Any] = {
+ "level": level,
+ "format": "{extra[_json_line]}",
+ "colorize": False,
+ "enqueue": True,
+ "encoding": "utf-8",
+ "backtrace": False,
+ "diagnose": False,
+ }
+ if rotation_size_mb > 0:
+ file_kwargs["rotation"] = rotation_size_mb * 1024 * 1024
+ if retention_count > 0:
+ file_kwargs["retention"] = retention_count
logger.add(
- _file_json_sink,
- level=level,
- format="{message}",
- enqueue=True,
+ str(LOG_DIR / "app_{time:YYYY-MM-DD}.log"),
+ **file_kwargs,
)
else:
logger.warning("File logging disabled: no writable log directory.")
@@ -138,6 +170,29 @@ def setup_logging(
return logger
+def reload_logging_from_config(
+ default_level: str = "INFO",
+ json_console: bool = False,
+):
+ """根据运行时配置重新加载日志设置。"""
+ try:
+ from app.core.config import get_config
+
+ return setup_logging(
+ level=str(get_config("log.level", default_level)),
+ json_console=json_console,
+ file_logging=bool(get_config("log.file_enabled", True)),
+ file_rotation_size_mb=get_config(
+ "log.max_file_size_mb", DEFAULT_LOG_MAX_FILE_SIZE_MB
+ ),
+ file_retention_count=get_config("log.max_files", DEFAULT_LOG_MAX_FILES),
+ )
+ except Exception as exc:
+ configured = setup_logging(level=default_level, json_console=json_console)
+ configured.warning("Failed to reload logging config: {}", exc)
+ return configured
+
+
def get_logger(trace_id: str = "", span_id: str = ""):
"""获取绑定了 trace 上下文的 logger"""
bound = {}
@@ -148,4 +203,10 @@ def get_logger(trace_id: str = "", span_id: str = ""):
return logger.bind(**bound) if bound else logger
-__all__ = ["logger", "setup_logging", "get_logger", "LOG_DIR"]
+__all__ = [
+ "logger",
+ "setup_logging",
+ "reload_logging_from_config",
+ "get_logger",
+ "LOG_DIR",
+]
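For reference, a standalone sketch of the Loguru behavior the rewritten setup relies on: a patcher runs once per record before any sink (so the JSON line is serialized once and shared by console and file sinks), an integer rotation is a size threshold in bytes, an integer retention is a file count, and {time:...} in the sink path is expanded per file:

    import json
    from loguru import logger

    def patch(record):
        # Runs once per record, before every sink; sinks then reference the
        # cached string via the "{extra[_json_line]}" format.
        record["extra"]["_json_line"] = json.dumps(
            {"level": record["level"].name, "msg": record["message"]},
            ensure_ascii=False,
            default=str,
        )

    logger.configure(patcher=patch)
    logger.remove()
    logger.add(
        "logs/app_{time:YYYY-MM-DD}.log",  # {time:...} expands when the file is created
        format="{extra[_json_line]}",      # emit the pre-serialized JSON line
        rotation=100 * 1024 * 1024,        # rotate once the file reaches ~100 MB
        retention=7,                       # keep at most 7 files, prune older ones
        enqueue=True,
        encoding="utf-8",
    )
    logger.info("rotation/retention smoke test")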
diff --git a/app/core/response_middleware.py b/app/core/response_middleware.py
index 4c0a07ece..e6189cc69 100644
--- a/app/core/response_middleware.py
+++ b/app/core/response_middleware.py
@@ -10,6 +10,7 @@
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
+from app.core.config import get_config
from app.core.logger import logger
@@ -19,6 +20,23 @@ class ResponseLoggerMiddleware(BaseHTTPMiddleware):
Request Logging and Response Tracking Middleware
"""
+ @staticmethod
+ def _should_log_response(path: str, status_code: int, duration_ms: float) -> bool:
+ if path == "/health" and not bool(
+ get_config("log.log_health_requests", False)
+ ):
+ return False
+
+ if bool(get_config("log.log_all_requests", False)):
+ return True
+
+ try:
+ slow_ms = float(get_config("log.request_slow_ms", 3000))
+ except (TypeError, ValueError):
+ slow_ms = 3000.0
+
+ return status_code >= 400 or duration_ms >= slow_ms
+
async def dispatch(self, request: Request, call_next):
# 生成请求 ID
trace_id = str(uuid.uuid4())
@@ -40,39 +58,36 @@ async def dispatch(self, request: Request, call_next):
):
return await call_next(request)
- # 记录请求信息
- logger.info(
- f"Request: {request.method} {request.url.path}",
- extra={
- "traceID": trace_id,
- "method": request.method,
- "path": request.url.path,
- },
- )
-
try:
response = await call_next(request)
# 计算耗时
duration = (time.time() - start_time) * 1000
- # 记录响应信息
- logger.info(
- f"Response: {request.method} {request.url.path} - {response.status_code} ({duration:.2f}ms)",
- extra={
- "traceID": trace_id,
- "method": request.method,
- "path": request.url.path,
- "status": response.status_code,
- "duration_ms": round(duration, 2),
- },
- )
+ if self._should_log_response(path, response.status_code, duration):
+ log_method = (
+ logger.error
+ if response.status_code >= 500
+ else logger.warning
+ if response.status_code >= 400
+ else logger.info
+ )
+ log_method(
+ f"Response: {request.method} {request.url.path} - {response.status_code} ({duration:.2f}ms)",
+ extra={
+ "traceID": trace_id,
+ "method": request.method,
+ "path": request.url.path,
+ "status": response.status_code,
+ "duration_ms": round(duration, 2),
+ },
+ )
return response
except Exception as e:
duration = (time.time() - start_time) * 1000
- logger.error(
+ logger.opt(exception=e).error(
f"Response Error: {request.method} {request.url.path} - {str(e)} ({duration:.2f}ms)",
extra={
"traceID": trace_id,
@@ -82,4 +97,4 @@ async def dispatch(self, request: Request, call_next):
"error": str(e),
},
)
- raise e
+ raise
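Taken together, the middleware now logs a response only when it is interesting. A condensed, self-contained sketch of the decision table (hypothetical helper, not the project's tests; unhandled exceptions are logged unconditionally in the except branch above):

    def should_log(path: str, status: int, duration_ms: float,
                   *, log_all: bool = False, slow_ms: float = 3000.0,
                   log_health: bool = False) -> bool:
        # Health checks stay silent unless explicitly enabled.
        if path == "/health" and not log_health:
            return False
        if log_all:
            return True
        # Otherwise only errors and slow requests make it into the log.
        return status >= 400 or duration_ms >= slow_ms

    assert should_log("/health", 200, 5.0) is False       # muted health probe
    assert should_log("/v1/chat", 200, 80.0) is False     # fast success: skipped
    assert should_log("/v1/chat", 200, 3500.0) is True    # slow request
    assert should_log("/v1/chat", 502, 80.0) is True      # error response
    assert should_log("/v1/chat", 200, 80.0, log_all=True) is True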
diff --git a/app/services/grok/batch_services/usage.py b/app/services/grok/batch_services/usage.py
index fa4dd76c2..1169b09ed 100644
--- a/app/services/grok/batch_services/usage.py
+++ b/app/services/grok/batch_services/usage.py
@@ -55,7 +55,7 @@ async def get(self, token: str) -> Dict:
remaining = data.get("remainingQueries")
if remaining is not None:
data["remainingTokens"] = remaining
- logger.info(
+ logger.debug(
f"Usage sync success: remaining={remaining}, token={token[:10]}..."
)
return data
diff --git a/app/services/grok/utils/retry.py b/app/services/grok/utils/retry.py
index bcb3a01f0..d252bdf02 100644
--- a/app/services/grok/utils/retry.py
+++ b/app/services/grok/utils/retry.py
@@ -25,7 +25,7 @@ async def pick_token(
break
if not token and not tried:
- result = await token_mgr.refresh_cooling_tokens()
+ result = await token_mgr.refresh_cooling_tokens_on_demand()
if result.get("recovered", 0) > 0:
for pool_name in ModelService.pool_candidates_for_model(model_id):
token = token_mgr.get_token(pool_name, prefer_tags=prefer_tags)
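The fallback order in pick_token is: exhaust the candidate pools, pay for at most one rate-limited on-demand refresh, and re-scan the pools only if tokens actually recovered. A condensed sketch with hypothetical signatures:

    async def pick_with_fallback(token_mgr, pools, prefer_tags=None):
        # First pass: try every candidate pool without any refresh.
        for pool in pools:
            token = token_mgr.get_token(pool, prefer_tags=prefer_tags)
            if token:
                return token
        # Nothing available: trigger the rate-limited on-demand refresh once.
        result = await token_mgr.refresh_cooling_tokens_on_demand()
        if result.get("recovered", 0) > 0:
            # Second pass only when the refresh actually recovered tokens.
            for pool in pools:
                token = token_mgr.get_token(pool, prefer_tags=prefer_tags)
                if token:
                    return token
        return None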
diff --git a/app/services/reverse/app_chat.py b/app/services/reverse/app_chat.py
index 779650bf6..835e98ee6 100644
--- a/app/services/reverse/app_chat.py
+++ b/app/services/reverse/app_chat.py
@@ -15,6 +15,7 @@
from app.services.reverse.utils.retry import retry_on_status
CHAT_API = "https://grok.com/rest/app-chat/conversations/new"
+_LAST_PROXY_LOG_STATE: tuple[str, str] | None = None
def _normalize_chat_proxy(proxy_url: str) -> str:
@@ -30,6 +31,23 @@ def _normalize_chat_proxy(proxy_url: str) -> str:
return proxy_url
+def _log_proxy_state_once(base_proxy: str, normalized_proxy: str = "", scheme: str = ""):
+ """仅在代理状态变化时记录一次代理配置日志。"""
+ global _LAST_PROXY_LOG_STATE
+
+ state = ("enabled", normalized_proxy) if base_proxy else ("direct", "")
+ if state == _LAST_PROXY_LOG_STATE:
+ return
+
+ _LAST_PROXY_LOG_STATE = state
+ if base_proxy:
+ logger.info(
+ f"AppChatReverse proxy enabled: scheme={scheme}, target={normalized_proxy}"
+ )
+ else:
+ logger.info("AppChatReverse proxy is empty, requests will use direct network")
+
+
class AppChatReverse:
"""/rest/app-chat/conversations/new reverse interface."""
@@ -148,11 +166,9 @@ async def request(
proxy = normalized_proxy
else:
proxies = {"http": normalized_proxy, "https": normalized_proxy}
- logger.info(
- f"AppChatReverse proxy enabled: scheme={scheme}, target={normalized_proxy}"
- )
+ _log_proxy_state_once(base_proxy, normalized_proxy, scheme)
else:
- logger.warning("AppChatReverse proxy is empty, request will use direct network")
+ _log_proxy_state_once("")
# Build headers
headers = build_headers(
diff --git a/app/services/token/manager.py b/app/services/token/manager.py
index eb668edb6..1906342ae 100644
--- a/app/services/token/manager.py
+++ b/app/services/token/manager.py
@@ -29,6 +29,8 @@
DEFAULT_RELOAD_INTERVAL_SEC = 30
DEFAULT_SAVE_DELAY_MS = 500
DEFAULT_USAGE_FLUSH_INTERVAL_SEC = 5
+DEFAULT_ON_DEMAND_REFRESH_MIN_INTERVAL_SEC = 300
+DEFAULT_ON_DEMAND_REFRESH_MAX_TOKENS = 100
SUPER_WINDOW_THRESHOLD_SECONDS = 14400
SUPER_POOL_NAME = "ssoSuper"
@@ -62,6 +64,8 @@ def __init__(self):
self._last_usage_flush_at = 0.0
self._dirty_tokens = {}
self._dirty_deletes = set()
+ self._on_demand_refresh_lock = asyncio.Lock()
+ self._last_on_demand_refresh_at = 0.0
@classmethod
async def get_instance(cls) -> "TokenManager":
@@ -123,7 +127,7 @@ async def _load(self):
self.initialized = True
self._last_reload_at = time.monotonic()
total = sum(p.count() for p in self.pools.values())
- logger.info(
+ logger.debug(
f"TokenManager initialized: {len(self.pools)} pools with {total} tokens"
)
except Exception as e:
@@ -343,12 +347,12 @@ def get_token(self, pool_name: str = "ssoBasic", exclude: set = None, prefer_tag
"""
pool = self.pools.get(pool_name)
if not pool:
- logger.warning(f"Pool '{pool_name}' not found")
+ logger.debug(f"Pool '{pool_name}' not found")
return None
token_info = pool.select(exclude=exclude, prefer_tags=prefer_tags)
if not token_info:
- logger.warning(f"No available token in pool '{pool_name}'")
+ logger.debug(f"No available token in pool '{pool_name}'")
return None
token = token_info.token
@@ -368,12 +372,12 @@ def get_token_info(self, pool_name: str = "ssoBasic", prefer_tags: Optional[Set[
"""
pool = self.pools.get(pool_name)
if not pool:
- logger.warning(f"Pool '{pool_name}' not found")
+ logger.debug(f"Pool '{pool_name}' not found")
return None
token_info = pool.select(prefer_tags=prefer_tags)
if not token_info:
- logger.warning(f"No available token in pool '{pool_name}'")
+ logger.debug(f"No available token in pool '{pool_name}'")
return None
return token_info
@@ -561,7 +565,7 @@ async def sync_usage(
)
consumed = max(0, old_quota - new_quota)
- logger.info(
+ logger.debug(
f"Token {raw_token[:10]}...: synced quota "
f"{old_quota} -> {new_quota} (consumed: {consumed}, use_count: {target_token.use_count})"
)
@@ -843,7 +847,12 @@ def get_pool_tokens(self, pool_name: str = "ssoBasic") -> List[TokenInfo]:
return []
return pool.list()
- async def refresh_cooling_tokens(self) -> Dict[str, int]:
+ async def refresh_cooling_tokens(
+ self,
+ *,
+ trigger: str = "scheduler",
+ max_tokens: Optional[int] = None,
+ ) -> Dict[str, int]:
"""
批量刷新 cooling 状态的 Token 配额
@@ -867,11 +876,30 @@ async def refresh_cooling_tokens(self) -> Dict[str, int]:
if token.need_refresh(interval_hours):
to_refresh.append((pool.name, token))
+ to_refresh.sort(
+ key=lambda item: (
+ item[1].last_sync_at or 0,
+ item[1].last_used_at or 0,
+ item[1].created_at or 0,
+ )
+ )
+ candidate_count = len(to_refresh)
+ if max_tokens is not None and max_tokens > 0:
+ to_refresh = to_refresh[:max_tokens]
+
if not to_refresh:
- logger.debug("Refresh check: no tokens need refresh")
+ logger.debug(f"Refresh check: trigger={trigger}, no tokens need refresh")
return {"checked": 0, "refreshed": 0, "recovered": 0, "expired": 0}
- logger.info(f"Refresh check: found {len(to_refresh)} cooling tokens to refresh")
+ logger.info(
+ f"Refresh check: trigger={trigger}, candidates={candidate_count}, "
+ f"selected={len(to_refresh)}"
+ + (
+ f", limit={max_tokens}"
+ if max_tokens is not None and max_tokens > 0
+ else ""
+ )
+ )
# 批量并发刷新
semaphore = asyncio.Semaphore(DEFAULT_REFRESH_CONCURRENCY)
@@ -962,7 +990,7 @@ async def _refresh_one(item: tuple[str, TokenInfo]) -> dict:
reason=f"windowSizeSeconds={window_size}",
)
- logger.info(
+ logger.debug(
f"Token {token_info.token[:10]}...: refreshed "
f"{old_quota} -> {new_quota}, status: {old_status} -> {token_info.status}"
)
@@ -1005,7 +1033,7 @@ async def _refresh_one(item: tuple[str, TokenInfo]) -> dict:
await self._save(force=True)
logger.info(
- f"Refresh completed: "
+ f"Refresh completed: trigger={trigger}, candidates={candidate_count}, "
f"checked={len(to_refresh)}, refreshed={refreshed}, "
f"recovered={recovered}, expired={expired}"
)
@@ -1017,6 +1045,69 @@ async def _refresh_one(item: tuple[str, TokenInfo]) -> dict:
"expired": expired,
}
+ async def refresh_cooling_tokens_on_demand(self) -> Dict[str, int]:
+ """请求链路触发的按需刷新,带限流与并发保护。"""
+ enabled = bool(get_config("token.on_demand_refresh_enabled", True))
+ if not enabled:
+ logger.debug("On-demand refresh skipped: disabled")
+ return {"checked": 0, "refreshed": 0, "recovered": 0, "expired": 0}
+
+ try:
+ min_interval_sec = float(
+ get_config(
+ "token.on_demand_refresh_min_interval_sec",
+ DEFAULT_ON_DEMAND_REFRESH_MIN_INTERVAL_SEC,
+ )
+ )
+ except (TypeError, ValueError):
+ min_interval_sec = float(DEFAULT_ON_DEMAND_REFRESH_MIN_INTERVAL_SEC)
+
+ try:
+ max_tokens = int(
+ get_config(
+ "token.on_demand_refresh_max_tokens",
+ DEFAULT_ON_DEMAND_REFRESH_MAX_TOKENS,
+ )
+ )
+ except (TypeError, ValueError):
+ max_tokens = DEFAULT_ON_DEMAND_REFRESH_MAX_TOKENS
+
+ if self._on_demand_refresh_lock.locked():
+ logger.debug("On-demand refresh skipped: another refresh is already running")
+ return {"checked": 0, "refreshed": 0, "recovered": 0, "expired": 0}
+
+ now = time.monotonic()
+ if (
+ min_interval_sec > 0
+ and self._last_on_demand_refresh_at > 0
+ and (now - self._last_on_demand_refresh_at) < min_interval_sec
+ ):
+ logger.debug(
+ "On-demand refresh skipped: last refresh {:.2f}s ago",
+ now - self._last_on_demand_refresh_at,
+ )
+ return {"checked": 0, "refreshed": 0, "recovered": 0, "expired": 0}
+
+ async with self._on_demand_refresh_lock:
+ now = time.monotonic()
+ if (
+ min_interval_sec > 0
+ and self._last_on_demand_refresh_at > 0
+ and (now - self._last_on_demand_refresh_at) < min_interval_sec
+ ):
+ logger.debug(
+ "On-demand refresh skipped after lock: last refresh {:.2f}s ago",
+ now - self._last_on_demand_refresh_at,
+ )
+ return {"checked": 0, "refreshed": 0, "recovered": 0, "expired": 0}
+
+ result = await self.refresh_cooling_tokens(
+ trigger="on_demand",
+ max_tokens=max_tokens,
+ )
+ self._last_on_demand_refresh_at = time.monotonic()
+ return result
+
# 便捷函数
async def get_token_manager() -> TokenManager:
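The guard in refresh_cooling_tokens_on_demand combines three protections: skip when a refresh is already in flight, rate-limit via a monotonic timestamp, and re-check the interval after acquiring the lock so queued waiters do not all refresh in turn. A standalone sketch of that pattern (hypothetical class, not the project's code):

    import asyncio
    import time

    class OnDemandGuard:
        def __init__(self, min_interval_sec: float = 300.0):
            self._lock = asyncio.Lock()
            self._last_at = 0.0
            self._min_interval = min_interval_sec

        def _too_soon(self) -> bool:
            return (
                self._min_interval > 0
                and self._last_at > 0
                and time.monotonic() - self._last_at < self._min_interval
            )

        async def run(self, refresh):
            if self._lock.locked():
                return None          # another caller is already refreshing
            if self._too_soon():
                return None          # refreshed too recently: rate-limited
            async with self._lock:
                if self._too_soon():
                    return None      # a concurrent caller refreshed while we waited
                result = await refresh()
                self._last_at = time.monotonic()
                return result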
diff --git a/config.defaults.toml b/config.defaults.toml
index c9e318593..c035c5622 100644
--- a/config.defaults.toml
+++ b/config.defaults.toml
@@ -90,6 +90,23 @@ save_delay_ms = 500
usage_flush_interval_sec = 5
# 多 worker 状态同步间隔(秒)
reload_interval_sec = 30
+# 是否启用请求侧按需刷新
+on_demand_refresh_enabled = true
+# 请求侧按需刷新最小间隔(秒)
+on_demand_refresh_min_interval_sec = 300
+# 请求侧按需刷新最大检查 Token 数量
+on_demand_refresh_max_tokens = 100
+
+# ==================== 日志配置 ====================
+[log]
+# 单个日志文件大小上限(MB),<=0 表示不按大小轮转
+max_file_size_mb = 100
+# 最多保留日志文件数量,<=0 表示不限制
+max_files = 7
+# 是否记录所有请求(关闭时仅记录慢请求、错误响应与异常请求)
+log_all_requests = false
+# 慢请求阈值(毫秒)
+request_slow_ms = 3000
# ==================== 缓存管理 ====================
[cache]
diff --git a/main.py b/main.py
index f62c12e6a..ecda04d89 100644
--- a/main.py
+++ b/main.py
@@ -29,7 +29,7 @@
from app.core.auth import verify_api_key # noqa: E402
from app.core.config import get_config # noqa: E402
-from app.core.logger import logger, setup_logging # noqa: E402
+from app.core.logger import logger, reload_logging_from_config, setup_logging # noqa: E402
from app.core.exceptions import register_exception_handlers # noqa: E402
from app.core.response_middleware import ResponseLoggerMiddleware # noqa: E402
from app.api.v1.chat import router as chat_router # noqa: E402
@@ -39,11 +39,11 @@
from app.api.v1.models import router as models_router # noqa: E402
from app.api.v1.response import router as responses_router # noqa: E402
from app.services.token import get_scheduler # noqa: E402
-from app.api.v1.admin import router as admin_router
-from app.api.v1.function import router as function_router
-from app.api.pages import router as pages_router
-from fastapi.responses import RedirectResponse
-from fastapi.staticfiles import StaticFiles
+from app.api.v1.admin import router as admin_router # noqa: E402
+from app.api.v1.function import router as function_router # noqa: E402
+from app.api.pages import router as pages_router # noqa: E402
+from fastapi.responses import RedirectResponse # noqa: E402
+from fastapi.staticfiles import StaticFiles # noqa: E402
# 初始化日志
setup_logging(
@@ -62,6 +62,10 @@ async def lifespan(app: FastAPI):
# 2. 加载配置
await config.load()
+ reload_logging_from_config(
+ default_level=os.getenv("LOG_LEVEL", "INFO"),
+ json_console=False,
+ )
# 3. 启动服务显示
logger.info("Starting Grok2API...")