Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 111 additions & 1 deletion src-tauri/src/codex_config.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
// unused imports removed
use std::io::{BufRead, BufReader};
use std::path::PathBuf;

use crate::config::{
atomic_write, delete_file, get_home_dir, sanitize_provider_name, write_json_file,
write_text_file,
};
use crate::error::AppError;
use rusqlite::Connection;
use serde_json::Value;
use std::fs;
use std::path::Path;
Expand Down Expand Up @@ -250,6 +252,115 @@ pub fn remove_codex_toml_base_url_if(toml_str: &str, predicate: impl Fn(&str) ->
doc.to_string()
}

/// Path to Codex's `session_index.jsonl` inside the Codex config directory.
#[allow(dead_code)]
pub fn get_codex_session_index_path() -> PathBuf {
    let config_dir = get_codex_config_dir();
    config_dir.join("session_index.jsonl")
}

/// Path to Codex's `state_5.sqlite` database inside the Codex config directory.
pub fn get_codex_state_db_path() -> PathBuf {
    let config_dir = get_codex_config_dir();
    config_dir.join("state_5.sqlite")
}

/// Recursively collect every `.jsonl` file under `root` into `files`.
///
/// A missing `root` directory is not an error — the function returns `Ok(())`
/// without collecting anything. Any other I/O failure is propagated.
fn collect_rollout_files(root: &Path, files: &mut Vec<PathBuf>) -> Result<(), AppError> {
    // Handle "directory does not exist" via the read_dir error itself instead
    // of a separate exists() pre-check, which closes the TOCTOU gap between
    // the check and the read.
    let entries = match std::fs::read_dir(root) {
        Ok(entries) => entries,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(()),
        Err(e) => return Err(AppError::io(root, e)),
    };

    for entry in entries {
        let entry = entry.map_err(|e| AppError::io(root, e))?;
        let path = entry.path();
        if path.is_dir() {
            // Descend into sub-directories.
            collect_rollout_files(&path, files)?;
        } else if path.extension().and_then(|ext| ext.to_str()) == Some("jsonl") {
            files.push(path);
        }
    }

    Ok(())
}

/// Backfill a missing `model_provider` in the first (`session_meta`) line of
/// Codex's historical rollout JSONL files under `sessions/` and
/// `archived_sessions/`.
///
/// Only fills in missing values (absent key or empty string) and never
/// overwrites an existing value, so cross-provider history is not polluted.
///
/// Returns the number of files whose metadata line was patched.
///
/// NOTE(review): this is still a read-modify-write of the whole file, so any
/// lines a concurrently running Codex appends between the read and the write
/// would be lost. Call this only while Codex is not running, or guard it with
/// a file lock.
pub fn sync_codex_rollout_model_provider(provider_id: &str) -> Result<usize, AppError> {
    let root = get_codex_config_dir();
    let mut files = Vec::new();
    collect_rollout_files(&root.join("sessions"), &mut files)?;
    collect_rollout_files(&root.join("archived_sessions"), &mut files)?;

    let mut changed = 0usize;

    for path in files {
        // Read the file once; everything after the first line is written back
        // verbatim. The previous line-by-line rejoin normalized CRLF endings,
        // forced a trailing newline, and allocated a String per line.
        let content = std::fs::read_to_string(&path).map_err(|e| AppError::io(&path, e))?;
        let (first_line, rest) = match content.split_once('\n') {
            Some((first, rest)) => (first, rest),
            None => (content.as_str(), ""),
        };

        // The first line must parse as JSON and be a `session_meta` record;
        // skip anything else (including empty or non-JSONL files).
        let mut first: Value = match serde_json::from_str(first_line.trim_end_matches('\r')) {
            Ok(value) => value,
            Err(_) => continue,
        };

        if first.get("type").and_then(Value::as_str) != Some("session_meta") {
            continue;
        }

        let Some(payload) = first.get_mut("payload").and_then(Value::as_object_mut) else {
            continue;
        };

        // Only fix missing values (empty string or absent key); never clobber
        // an existing provider.
        let current = payload
            .get("model_provider")
            .and_then(Value::as_str)
            .unwrap_or("");
        if !current.is_empty() {
            continue;
        }

        payload.insert(
            "model_provider".to_string(),
            Value::String(provider_id.to_string()),
        );

        let mut updated =
            serde_json::to_string(&first).map_err(|e| AppError::JsonSerialize { source: e })?;
        updated.push('\n');
        // Splice the untouched remainder back byte-for-byte.
        updated.push_str(rest);
        write_text_file(&path, &updated)?;
        changed += 1;
    }

    Ok(changed)
}

/// Backfill `threads.model_provider` in Codex's `state_5.sqlite` with the
/// currently active provider.
///
/// Only fixes missing values (NULL or empty string) — the same "absent or
/// empty" policy as `sync_codex_rollout_model_provider` — and never
/// overwrites an existing value, so cross-provider history is not polluted.
///
/// Returns the number of rows updated; 0 if the database file does not exist.
pub fn sync_codex_threads_model_provider(provider_id: &str) -> Result<usize, AppError> {
    let path = get_codex_state_db_path();
    if !path.exists() {
        return Ok(0);
    }

    let conn = Connection::open(&path).map_err(|e| AppError::Database(e.to_string()))?;
    // Treat both NULL and the empty string as "missing", consistent with the
    // rollout-file sync above; never clobber a real provider value.
    let changed = conn
        .execute(
            "UPDATE threads SET model_provider = ?1 \
             WHERE model_provider IS NULL OR model_provider = ''",
            [provider_id],
        )
        .map_err(|e| AppError::Database(e.to_string()))?;
    Ok(changed)
}

#[cfg(test)]
mod tests {
use super::*;
Expand All @@ -263,7 +374,6 @@ model = "gpt-5.1-codex"
name = "any"
wire_api = "responses"
"#;

let result = update_codex_toml_field(input, "base_url", "https://example.com/v1").unwrap();
let parsed: toml::Value = toml::from_str(&result).unwrap();

Expand Down
4 changes: 2 additions & 2 deletions src-tauri/src/commands/misc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1415,7 +1415,7 @@ mod tests {

let count = paths
.iter()
.filter(|path| **path == PathBuf::from("/same/path"))
.filter(|path| **path == std::path::Path::new("/same/path"))
.count();
assert_eq!(count, 1);
}
Expand All @@ -1427,7 +1427,7 @@ mod tests {

let count = paths
.iter()
.filter(|path| **path == PathBuf::from("/home/tester/.bun/bin"))
.filter(|path| **path == std::path::Path::new("/home/tester/.bun/bin"))
.count();
assert_eq!(count, 1);
}
Expand Down
116 changes: 65 additions & 51 deletions src-tauri/src/commands/settings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,13 +85,15 @@ mod tests {

#[test]
fn save_settings_should_preserve_existing_webdav_when_payload_omits_it() {
let mut existing = AppSettings::default();
existing.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "secret".to_string(),
..WebDavSyncSettings::default()
});
let existing = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "secret".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let incoming = AppSettings::default();
let merged = merge_settings_for_save(incoming, &existing);
Expand All @@ -105,21 +107,25 @@ mod tests {

#[test]
fn save_settings_should_keep_incoming_webdav_when_present() {
let mut existing = AppSettings::default();
existing.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.old.example.com".to_string(),
username: "old".to_string(),
password: "old-pass".to_string(),
..WebDavSyncSettings::default()
});

let mut incoming = AppSettings::default();
incoming.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.new.example.com".to_string(),
username: "new".to_string(),
password: "new-pass".to_string(),
..WebDavSyncSettings::default()
});
let existing = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.old.example.com".to_string(),
username: "old".to_string(),
password: "old-pass".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let incoming = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.new.example.com".to_string(),
username: "new".to_string(),
password: "new-pass".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let merged = merge_settings_for_save(incoming, &existing);

Expand All @@ -135,22 +141,26 @@ mod tests {
/// must NOT overwrite the existing one.
#[test]
fn save_settings_should_preserve_password_when_incoming_has_empty_password() {
let mut existing = AppSettings::default();
existing.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "secret".to_string(),
..WebDavSyncSettings::default()
});
let existing = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "secret".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

// Simulate frontend sending settings with cleared password
let mut incoming = AppSettings::default();
incoming.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
});
let incoming = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let merged = merge_settings_for_save(incoming, &existing);

Expand All @@ -165,21 +175,25 @@ mod tests {
/// work without panicking and keep the empty state.
#[test]
fn save_settings_should_handle_both_empty_passwords() {
let mut existing = AppSettings::default();
existing.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
});

let mut incoming = AppSettings::default();
incoming.webdav_sync = Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
});
let existing = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let incoming = AppSettings {
webdav_sync: Some(WebDavSyncSettings {
base_url: "https://dav.example.com".to_string(),
username: "alice".to_string(),
password: "".to_string(),
..WebDavSyncSettings::default()
}),
..Default::default()
};

let merged = merge_settings_for_save(incoming, &existing);

Expand Down
6 changes: 4 additions & 2 deletions src-tauri/src/database/backup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -791,8 +791,10 @@ mod tests {
std::fs::create_dir_all(&test_home).expect("create test home");
std::env::set_var("CC_SWITCH_TEST_HOME", &test_home);

let mut settings = AppSettings::default();
settings.backup_interval_hours = Some(0);
let settings = AppSettings {
backup_interval_hours: Some(0),
..Default::default()
};
update_settings(settings).expect("disable auto backup");

let db = Database::memory()?;
Expand Down
6 changes: 4 additions & 2 deletions src-tauri/src/provider.rs
Original file line number Diff line number Diff line change
Expand Up @@ -725,8 +725,10 @@ mod tests {

#[test]
fn provider_meta_serializes_pricing_model_source() {
let mut meta = ProviderMeta::default();
meta.pricing_model_source = Some("response".to_string());
let meta = ProviderMeta {
pricing_model_source: Some("response".to_string()),
..Default::default()
};

let value = serde_json::to_value(&meta).expect("serialize ProviderMeta");

Expand Down
8 changes: 5 additions & 3 deletions src-tauri/src/proxy/response_processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -772,9 +772,11 @@ mod tests {
db.set_pricing_model_source(app_type, "response").await?;
seed_pricing(&db)?;

let mut meta = ProviderMeta::default();
meta.cost_multiplier = Some("2".to_string());
meta.pricing_model_source = Some("request".to_string());
let meta = ProviderMeta {
cost_multiplier: Some("2".to_string()),
pricing_model_source: Some("request".to_string()),
..Default::default()
};
insert_provider(&db, "provider-1", app_type, meta)?;

let state = build_state(db.clone());
Expand Down
21 changes: 19 additions & 2 deletions src-tauri/src/services/provider/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,12 @@ use serde::Deserialize;
use serde_json::Value;

use crate::app_config::AppType;
use crate::codex_config::{sync_codex_rollout_model_provider, sync_codex_threads_model_provider};
use crate::error::AppError;
use crate::provider::{Provider, UsageResult};
use crate::services::mcp::McpService;
use crate::settings::CustomEndpoint;
use crate::store::AppState;

// Re-export sub-module functions for external access
pub use live::{
import_default_config, import_openclaw_providers_from_live,
Expand Down Expand Up @@ -1542,13 +1542,30 @@ impl ProviderService {
}
}

// Codex 特殊处理:切换 live 配置后,同步历史 rollout 首行中的 model_provider,
// 避免 thread/list 因历史元数据与当前 provider 不一致而过滤空历史。
// 注意:此操作只在正常切换时执行,不在热切换时执行(避免重 I/O 操作增加切换耗时)
if matches!(app_type, AppType::Codex) {
if let Err(e) = sync_codex_rollout_model_provider(id) {
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Use Codex model_provider key for metadata sync

The id passed here is the cc-switch provider record ID, but Codex session metadata is keyed by TOML model_provider values (for example custom/openai), not by internal UUIDs. In this repo, non-additive providers are created with generated UUID IDs, so using id causes both sync helpers to write a value that does not match Codex’s active provider key, which means thread/list filtering can still miss or misclassify history after switching. Please parse the target provider’s settings_config.config and pass its model_provider value to these sync functions instead of the internal provider ID.

Useful? React with 👍 / 👎.

log::warn!("同步 Codex rollout model_provider 失败: {e}");
result
.warnings
.push("codex_rollout_sync_failed".to_string());
}
if let Err(e) = sync_codex_threads_model_provider(id) {
log::warn!("同步 Codex threads.model_provider 失败: {e}");
result
.warnings
.push("codex_threads_sync_failed".to_string());
}
}

// Sync MCP
McpService::sync_all_enabled(state)?;

Ok(result)
}

/// Sync current provider to live configuration (re-export)
///
/// NOTE(review): the unqualified call below resolves to the module-level
/// `sync_current_to_live` (associated functions are not in scope as bare
/// names in Rust), presumably re-exported from a sub-module — confirm that
/// re-export exists in the truncated `pub use live::{...}` list above, since
/// it is not visible here.
pub fn sync_current_to_live(state: &AppState) -> Result<(), AppError> {
    sync_current_to_live(state)
}
Expand Down