From 012bd516b2b05bcbb340df513dc011a843d48b79 Mon Sep 17 00:00:00 2001
From: Dylan Tang
Date: Thu, 2 Apr 2026 19:47:18 -0500
Subject: [PATCH 1/2] Use az commands to count the current containers rather than manually maintaining an atomic counter

---
 .../src/run_task/pool_manager.rs | 93 +++++--------------
 1 file changed, 22 insertions(+), 71 deletions(-)

diff --git a/DoWhiz_service/run_task_module/src/run_task/pool_manager.rs b/DoWhiz_service/run_task_module/src/run_task/pool_manager.rs
index 938fdf18..c2ec9e85 100644
--- a/DoWhiz_service/run_task_module/src/run_task/pool_manager.rs
+++ b/DoWhiz_service/run_task_module/src/run_task/pool_manager.rs
@@ -11,8 +11,7 @@
 //! - Scheduler polls completion queue, then calls replenish()
 
 use std::process::Command;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{Arc, Mutex, OnceLock};
+use std::sync::{Mutex, OnceLock};
 use uuid::Uuid;
 
 const DEFAULT_POOL_SIZE: usize = 5;
@@ -50,7 +49,6 @@ pub struct PoolConfig {
 /// Manages a pool of warm ACI containers.
 pub struct PoolManager {
     config: PoolConfig,
-    active_count: Arc<AtomicUsize>,
     target_size: usize,
     /// Tokio runtime handle captured during initialization for use in sync contexts
    runtime_handle: OnceLock<tokio::runtime::Handle>,
@@ -63,7 +61,6 @@ impl PoolManager {
     pub fn new(config: PoolConfig, target_size: Option<usize>) -> Self {
         Self {
             config,
-            active_count: Arc::new(AtomicUsize::new(0)),
             target_size: target_size.unwrap_or(DEFAULT_POOL_SIZE),
             runtime_handle: OnceLock::new(),
             replenish_lock: Mutex::new(()),
@@ -82,8 +79,6 @@
         // Count existing warm containers
         let existing_count = count_existing_containers(&self.config.resource_group)?;
 
-        self.active_count.store(existing_count, Ordering::SeqCst);
-
         let containers_needed = self.target_size.saturating_sub(existing_count);
 
         eprintln!(
@@ -104,10 +99,11 @@
             }));
         }
 
+        let mut provisioned = 0;
         for handle in handles {
             match handle.await {
                 Ok(Ok(name)) => {
-                    self.active_count.fetch_add(1, Ordering::SeqCst);
+                    provisioned += 1;
                     eprintln!("[pool_manager] Provisioned: {}", name);
                 }
                 Ok(Err(e)) => eprintln!("[pool_manager] Provision failed: {}", e),
@@ -115,26 +111,17 @@
             }
         }
 
-        let final_count = self.active_count.load(Ordering::SeqCst);
         eprintln!(
             "[pool_manager] Pool ready with {} containers (target: {})",
-            final_count, self.target_size
+            existing_count + provisioned, self.target_size
         );
 
         Ok(())
     }
 
     /// Called after a task completes to replenish the pool.
-    /// Decrements active count and spawns a background provision task.
+    /// Queries Azure for actual container count to avoid sync issues.
pub fn replenish(&self) { - let previous = self.active_count.fetch_sub(1, Ordering::SeqCst); - eprintln!( - "[pool_manager] Container finished, active: {} -> {}", - previous, - previous - 1 - ); - - // Lock to serialize check-and-reserve, preventing concurrent replenish - // calls from both seeing count < target and over-provisioning + // Lock to serialize replenish calls let _guard = match self.replenish_lock.lock() { Ok(guard) => guard, Err(e) => { @@ -143,26 +130,34 @@ impl PoolManager { } }; - let current = self.active_count.load(Ordering::SeqCst); + // Query Azure for actual container count + let current = match count_existing_containers(&self.config.resource_group) { + Ok(count) => count, + Err(e) => { + eprintln!("[pool_manager] Failed to count containers: {}", e); + return; + } + }; + + eprintln!( + "[pool_manager] Replenish check: {} containers exist, target is {}", + current, self.target_size + ); + if current >= self.target_size { eprintln!("[pool_manager] Pool at target size, skipping replenish"); return; } - // Reserve slot before spawning - self.active_count.fetch_add(1, Ordering::SeqCst); - let handle = match self.runtime_handle.get() { Some(h) => h, None => { eprintln!("[pool_manager] No runtime handle available, skipping replenish"); - self.active_count.fetch_sub(1, Ordering::SeqCst); return; } }; let config = self.config.clone(); - let active_count = Arc::clone(&self.active_count); drop(_guard); // Release lock before spawning async work @@ -173,17 +168,15 @@ impl PoolManager { eprintln!("[pool_manager] Replenished with: {}", name); } Err(e) => { - // Rollback reservation on failure - active_count.fetch_sub(1, Ordering::SeqCst); eprintln!("[pool_manager] Replenish failed: {}", e); } } }); } - /// Get current number of active containers. + /// Get current number of active containers by querying Azure. pub fn active_count(&self) -> usize { - self.active_count.load(Ordering::SeqCst) + count_existing_containers(&self.config.resource_group).unwrap_or(0) } /// Get target pool size. 
@@ -522,7 +515,6 @@ mod tests { let manager = PoolManager::new(config, Some(5)); assert_eq!(manager.target_size(), 5); - assert_eq!(manager.active_count(), 0); assert_eq!(manager.task_queue(), "test-tasks"); assert_eq!(manager.completion_queue(), "test-completions"); } @@ -660,45 +652,4 @@ mod tests { assert_eq!(CONTAINER_PREFIX, "dwz-warm-"); } - #[test] - fn test_pool_manager_active_count_starts_at_zero() { - let config = test_config(); - let manager = PoolManager::new(config, Some(5)); - - // Before initialize, active_count should be 0 - assert_eq!(manager.active_count(), 0); - } - - #[test] - fn test_pool_manager_replenish_decrements_count() { - let config = test_config(); - let manager = PoolManager::new(config, Some(5)); - - // Manually set active count to simulate initialized state - manager.active_count.store(5, Ordering::SeqCst); - assert_eq!(manager.active_count(), 5); - - // replenish() decrements the count (note: won't spawn without runtime handle) - manager.replenish(); - assert_eq!(manager.active_count(), 4); - - manager.replenish(); - assert_eq!(manager.active_count(), 3); - } - - #[test] - fn test_pool_manager_replenish_skips_when_at_target() { - let config = test_config(); - let manager = PoolManager::new(config, Some(3)); - - // Set active count above target - manager.active_count.store(5, Ordering::SeqCst); - - // replenish decrements but logs "skipping" since still >= target - manager.replenish(); // 5 -> 4, still >= 3 - assert_eq!(manager.active_count(), 4); - - manager.replenish(); // 4 -> 3, still >= 3 - assert_eq!(manager.active_count(), 3); - } } From eae58fb0538fb02c91766b1a2aa153c3c5495bd8 Mon Sep 17 00:00:00 2001 From: Logan Date: Thu, 2 Apr 2026 19:37:30 -0700 Subject: [PATCH 2/2] Repair ambiguous weekday routine cron handling --- .agents/skills/scheduler_maintain/SKILL.md | 3 + DoWhiz_service/scheduler_module/README.md | 2 + .../scheduler_module/src/scheduler/actions.rs | 29 +++-- .../scheduler_module/src/scheduler/mod.rs | 58 ++++++++- .../src/scheduler/schedule.rs | 49 ++++++++ .../src/scheduler/store/mongo.rs | 52 +++++--- .../scheduler_module/src/scheduler/tests.rs | 113 +++++++++++++++++- .../skills/scheduler_maintain/SKILL.md | 3 + 8 files changed, 284 insertions(+), 25 deletions(-) diff --git a/.agents/skills/scheduler_maintain/SKILL.md b/.agents/skills/scheduler_maintain/SKILL.md index 5d5af346..c74b61d7 100644 --- a/.agents/skills/scheduler_maintain/SKILL.md +++ b/.agents/skills/scheduler_maintain/SKILL.md @@ -45,6 +45,9 @@ SCHEDULER_ACTIONS_JSON_END ## Rules - Use RFC3339 UTC timestamps. - Cron uses 6 fields: `sec min hour day month weekday`. +- Prefer named weekdays like `MON-FRI` for recurring weekday schedules. Do not use numeric + weekday ranges for Monday-Friday here; in this scheduler parser, `1-5` is ambiguous and can + behave like Sunday-Thursday. - Do not include workspace paths; `create_run_task` always targets the current workspace. - Output only JSON inside blocks; no commentary inside blocks. - If no changes are requested, omit the relevant block. diff --git a/DoWhiz_service/scheduler_module/README.md b/DoWhiz_service/scheduler_module/README.md index d8561129..d3e953cb 100644 --- a/DoWhiz_service/scheduler_module/README.md +++ b/DoWhiz_service/scheduler_module/README.md @@ -19,6 +19,8 @@ Responsibilities: Schedules: - `Cron` (6 fields: `sec min hour day month weekday`, UTC) + - Prefer named weekdays like `MON-FRI` for weekday schedules. 
Numeric weekday ranges are
+    parser-ambiguous here; for example, `1-5` can behave like Sunday-Thursday.
 - `OneShot` (`run_at` timestamp)
 
 ## Channels
diff --git a/DoWhiz_service/scheduler_module/src/scheduler/actions.rs b/DoWhiz_service/scheduler_module/src/scheduler/actions.rs
index 138607b2..db7f0e8e 100644
--- a/DoWhiz_service/scheduler_module/src/scheduler/actions.rs
+++ b/DoWhiz_service/scheduler_module/src/scheduler/actions.rs
@@ -16,8 +16,11 @@ use crate::thread_state::{current_thread_epoch, default_thread_state_path};
 
 use super::core::Scheduler;
 use super::executor::TaskExecutor;
+use super::load_run_task_request_context;
 use super::reply::load_reply_context;
-use super::schedule::{next_run_after, validate_cron_expression};
+use super::schedule::{
+    next_run_after, normalize_weekday_cron_expression, validate_cron_expression,
+};
 use super::store::SchedulerStore;
 use super::types::{RunTaskTask, Schedule, ScheduledTask, SchedulerError, SendReplyTask, TaskKind};
 use super::utils::parse_datetime;
@@ -1247,6 +1250,7 @@ pub(crate) fn apply_scheduler_actions(
     let mut rescheduled = 0usize;
     let mut created = 0usize;
     let mut skipped = 0usize;
+    let request_context = load_run_task_request_context(task);
 
     for action in actions {
         match action {
@@ -1279,7 +1283,11 @@
                         continue;
                     }
                 };
-                match resolve_schedule_request(schedule, now) {
+                match resolve_schedule_request_with_context(
+                    schedule,
+                    now,
+                    request_context.as_deref(),
+                ) {
                     Ok(new_schedule) => {
                         target.schedule = new_schedule;
                         target.enabled = true;
@@ -1301,7 +1309,11 @@
                 codex_disabled,
                 reply_to,
             } => {
-                let schedule = match resolve_schedule_request(schedule, now) {
+                let schedule = match resolve_schedule_request_with_context(
+                    schedule,
+                    now,
+                    request_context.as_deref(),
+                ) {
                     Ok(schedule) => schedule,
                     Err(err) => {
                         warn!(
@@ -1369,16 +1381,19 @@ fn parse_action_task_ids(task_ids: &[String]) -> (HashSet<Uuid>, Vec<String>) {
     (ids, invalid)
 }
 
-pub(crate) fn resolve_schedule_request(
+pub(crate) fn resolve_schedule_request_with_context(
     schedule: &run_task_module::ScheduleRequest,
     now: DateTime<Utc>,
+    request_context: Option<&str>,
 ) -> Result<Schedule, SchedulerError> {
     match schedule {
         run_task_module::ScheduleRequest::Cron { expression } => {
-            validate_cron_expression(expression)?;
-            let next_run = next_run_after(expression, now)?;
+            let normalized_expression =
+                normalize_weekday_cron_expression(expression, request_context);
+            validate_cron_expression(&normalized_expression)?;
+            let next_run = next_run_after(&normalized_expression, now)?;
             Ok(Schedule::Cron {
-                expression: expression.clone(),
+                expression: normalized_expression,
                 next_run,
             })
         }
diff --git a/DoWhiz_service/scheduler_module/src/scheduler/mod.rs b/DoWhiz_service/scheduler_module/src/scheduler/mod.rs
index 9b999576..e2594f19 100644
--- a/DoWhiz_service/scheduler_module/src/scheduler/mod.rs
+++ b/DoWhiz_service/scheduler_module/src/scheduler/mod.rs
@@ -19,9 +19,10 @@ pub use types::{
 pub use utils::load_google_access_token_from_service_env;
 
 use chrono::{DateTime, Duration as ChronoDuration, Utc};
+use std::fs;
 use std::path::Path;
 
-use self::schedule::next_run_after;
+use self::schedule::{next_run_after, normalize_weekday_cron_expression};
 
 const ROUTINE_ONE_SHOT_DELAY_THRESHOLD_MINUTES: i64 = 5;
@@ -127,5 +128,60 @@ pub fn prepare_task_for_resume(
     Ok(resumed)
 }
 
+pub(crate) fn load_run_task_request_context(task: &RunTaskTask) -> Option<String> {
+    let input_email_dir = if task.input_email_dir.is_absolute() {
+        task.input_email_dir.clone()
+    } 
else {
+        task.workspace_dir.join(&task.input_email_dir)
+    };
+
+    let thread_request_path = input_email_dir.join("thread_request.md");
+    if let Ok(content) = fs::read_to_string(thread_request_path) {
+        let trimmed = content.trim();
+        if !trimmed.is_empty() {
+            return Some(trimmed.to_string());
+        }
+    }
+
+    None
+}
+
+/// Repair legacy weekday cron expressions for stored run_task schedules.
+///
+/// This is a compatibility shim for older model-generated schedules that encoded a Monday-Friday
+/// request with numeric weekday fields like `1-5`, which the scheduler's cron parser can
+/// interpret as Sunday-Thursday. We only rewrite the cron when the original request context
+/// clearly asked for weekdays, so legitimate Sunday-Thursday routines remain untouched.
+pub(crate) fn maybe_repair_legacy_weekday_cron_task(
+    task: &mut ScheduledTask,
+    now: DateTime<Utc>,
+) -> Result<bool, SchedulerError> {
+    let request_context = match &task.kind {
+        TaskKind::RunTask(run_task) => load_run_task_request_context(run_task),
+        _ => return Ok(false),
+    };
+
+    let Schedule::Cron {
+        expression,
+        next_run,
+    } = &mut task.schedule
+    else {
+        return Ok(false);
+    };
+
+    let normalized = normalize_weekday_cron_expression(expression, request_context.as_deref());
+    if normalized == *expression {
+        return Ok(false);
+    }
+
+    let reference_time = task
+        .last_run
+        .map(|value| if value > now { value } else { now })
+        .unwrap_or(now);
+    *expression = normalized;
+    *next_run = next_run_after(expression, reference_time)?;
+    Ok(true)
+}
+
 #[cfg(test)]
 mod tests;
diff --git a/DoWhiz_service/scheduler_module/src/scheduler/schedule.rs b/DoWhiz_service/scheduler_module/src/scheduler/schedule.rs
index 9e5c7a2f..5f06f32a 100644
--- a/DoWhiz_service/scheduler_module/src/scheduler/schedule.rs
+++ b/DoWhiz_service/scheduler_module/src/scheduler/schedule.rs
@@ -4,6 +4,17 @@ use std::str::FromStr;
 
 use super::types::SchedulerError;
 
+const WEEKDAY_REQUEST_MARKERS: &[&str] = &[
+    "weekday",
+    "weekdays",
+    "business day",
+    "business days",
+    "monday through friday",
+    "monday to friday",
+    "mon-fri",
+    "mon thru fri",
+];
+
 pub(crate) fn validate_cron_expression(expression: &str) -> Result<(), SchedulerError> {
     let fields = expression.split_whitespace().count();
     if fields != 6 {
@@ -25,3 +36,41 @@ pub(crate) fn next_run_after(
     }
     Err(SchedulerError::NoNextRun)
 }
+
+/// Normalize legacy ambiguous weekday cron expressions for explicit weekday requests.
+///
+/// The cron parser used by the scheduler does not follow the common Unix-cron numeric weekday
+/// convention. In practice, numeric weekday ranges like `1-5` are ambiguous for model output and
+/// have historically produced Sunday-Thursday runs when the user asked for Monday-Friday
+/// weekdays. When the request text clearly asks for a Monday-Friday cadence, rewrite legacy
+/// numeric weekday fields to the unambiguous `MON-FRI`.
+pub(crate) fn normalize_weekday_cron_expression( + expression: &str, + request_context: Option<&str>, +) -> String { + let Some(request_context) = request_context else { + return expression.to_string(); + }; + if !request_implies_monday_through_friday(request_context) { + return expression.to_string(); + } + + let mut fields: Vec<&str> = expression.split_whitespace().collect(); + if fields.len() != 6 || !is_legacy_weekday_field(fields[5]) { + return expression.to_string(); + } + + fields[5] = "MON-FRI"; + fields.join(" ") +} + +fn request_implies_monday_through_friday(text: &str) -> bool { + let normalized = text.to_ascii_lowercase(); + WEEKDAY_REQUEST_MARKERS + .iter() + .any(|marker| normalized.contains(marker)) +} + +fn is_legacy_weekday_field(field: &str) -> bool { + matches!(field.trim(), "0-4" | "0,1,2,3,4" | "1-5" | "1,2,3,4,5") +} diff --git a/DoWhiz_service/scheduler_module/src/scheduler/store/mongo.rs b/DoWhiz_service/scheduler_module/src/scheduler/store/mongo.rs index 3c04d365..cec97152 100644 --- a/DoWhiz_service/scheduler_module/src/scheduler/store/mongo.rs +++ b/DoWhiz_service/scheduler_module/src/scheduler/store/mongo.rs @@ -11,9 +11,9 @@ use uuid::Uuid; use crate::mongo_store::{create_client_from_env, database_from_env, ensure_index_compatible}; -use super::super::is_user_visible_routine_task; use super::super::types::{Schedule, ScheduledTask, SchedulerError}; use super::super::utils::{task_kind_channel, task_kind_label}; +use super::super::{is_user_visible_routine_task, maybe_repair_legacy_weekday_cron_task}; use super::{RoutineSummary, TaskDebugArchiveRecord, TaskStatusSummary}; static EXECUTION_SEQ: AtomicI64 = AtomicI64::new(1); @@ -112,6 +112,7 @@ impl MongoSchedulerStore { .map_err(mongo_err)?; let mut seen_task_ids = HashSet::new(); let mut tasks = Vec::new(); + let now = Utc::now(); for row in cursor { let document = row.map_err(mongo_err)?; if let Ok(task_id) = document.get_str("task_id") { @@ -119,7 +120,10 @@ impl MongoSchedulerStore { continue; } } - let task = deserialize_task_document(&document)?; + let mut task = deserialize_task_document(&document)?; + if maybe_repair_legacy_weekday_cron_task(&mut task, now)? { + self.update_task(&task)?; + } tasks.push(task); } tasks.sort_by_key(|task| task.created_at); @@ -134,7 +138,15 @@ impl MongoSchedulerStore { .tasks .find_one(self.task_filter(task_id), None) .map_err(mongo_err)?; - document.as_ref().map(deserialize_task_document).transpose() + let Some(document) = document else { + return Ok(None); + }; + + let mut task = deserialize_task_document(&document)?; + if maybe_repair_legacy_weekday_cron_task(&mut task, Utc::now())? { + self.update_task(&task)?; + } + Ok(Some(task)) } pub(crate) fn insert_task(&self, task: &ScheduledTask) -> Result<(), SchedulerError> { @@ -391,6 +403,7 @@ impl MongoSchedulerStore { .map_err(mongo_err)?; let mut summaries = Vec::new(); let mut seen_task_ids = HashSet::new(); + let now = Utc::now(); for row in cursor { let task_doc = row.map_err(mongo_err)?; let task_id = task_doc @@ -399,23 +412,31 @@ impl MongoSchedulerStore { if !seen_task_ids.insert(task_id.to_string()) { continue; } + let mut task = deserialize_task_document(&task_doc)?; + if maybe_repair_legacy_weekday_cron_task(&mut task, now)? 
{
+                self.update_task(&task)?;
+            }
             let request_summary = derive_request_summary(&task_doc);
-            let schedule = task_doc.get_document("schedule").ok();
             let execution = self.latest_execution_for_task(task_id)?;
+            let (schedule_type, next_run, run_at) = match &task.schedule {
+                Schedule::Cron { next_run, .. } => {
+                    ("cron".to_string(), Some(next_run.to_rfc3339()), None)
+                }
+                Schedule::OneShot { run_at } => {
+                    ("one_shot".to_string(), None, Some(run_at.to_rfc3339()))
+                }
+            };
             summaries.push(TaskStatusSummary {
                 id: task_id.to_string(),
                 kind: task_doc.get_str("kind").unwrap_or("unknown").to_string(),
                 channel: task_doc.get_str("channel").unwrap_or("email").to_string(),
                 request_summary,
-                enabled: task_doc.get_bool("enabled").unwrap_or(false),
-                created_at: datetime_field_to_rfc3339(&task_doc, "created_at").unwrap_or_default(),
-                last_run: datetime_field_to_rfc3339(&task_doc, "last_run"),
-                schedule_type: schedule
-                    .and_then(|doc| doc.get_str("type").ok())
-                    .unwrap_or("one_shot")
-                    .to_string(),
-                next_run: schedule.and_then(|doc| datetime_field_to_rfc3339(doc, "next_run")),
-                run_at: schedule.and_then(|doc| datetime_field_to_rfc3339(doc, "run_at")),
+                enabled: task.enabled,
+                created_at: task.created_at.to_rfc3339(),
+                last_run: task.last_run.map(|value| value.to_rfc3339()),
+                schedule_type,
+                next_run,
+                run_at,
                 execution_status: execution
                     .as_ref()
                     .and_then(|doc| doc.get_str("status").ok())
@@ -456,7 +477,10 @@
             continue;
         }
 
-        let task = deserialize_task_document(&task_doc)?;
+        let mut task = deserialize_task_document(&task_doc)?;
+        if maybe_repair_legacy_weekday_cron_task(&mut task, now)? {
+            self.update_task(&task)?;
+        }
         if !is_user_visible_routine_task(&task, now) {
             continue;
         }
diff --git a/DoWhiz_service/scheduler_module/src/scheduler/tests.rs b/DoWhiz_service/scheduler_module/src/scheduler/tests.rs
index 0d7962ad..5b9d9ca4 100644
--- a/DoWhiz_service/scheduler_module/src/scheduler/tests.rs
+++ b/DoWhiz_service/scheduler_module/src/scheduler/tests.rs
@@ -1,4 +1,4 @@
-use chrono::Utc;
+use chrono::{DateTime, Utc};
 use std::fs;
 use std::path::{Path, PathBuf};
 use std::time::Duration;
@@ -8,8 +8,10 @@ use uuid::Uuid;
 use crate::channel::{Channel, ChannelMetadata};
 
 use super::{
-    actions::{apply_scheduler_actions, schedule_send_email},
-    is_user_visible_routine_task, prepare_task_for_resume,
+    actions::{
+        apply_scheduler_actions, resolve_schedule_request_with_context, schedule_send_email,
+    },
+    is_user_visible_routine_task, maybe_repair_legacy_weekday_cron_task, prepare_task_for_resume,
     snapshot::build_scheduler_snapshot,
     RunTaskTask, Schedule, ScheduledTask, Scheduler, SchedulerError, TaskExecution, TaskExecutor,
     TaskKind,
@@ -85,6 +87,18 @@ fn force_one_shot_due(scheduler: &mut Scheduler, task_id: Uu
         .expect("persist forced one-shot schedule");
 }
 
+fn parse_utc(value: &str) -> DateTime<Utc> {
+    DateTime::parse_from_rfc3339(value)
+        .expect("valid RFC3339 timestamp")
+        .with_timezone(&Utc)
+}
+
+fn write_thread_request(workspace: &Path, content: &str) {
+    let incoming_email = workspace.join("incoming_email");
+    fs::create_dir_all(&incoming_email).expect("incoming_email dir");
+    fs::write(incoming_email.join("thread_request.md"), content).expect("thread_request");
+}
+
 #[test]
 fn defer_one_shot_task_by_id_pushes_run_at_forward_and_persists() {
     let temp = TempDir::new().expect("tempdir");
@@ -303,6 +317,99 @@ fn apply_scheduler_actions_creates_run_task() {
     }
 }
 
+#[test]
+fn resolve_schedule_request_with_context_normalizes_legacy_weekday_cron() {
+    let now = 
parse_utc("2026-04-02T20:00:00Z"); + let schedule = run_task_module::ScheduleRequest::Cron { + expression: "0 0 16 * * 1-5".to_string(), + }; + + let resolved = resolve_schedule_request_with_context( + &schedule, + now, + Some("Please send this every weekday at 9:00 AM America/Los_Angeles."), + ) + .expect("resolve schedule"); + + match resolved { + Schedule::Cron { + expression, + next_run, + } => { + assert_eq!(expression, "0 0 16 * * MON-FRI"); + assert_eq!(next_run, parse_utc("2026-04-03T16:00:00Z")); + } + _ => panic!("expected cron schedule"), + } +} + +#[test] +fn resolve_schedule_request_with_context_keeps_explicit_sunday_thursday_cron() { + let now = parse_utc("2026-04-02T20:00:00Z"); + let schedule = run_task_module::ScheduleRequest::Cron { + expression: "0 0 16 * * 1-5".to_string(), + }; + + let resolved = resolve_schedule_request_with_context( + &schedule, + now, + Some("Please send this every Sunday through Thursday at 9:00 AM."), + ) + .expect("resolve schedule"); + + match resolved { + Schedule::Cron { + expression, + next_run, + } => { + assert_eq!(expression, "0 0 16 * * 1-5"); + assert_eq!(next_run, parse_utc("2026-04-05T16:00:00Z")); + } + _ => panic!("expected cron schedule"), + } +} + +#[test] +fn maybe_repair_legacy_weekday_cron_task_uses_thread_request_context() { + let temp = TempDir::new().expect("tempdir"); + let workspace = temp.path().join("workspace"); + let mail_root = temp.path().join("mail"); + fs::create_dir_all(&workspace).expect("workspace"); + fs::create_dir_all(&mail_root).expect("mail root"); + write_thread_request( + &workspace, + "Track GLD for me every weekday at 9:00 AM America/Los_Angeles.", + ); + + let now = parse_utc("2026-04-02T20:00:00Z"); + let run_task = base_run_task(&workspace, &mail_root); + let mut task = ScheduledTask { + id: Uuid::new_v4(), + kind: TaskKind::RunTask(run_task), + schedule: Schedule::Cron { + expression: "0 0 16 * * 1-5".to_string(), + next_run: parse_utc("2026-04-05T16:00:00Z"), + }, + enabled: true, + created_at: now, + last_run: None, + }; + + let repaired = maybe_repair_legacy_weekday_cron_task(&mut task, now).expect("repair task"); + assert!(repaired, "weekday request should repair the legacy cron"); + + match task.schedule { + Schedule::Cron { + expression, + next_run, + } => { + assert_eq!(expression, "0 0 16 * * MON-FRI"); + assert_eq!(next_run, parse_utc("2026-04-03T16:00:00Z")); + } + _ => panic!("expected cron schedule"), + } +} + #[test] fn schedule_send_email_supports_five_and_twenty_minute_reminders() { let temp = TempDir::new().expect("tempdir"); diff --git a/DoWhiz_service/skills/scheduler_maintain/SKILL.md b/DoWhiz_service/skills/scheduler_maintain/SKILL.md index f6b12969..967f01b5 100644 --- a/DoWhiz_service/skills/scheduler_maintain/SKILL.md +++ b/DoWhiz_service/skills/scheduler_maintain/SKILL.md @@ -45,6 +45,9 @@ SCHEDULER_ACTIONS_JSON_END ## Rules - Use RFC3339 UTC timestamps. - Cron uses 6 fields: `sec min hour day month weekday`. +- Prefer named weekdays like `MON-FRI` for recurring weekday schedules. Do not use numeric + weekday ranges for Monday-Friday here; in this scheduler parser, `1-5` is ambiguous and can + behave like Sunday-Thursday. - Do not include workspace paths; `create_run_task` always targets the current workspace. - Output only JSON inside blocks; no commentary inside blocks. - Treat any enabled task shown under `due` as an existing active schedule/task, not as evidence that scheduling is missing.