diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/contracts.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/contracts.rs new file mode 100644 index 00000000..68ce27fd --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/contracts.rs @@ -0,0 +1,772 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use uuid::Uuid; + +pub const GTM_SCHEMA_VERSION: &str = "1.0"; + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "snake_case")] +pub enum AgentId { + RachelOrchestrator, + RachelIcpScout, + RachelOutboundSdr, + RachelFeedbackPrdSynthesizer, + RachelPositioningPmm, + RachelContentStudio, + RachelOnboardingCsm, + RachelExperimentAnalyst, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum TaskPriority { + Low, + Normal, + High, + Urgent, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum TaskStatus { + Succeeded, + NeedsHuman, + Failed, + Partial, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "snake_case")] +pub enum GtmChannel { + Email, + LinkedinAds, + HubspotWorkflow, + LinkedinDm, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum SubjectType { + Objective, + Account, + Contact, + Campaign, + Feature, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PolicyPack { + pub human_approval_required: bool, + pub allowed_channels: Vec, + pub blocked_actions: Vec, + pub pii_policy: String, + pub compliance_region: Vec, +} + +impl Default for PolicyPack { + fn default() -> Self { + Self { + human_approval_required: false, + allowed_channels: vec![ + GtmChannel::Email, + GtmChannel::LinkedinAds, + GtmChannel::HubspotWorkflow, + ], + blocked_actions: Vec::new(), + 
pii_policy: "mask".to_string(), + compliance_region: vec!["US".to_string()], + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct AgentTaskEnvelope { + pub task_id: Uuid, + pub tenant_id: Uuid, + pub objective_id: Uuid, + pub agent_id: AgentId, + pub schema_version: String, + pub requested_at: DateTime, + pub deadline_at: Option>, + pub priority: TaskPriority, + pub policy_pack: PolicyPack, + pub input_refs: Vec, + pub trace_id: Uuid, + pub idempotency_key: String, +} + +impl AgentTaskEnvelope { + pub fn new(agent_id: AgentId) -> Self { + let now = Utc::now(); + let task_id = Uuid::new_v4(); + Self { + task_id, + tenant_id: Uuid::new_v4(), + objective_id: Uuid::new_v4(), + agent_id, + schema_version: GTM_SCHEMA_VERSION.to_string(), + requested_at: now, + deadline_at: None, + priority: TaskPriority::Normal, + policy_pack: PolicyPack::default(), + input_refs: Vec::new(), + trace_id: Uuid::new_v4(), + idempotency_key: format!("{}:{}", agent_id.as_str(), task_id), + } + } + + pub fn with_agent(&self, agent_id: AgentId) -> Self { + let task_id = Uuid::new_v4(); + Self { + task_id, + tenant_id: self.tenant_id, + objective_id: self.objective_id, + agent_id, + schema_version: self.schema_version.clone(), + requested_at: self.requested_at, + deadline_at: self.deadline_at, + priority: self.priority, + policy_pack: self.policy_pack.clone(), + input_refs: self.input_refs.clone(), + trace_id: self.trace_id, + idempotency_key: format!("{}:{}", agent_id.as_str(), task_id), + } + } +} + +impl AgentId { + pub fn as_str(self) -> &'static str { + match self { + AgentId::RachelOrchestrator => "rachel_orchestrator", + AgentId::RachelIcpScout => "rachel_icp_scout", + AgentId::RachelOutboundSdr => "rachel_outbound_sdr", + AgentId::RachelFeedbackPrdSynthesizer => "rachel_feedback_prd_synthesizer", + AgentId::RachelPositioningPmm => "rachel_positioning_pmm", + AgentId::RachelContentStudio => "rachel_content_studio", + AgentId::RachelOnboardingCsm => 
"rachel_onboarding_csm", + AgentId::RachelExperimentAnalyst => "rachel_experiment_analyst", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct AgentTaskResult { + pub task_id: Uuid, + pub status: TaskStatus, + pub schema_version: String, + pub output_payload: T, + pub emitted_events: Vec, + pub confidence: f32, + pub evidence_refs: Vec, + pub next_action: String, + pub errors: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct EventEnvelope { + pub event_id: Uuid, + pub event_type: String, + pub occurred_at: DateTime, + pub producer: AgentId, + pub tenant_id: Uuid, + pub subject_type: SubjectType, + pub subject_id: Uuid, + pub schema_version: String, + pub trace_id: Uuid, + pub idempotency_key: String, + pub payload: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct Objective { + pub name: String, + pub target_metric: String, + pub target_value: String, + pub due_date: Option>, + pub owner: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] +pub struct CurrentState { + pub open_tasks: u32, + pub blockers: Vec, + pub active_campaigns: u32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ResourceLimits { + pub daily_email_cap: u32, + pub budget_cap_usd: u32, + pub human_review_capacity: u32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OrchestratorInput { + pub objective: Objective, + pub current_state: CurrentState, + pub resource_limits: ResourceLimits, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ExecutionStep { + pub step_id: String, + pub description: String, + pub depends_on: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct TaskAssignment { + pub task_type: String, + pub agent_id: AgentId, + pub deadline_at: Option>, + pub input_refs: Vec, +} + +#[derive(Debug, Clone, Serialize, 
Deserialize, PartialEq, Eq)] +pub struct ApprovalRequest { + pub reason: String, + pub risk_level: String, + pub reviewer_group: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct WorkflowState { + pub stage: String, + pub progress_pct: u8, + pub eta_minutes: u32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OrchestratorOutput { + pub execution_plan: Vec, + pub task_assignments: Vec, + pub approval_requests: Vec, + pub workflow_state: WorkflowState, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct AccountSignal { + pub entity_id: Uuid, + pub company_size: u32, + pub industry: String, + pub region: String, + pub product_events_14d: u32, + pub support_tickets_30d: u32, + pub won_deals_12m: u32, + pub lost_deals_12m: u32, + pub churned: bool, + pub activation_days: u32, + pub ltv_usd: f64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct IcpScoutInput { + pub accounts: Vec, + pub current_segment_ids: Vec, + pub min_sample_size: usize, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum EntityType { + Account, + Contact, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum IcpTier { + A, + B, + C, + D, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct IcpScore { + pub entity_id: Uuid, + pub entity_type: EntityType, + pub score_0_100: u8, + pub tier: IcpTier, + pub top_drivers: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SegmentDefinition { + pub segment_id: String, + pub rule_dsl: String, + pub expected_lift: f32, + pub confidence: f32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct DriftReport { + pub drift_detected: bool, + pub drift_dimensions: Vec, + pub recommended_retrain_date: Option>, +} + 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct IcpScoutOutput { + pub icp_scores: Vec, + pub segment_definitions: Vec, + pub anti_icp_rules: Vec, + pub drift_report: DriftReport, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct SegmentContact { + pub recipient_id: Uuid, + pub account_id: Uuid, + pub email: String, + pub first_name: Option, + pub job_title: Option, + pub company_name: Option, + pub timezone: Option, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ClaimRisk { + Low, + Medium, + High, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct MessageVariant { + pub template_id: String, + pub subject: String, + pub body: String, + pub claim_risk: ClaimRisk, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct MessageBundle { + pub segment_id: String, + pub variants: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct SequencePolicy { + pub max_touches: u8, + pub cadence_days: u16, + pub stop_conditions: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ChannelPolicy { + pub email_enabled: bool, + pub linkedin_ads_enabled: bool, + pub linkedin_dm_enabled: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundSdrInput { + pub segment_manifest: Vec, + pub message_bundle: MessageBundle, + pub sequence_policy: SequencePolicy, + pub channel_policy: ChannelPolicy, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct SequenceTouch { + pub touch_number: u8, + pub offset_days: u16, + pub channel: GtmChannel, + pub template_id: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct SequenceDraft { + pub sequence_id: String, + pub touches: Vec, + pub channel: GtmChannel, +} + +#[derive(Debug, Clone, Serialize, 
Deserialize, PartialEq, Eq)] +pub struct SendRequest { + pub recipient_id: Uuid, + pub template_id: String, + pub send_at: DateTime, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ReplyClass { + Positive, + Neutral, + Negative, + Unsubscribe, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ReplyClassification { + pub reply_id: String, + pub class: ReplyClass, + pub sentiment: String, + pub intent: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct Handoff { + pub to_team: String, + pub reason: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OutboundSdrOutput { + pub sequence_draft: Option, + pub personalization_fields_used: Vec, + pub send_requests: Vec, + pub reply_classifications: Vec, + pub handoffs: Vec, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ManualDispatchStatus { + PendingApproval, + ReadyForRep, + Completed, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct LinkedinManualSendTask { + pub manual_task_id: String, + pub recipient_id: Uuid, + pub account_id: Uuid, + pub recipient_email: String, + pub recipient_name: Option, + pub company_name: Option, + pub channel: GtmChannel, + pub template_id: String, + pub send_at: DateTime, + pub assignee_team: String, + pub status: ManualDispatchStatus, + pub instructions: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct HubspotTaskDraft { + pub external_id: String, + pub subject: String, + pub body: String, + pub due_at: DateTime, + pub contact_email: String, + pub owner_team: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct HubspotCommunicationDraft { + pub external_id: String, + pub channel: GtmChannel, + pub contact_email: String, + pub 
scheduled_at: DateTime, + pub summary: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ModeAOutboundDispatchInput { + pub outbound_input: OutboundSdrInput, + pub outbound_output: OutboundSdrOutput, + pub assignee_team: String, + pub reviewer_group: String, + pub approval_required: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ModeAOutboundDispatchOutput { + pub approval_queue: Vec, + pub manual_send_tasks: Vec, + pub hubspot_task_drafts: Vec, + pub hubspot_communication_drafts: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] +pub struct HubspotDispatchReport { + pub tasks_attempted: usize, + pub tasks_created: usize, + pub notes_attempted: usize, + pub notes_created: usize, + pub associations_attempted: usize, + pub associations_created: usize, + pub errors: Vec, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum FeedbackSource { + Onboarding, + OutboundReply, + SupportTicket, + SalesCall, + ProductUsage, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct FeedbackItem { + pub feedback_id: Uuid, + pub source: FeedbackSource, + pub segment_id: Option, + pub text: String, + pub created_at: DateTime, + pub evidence_ref: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ProductContext { + pub roadmap_refs: Vec, + pub constraints: Vec, + pub architecture_notes: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct BusinessContext { + pub revenue_goal: String, + pub strategic_themes: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ClusterPolicy { + pub min_cluster_size: usize, + pub recency_weight: f32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct FeedbackPrdInput { + pub feedback_items: Vec, + pub product_context: 
ProductContext, + pub business_context: BusinessContext, + pub cluster_policy: ClusterPolicy, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct InsightCluster { + pub cluster_id: String, + pub theme: String, + pub frequency: u32, + pub affected_segments: Vec, + pub evidence_refs: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct JobStory { + pub as_persona: String, + pub when_context: String, + pub i_want: String, + pub so_i_can: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PrdDraft { + pub prd_id: String, + pub problem: String, + pub users: Vec, + pub success_metrics: Vec, + pub scope: Vec, + pub risks: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct PriorityScore { + pub prd_id: String, + pub impact: f32, + pub reach: f32, + pub confidence: f32, + pub effort: f32, + pub overall: f32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct FeedbackPrdOutput { + pub insight_clusters: Vec, + pub job_stories: Vec, + pub prd_drafts: Vec, + pub priority_scores: Vec, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum FunnelStage { + Awareness, + Consideration, + Decision, + Expansion, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct PositioningInput { + pub segment_definitions: Vec, + pub insight_clusters: Vec, + pub prd_drafts: Vec, + pub strategic_themes: Vec, + pub data_contract_version: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct MessageMap { + pub segment_id: String, + pub value_proposition: String, + pub pains: Vec, + pub proof_points: Vec, + pub objection_handling: Vec, + pub funnel_stage: FunnelStage, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PositioningBundle { + pub bundle_id: String, + pub message_maps: Vec, + pub 
claim_safe_list: Vec, + pub generated_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PositioningOutput { + pub positioning_bundle: PositioningBundle, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "snake_case")] +pub enum AssetChannel { + Email, + LandingPage, + LinkedinAd, + SalesOnePager, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ContentInput { + pub positioning_bundle: PositioningBundle, + pub channels: Vec, + pub max_assets_per_channel: u8, + pub requires_human_review: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ContentAsset { + pub asset_id: String, + pub channel: AssetChannel, + pub segment_id: String, + pub title: String, + pub body: String, + pub cta: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ContentOutput { + pub assets: Vec, + pub publish_ready: bool, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum RiskSeverity { + Low, + Medium, + High, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct OnboardingInput { + pub customer_id: Uuid, + pub account_name: String, + pub segment_id: String, + pub customer_goals: Vec, + pub known_blockers: Vec, + pub current_activation_rate: f32, + pub target_activation_rate: f32, + pub handoff_summary: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OnboardingMilestone { + pub milestone_id: String, + pub name: String, + pub due_in_days: u16, + pub owner_role: String, + pub success_criteria: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OnboardingRiskFlag { + pub code: String, + pub severity: RiskSeverity, + pub summary: String, + pub mitigation: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 
+pub struct OnboardingOutput { + pub onboarding_plan: Vec, + pub activation_risk_flags: Vec, + pub captured_feedback: Vec, + pub qbr_summary: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct CampaignPerformance { + pub campaign_id: String, + pub segment_id: String, + pub spend_usd: f32, + pub impressions: u32, + pub clicks: u32, + pub meetings: u32, + pub sqls: u32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct FeatureAdoptionSignal { + pub feature_name: String, + pub before_rate: f32, + pub after_rate: f32, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ExperimentInput { + pub experiment_name: String, + pub primary_metric: String, + pub baseline_value: f32, + pub observed_value: f32, + pub sample_size: usize, + pub min_sample_size: usize, + pub confidence_estimate: f32, + pub segment_ids: Vec, + pub campaign_results: Vec, + pub adoption_signals: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ExperimentDesign { + pub experiment_id: String, + pub hypothesis: String, + pub success_metric: String, + pub guardrails: Vec, + pub segments: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ExperimentResultSummary { + pub experiment_id: String, + pub uplift_ratio: f32, + pub statistically_reliable: bool, + pub confidence_estimate: f32, + pub sample_size: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ExperimentRecommendation { + pub action: String, + pub owner: String, + pub rationale: String, + pub expected_impact: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ExperimentOutput { + pub experiment_design: ExperimentDesign, + pub result_summary: ExperimentResultSummary, + pub recommendations: Vec, +} diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/hubspot.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/hubspot.rs new 
file mode 100644 index 00000000..b35645e4 --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/hubspot.rs @@ -0,0 +1,402 @@ +use std::collections::HashMap; +use std::time::Duration; + +use serde::Deserialize; +use serde_json::json; + +use super::contracts::{HubspotDispatchReport, ModeAOutboundDispatchOutput}; + +#[derive(Debug, thiserror::Error)] +pub enum HubspotDispatchError { + #[error("missing HUBSPOT_ACCESS_TOKEN in environment")] + MissingAccessToken, + #[error("hubspot request failed: {0}")] + Request(#[from] reqwest::Error), + #[error("hubspot response parse failed: {0}")] + Parse(#[from] serde_json::Error), + #[error("hubspot request failed with status {status}: {body}")] + UnexpectedStatus { status: u16, body: String }, +} + +#[derive(Debug, Clone)] +pub struct HubspotModeAExecutor { + client: reqwest::blocking::Client, + base_url: String, + access_token: String, +} + +impl HubspotModeAExecutor { + pub fn from_env() -> Result { + let access_token = std::env::var("HUBSPOT_ACCESS_TOKEN") + .ok() + .map(|value| value.trim().to_string()) + .filter(|value| !value.is_empty()) + .ok_or(HubspotDispatchError::MissingAccessToken)?; + let base_url = std::env::var("HUBSPOT_API_BASE_URL") + .ok() + .map(|value| value.trim().to_string()) + .filter(|value| !value.is_empty()) + .unwrap_or_else(|| "https://api.hubapi.com".to_string()); + Ok(Self::new(base_url, access_token)) + } + + pub fn new(base_url: String, access_token: String) -> Self { + let client = reqwest::blocking::Client::builder() + .timeout(Duration::from_secs(20)) + .build() + .expect("hubspot client should build"); + Self { + client, + base_url, + access_token, + } + } + + pub fn dispatch_mode_a_drafts( + &self, + output: &ModeAOutboundDispatchOutput, + ) -> HubspotDispatchReport { + let mut report = HubspotDispatchReport::default(); + let mut contacts_by_email = HashMap::new(); + + for task in &output.hubspot_task_drafts { + report.tasks_attempted += 1; + match self.create_task(task) { + 
Ok(task_id) => { + report.tasks_created += 1; + let contact_id = resolve_contact_id_cached( + self, + &task.contact_email, + &mut contacts_by_email, + &mut report, + ); + if let Some(contact_id) = contact_id { + report.associations_attempted += 1; + if self + .associate_entity_with_contact("tasks", &task_id, &contact_id) + .is_ok() + { + report.associations_created += 1; + } else { + report.errors.push(format!( + "failed to associate HubSpot task {} with contact {}", + task_id, task.contact_email + )); + } + } + } + Err(err) => { + report.errors.push(format!( + "failed to create HubSpot task draft {}: {}", + task.external_id, err + )); + } + } + } + + for communication in &output.hubspot_communication_drafts { + report.notes_attempted += 1; + match self.create_note(communication) { + Ok(note_id) => { + report.notes_created += 1; + let contact_id = resolve_contact_id_cached( + self, + &communication.contact_email, + &mut contacts_by_email, + &mut report, + ); + if let Some(contact_id) = contact_id { + report.associations_attempted += 1; + if self + .associate_entity_with_contact("notes", ¬e_id, &contact_id) + .is_ok() + { + report.associations_created += 1; + } else { + report.errors.push(format!( + "failed to associate HubSpot note {} with contact {}", + note_id, communication.contact_email + )); + } + } + } + Err(err) => { + report.errors.push(format!( + "failed to create HubSpot communication draft {}: {}", + communication.external_id, err + )); + } + } + } + + report + } + + fn create_task( + &self, + task: &super::contracts::HubspotTaskDraft, + ) -> Result { + let payload = json!({ + "properties": { + "hs_task_subject": task.subject, + "hs_task_body": task.body, + "hs_timestamp": task.due_at.to_rfc3339(), + "hs_task_status": "NOT_STARTED", + "hs_task_priority": "MEDIUM" + } + }); + let response = self.post_json("/crm/v3/objects/tasks", &payload)?; + extract_id(response) + } + + fn create_note( + &self, + communication: 
&super::contracts::HubspotCommunicationDraft, + ) -> Result { + let payload = json!({ + "properties": { + "hs_note_body": communication.summary, + "hs_timestamp": communication.scheduled_at.to_rfc3339(), + } + }); + let response = self.post_json("/crm/v3/objects/notes", &payload)?; + extract_id(response) + } + + fn associate_entity_with_contact( + &self, + entity_kind: &str, + entity_id: &str, + contact_id: &str, + ) -> Result<(), HubspotDispatchError> { + let path = format!( + "/crm/v4/objects/{}/{}/associations/default/contacts/{}", + entity_kind, entity_id, contact_id + ); + self.put_empty(&path) + } + + fn find_contact_id_by_email( + &self, + email: &str, + ) -> Result, HubspotDispatchError> { + let payload = json!({ + "filterGroups": [ + { + "filters": [ + { + "propertyName": "email", + "operator": "EQ", + "value": email + } + ] + } + ], + "limit": 1 + }); + + let response = self.post_json("/crm/v3/objects/contacts/search", &payload)?; + let parsed: ContactSearchResponse = serde_json::from_value(response)?; + Ok(parsed.results.first().map(|result| result.id.clone())) + } + + fn post_json( + &self, + path: &str, + payload: &serde_json::Value, + ) -> Result { + let response = self + .client + .post(self.build_url(path)) + .bearer_auth(&self.access_token) + .json(payload) + .send()?; + let status = response.status(); + if !status.is_success() { + let body = response.text().unwrap_or_default(); + return Err(HubspotDispatchError::UnexpectedStatus { + status: status.as_u16(), + body, + }); + } + Ok(response.json()?) 
+ } + + fn put_empty(&self, path: &str) -> Result<(), HubspotDispatchError> { + let response = self + .client + .put(self.build_url(path)) + .bearer_auth(&self.access_token) + .send()?; + let status = response.status(); + if !status.is_success() { + let body = response.text().unwrap_or_default(); + return Err(HubspotDispatchError::UnexpectedStatus { + status: status.as_u16(), + body, + }); + } + Ok(()) + } + + fn build_url(&self, path: &str) -> String { + format!("{}{}", self.base_url.trim_end_matches('/'), path) + } +} + +fn extract_id(value: serde_json::Value) -> Result { + value + .get("id") + .and_then(|id| id.as_str()) + .map(|id| id.to_string()) + .ok_or_else(|| HubspotDispatchError::UnexpectedStatus { + status: 200, + body: format!("missing id in response payload: {}", value), + }) +} + +fn resolve_contact_id_cached( + executor: &HubspotModeAExecutor, + email: &str, + cache: &mut HashMap>, + report: &mut HubspotDispatchReport, +) -> Option { + if let Some(existing) = cache.get(email) { + return existing.clone(); + } + + let resolved = match executor.find_contact_id_by_email(email) { + Ok(value) => value, + Err(err) => { + report.errors.push(format!( + "failed to find HubSpot contact for {}: {}", + email, err + )); + None + } + }; + cache.insert(email.to_string(), resolved.clone()); + resolved +} + +#[derive(Debug, Deserialize)] +struct ContactSearchResponse { + results: Vec, +} + +#[derive(Debug, Deserialize)] +struct ContactSearchResult { + id: String, +} + +#[cfg(test)] +mod tests { + use chrono::Utc; + use mockito::{Matcher, Server}; + + use super::super::contracts::{ + GtmChannel, HubspotCommunicationDraft, HubspotTaskDraft, ModeAOutboundDispatchOutput, + }; + use super::*; + + fn mode_a_output() -> ModeAOutboundDispatchOutput { + ModeAOutboundDispatchOutput { + approval_queue: Vec::new(), + manual_send_tasks: Vec::new(), + hubspot_task_drafts: vec![HubspotTaskDraft { + external_id: "task_ext_1".to_string(), + subject: "Follow up on LinkedIn 
outreach".to_string(), + body: "Please send the approved DM template manually.".to_string(), + due_at: Utc::now(), + contact_email: "alpha@example.com".to_string(), + owner_team: "sdr_team".to_string(), + }], + hubspot_communication_drafts: vec![HubspotCommunicationDraft { + external_id: "comm_ext_1".to_string(), + channel: GtmChannel::LinkedinDm, + contact_email: "alpha@example.com".to_string(), + scheduled_at: Utc::now(), + summary: "Planned manual LinkedIn outreach".to_string(), + }], + } + } + + #[test] + fn dispatch_mode_a_drafts_creates_tasks_and_notes() { + let mut server = Server::new(); + let _search = server + .mock("POST", "/crm/v3/objects/contacts/search") + .match_header("authorization", "Bearer test-token") + .match_body(Matcher::Regex("alpha@example.com".to_string())) + .with_status(200) + .with_body(r#"{"results":[{"id":"201"}]}"#) + .create(); + let _task_create = server + .mock("POST", "/crm/v3/objects/tasks") + .match_header("authorization", "Bearer test-token") + .with_status(201) + .with_body(r#"{"id":"301"}"#) + .create(); + let _task_assoc = server + .mock( + "PUT", + "/crm/v4/objects/tasks/301/associations/default/contacts/201", + ) + .match_header("authorization", "Bearer test-token") + .with_status(204) + .create(); + let _note_create = server + .mock("POST", "/crm/v3/objects/notes") + .match_header("authorization", "Bearer test-token") + .with_status(201) + .with_body(r#"{"id":"401"}"#) + .create(); + let _note_assoc = server + .mock( + "PUT", + "/crm/v4/objects/notes/401/associations/default/contacts/201", + ) + .match_header("authorization", "Bearer test-token") + .with_status(204) + .create(); + + let executor = HubspotModeAExecutor::new(server.url(), "test-token".to_string()); + let report = executor.dispatch_mode_a_drafts(&mode_a_output()); + + assert_eq!(report.tasks_attempted, 1); + assert_eq!(report.tasks_created, 1); + assert_eq!(report.notes_attempted, 1); + assert_eq!(report.notes_created, 1); + 
assert_eq!(report.associations_attempted, 2); + assert_eq!(report.associations_created, 2); + assert!(report.errors.is_empty()); + } + + #[test] + fn dispatch_mode_a_drafts_reports_api_errors() { + let mut server = Server::new(); + let _task_create = server + .mock("POST", "/crm/v3/objects/tasks") + .match_header("authorization", "Bearer test-token") + .with_status(500) + .with_body(r#"{"message":"server error"}"#) + .create(); + let _note_create = server + .mock("POST", "/crm/v3/objects/notes") + .match_header("authorization", "Bearer test-token") + .with_status(500) + .with_body(r#"{"message":"server error"}"#) + .create(); + + let executor = HubspotModeAExecutor::new(server.url(), "test-token".to_string()); + let report = executor.dispatch_mode_a_drafts(&mode_a_output()); + + assert_eq!(report.tasks_attempted, 1); + assert_eq!(report.tasks_created, 0); + assert_eq!(report.notes_attempted, 1); + assert_eq!(report.notes_created, 0); + assert_eq!(report.associations_attempted, 0); + assert_eq!(report.associations_created, 0); + assert_eq!(report.errors.len(), 2); + } +} diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/mod.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/mod.rs new file mode 100644 index 00000000..2235b3b4 --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/mod.rs @@ -0,0 +1,28 @@ +mod contracts; +mod hubspot; +mod mode_a; +mod phase1; +mod phase2; +mod phase3; + +pub use contracts::{ + AccountSignal, AgentId, AgentTaskEnvelope, AgentTaskResult, ApprovalRequest, AssetChannel, + BusinessContext, CampaignPerformance, ChannelPolicy, ClaimRisk, ClusterPolicy, ContentAsset, + ContentInput, ContentOutput, CurrentState, EntityType, EventEnvelope, ExperimentDesign, + ExperimentInput, ExperimentOutput, ExperimentRecommendation, ExperimentResultSummary, + FeatureAdoptionSignal, FeedbackItem, FeedbackPrdInput, FeedbackPrdOutput, FeedbackSource, + FunnelStage, GtmChannel, HubspotCommunicationDraft, HubspotDispatchReport, 
HubspotTaskDraft, + IcpScore, IcpScoutInput, IcpScoutOutput, IcpTier, InsightCluster, JobStory, + LinkedinManualSendTask, ManualDispatchStatus, MessageBundle, MessageMap, MessageVariant, + ModeAOutboundDispatchInput, ModeAOutboundDispatchOutput, Objective, OnboardingInput, + OnboardingMilestone, OnboardingOutput, OnboardingRiskFlag, OrchestratorInput, + OrchestratorOutput, OutboundSdrInput, OutboundSdrOutput, PolicyPack, PositioningBundle, + PositioningInput, PositioningOutput, PrdDraft, PriorityScore, ProductContext, ResourceLimits, + RiskSeverity, SegmentContact, SegmentDefinition, SequencePolicy, TaskPriority, TaskStatus, + GTM_SCHEMA_VERSION, +}; +pub use hubspot::{HubspotDispatchError, HubspotModeAExecutor}; +pub use mode_a::{ModeAAgentEngine, ModeAAgentError, ModeAWorkflowInput, ModeAWorkflowResult}; +pub use phase1::{GtmAgentError, Phase1AgentEngine, Phase1WorkflowInput, Phase1WorkflowResult}; +pub use phase2::{Phase2AgentEngine, Phase2AgentError, Phase2WorkflowInput, Phase2WorkflowResult}; +pub use phase3::{Phase3AgentEngine, Phase3AgentError, Phase3WorkflowInput, Phase3WorkflowResult}; diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/mode_a.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/mode_a.rs new file mode 100644 index 00000000..839b0253 --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/mode_a.rs @@ -0,0 +1,535 @@ +use std::collections::HashMap; + +use chrono::{Duration, Utc}; +use serde::Serialize; +use uuid::Uuid; + +use super::contracts::{ + AgentId, AgentTaskEnvelope, AgentTaskResult, ApprovalRequest, ClaimRisk, EventEnvelope, + GtmChannel, HubspotCommunicationDraft, HubspotTaskDraft, LinkedinManualSendTask, + ManualDispatchStatus, ModeAOutboundDispatchInput, ModeAOutboundDispatchOutput, + OutboundSdrInput, SendRequest, SubjectType, TaskStatus, GTM_SCHEMA_VERSION, +}; + +#[derive(Debug, thiserror::Error)] +pub enum ModeAAgentError { + #[error("event payload serialization failed: {0}")] + EventSerialization(#[from] 
serde_json::Error), + #[error("mode A dispatch requires an outbound sequence draft")] + MissingSequenceDraft, + #[error("mode A dispatch requires at least one outbound message variant")] + MissingMessageVariant, + #[error("mode A dispatch supports only linkedin_dm channel, got {0}")] + UnsupportedChannel(String), +} + +#[derive(Debug, Clone)] +pub struct ModeAWorkflowInput { + pub base_envelope: AgentTaskEnvelope, + pub dispatch: ModeAOutboundDispatchInput, +} + +#[derive(Debug, Clone)] +pub struct ModeAWorkflowResult { + pub dispatch: AgentTaskResult, + pub events: Vec, +} + +#[derive(Debug, Default, Clone)] +pub struct ModeAAgentEngine; + +impl ModeAAgentEngine { + pub fn run_workflow( + &self, + input: ModeAWorkflowInput, + ) -> Result { + let dispatch = self.run_linkedin_dispatch( + input.base_envelope.with_agent(AgentId::RachelOutboundSdr), + input.dispatch, + )?; + + Ok(ModeAWorkflowResult { + events: dispatch.emitted_events.clone(), + dispatch, + }) + } + + pub fn run_linkedin_dispatch( + &self, + envelope: AgentTaskEnvelope, + input: ModeAOutboundDispatchInput, + ) -> Result, ModeAAgentError> { + let sequence = input + .outbound_output + .sequence_draft + .as_ref() + .ok_or(ModeAAgentError::MissingSequenceDraft)?; + if sequence.channel != GtmChannel::LinkedinDm { + return Err(ModeAAgentError::UnsupportedChannel( + channel_label(sequence.channel).to_string(), + )); + } + + let selected_variant = input + .outbound_input + .message_bundle + .variants + .iter() + .min_by_key(|variant| claim_risk_rank(variant.claim_risk)) + .cloned() + .ok_or(ModeAAgentError::MissingMessageVariant)?; + + let reviewer_group = normalize_non_empty(input.reviewer_group, "gtm_ops"); + let assignee_team = normalize_non_empty(input.assignee_team, "sdr_team"); + + let approval_required = input.approval_required + || envelope.policy_pack.human_approval_required + || selected_variant.claim_risk != ClaimRisk::Low; + + let mut send_requests = input.outbound_output.send_requests.clone(); + 
if send_requests.is_empty() { + send_requests = + synthesize_send_requests(&input.outbound_input, &selected_variant.template_id); + } + + let contacts_by_id = input + .outbound_input + .segment_manifest + .iter() + .map(|contact| (contact.recipient_id, contact)) + .collect::>(); + + let mut manual_send_tasks = Vec::new(); + let mut hubspot_task_drafts = Vec::new(); + let mut hubspot_communication_drafts = Vec::new(); + let mut errors = Vec::new(); + let mut events = Vec::new(); + + for (index, send_request) in send_requests.iter().enumerate() { + let Some(contact) = contacts_by_id.get(&send_request.recipient_id) else { + errors.push(format!( + "recipient {} not found in segment manifest", + send_request.recipient_id + )); + continue; + }; + + let manual_task_id = format!("mode_a:{}:{}", envelope.task_id.simple(), index + 1); + let dispatch_status = if approval_required { + ManualDispatchStatus::PendingApproval + } else { + ManualDispatchStatus::ReadyForRep + }; + let recipient_name = contact.first_name.clone(); + let instructions = build_linkedin_instructions(contact, &selected_variant.template_id); + let manual_task = LinkedinManualSendTask { + manual_task_id: manual_task_id.clone(), + recipient_id: contact.recipient_id, + account_id: contact.account_id, + recipient_email: contact.email.clone(), + recipient_name, + company_name: contact.company_name.clone(), + channel: GtmChannel::LinkedinDm, + template_id: selected_variant.template_id.clone(), + send_at: send_request.send_at, + assignee_team: assignee_team.clone(), + status: dispatch_status, + instructions, + }; + + let task_subject = format!( + "[LinkedIn Manual Send] {}", + contact + .company_name + .as_deref() + .unwrap_or(contact.email.as_str()) + ); + let task_body = format!( + "Use template `{}` and send a LinkedIn DM manually.\nRecipient: {}\nSegment: {}\nMessage preview subject: {}", + selected_variant.template_id, + contact.email, + input.outbound_input.message_bundle.segment_id, + 
selected_variant.subject + ); + let hubspot_task = HubspotTaskDraft { + external_id: format!("{}:hubspot_task", manual_task_id), + subject: task_subject, + body: task_body, + due_at: send_request.send_at, + contact_email: contact.email.clone(), + owner_team: assignee_team.clone(), + }; + let hubspot_communication = HubspotCommunicationDraft { + external_id: format!("{}:hubspot_comm", manual_task_id), + channel: GtmChannel::LinkedinDm, + contact_email: contact.email.clone(), + scheduled_at: send_request.send_at, + summary: format!( + "Planned manual LinkedIn outreach using template {}", + selected_variant.template_id + ), + }; + + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "mode_a.manual_send.task_created", + SubjectType::Contact, + contact.recipient_id, + &manual_task, + )?); + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "mode_a.hubspot.task.drafted", + SubjectType::Contact, + contact.recipient_id, + &hubspot_task, + )?); + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "mode_a.hubspot.communication.drafted", + SubjectType::Contact, + contact.recipient_id, + &hubspot_communication, + )?); + + manual_send_tasks.push(manual_task); + hubspot_task_drafts.push(hubspot_task); + hubspot_communication_drafts.push(hubspot_communication); + } + + let mut approval_queue = Vec::new(); + if approval_required && !manual_send_tasks.is_empty() { + let approval = ApprovalRequest { + reason: + "Manual LinkedIn send tasks are queued and require manager approval before reps execute" + .to_string(), + risk_level: if selected_variant.claim_risk == ClaimRisk::High { + "high".to_string() + } else { + "medium".to_string() + }, + reviewer_group, + }; + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "mode_a.approval.queued", + SubjectType::Campaign, + envelope.objective_id, + &approval, + )?); + approval_queue.push(approval); + } + + if manual_send_tasks.is_empty() && 
errors.is_empty() {
+            errors.push("no manual LinkedIn tasks could be generated".to_string());
+        }
+
+        let output = ModeAOutboundDispatchOutput {
+            approval_queue,
+            manual_send_tasks,
+            hubspot_task_drafts,
+            hubspot_communication_drafts,
+        };
+
+        let status = if output.manual_send_tasks.is_empty() {
+            TaskStatus::Failed
+        } else if !output.approval_queue.is_empty() {
+            TaskStatus::NeedsHuman
+        } else {
+            TaskStatus::Succeeded
+        };
+
+        Ok(AgentTaskResult {
+            task_id: envelope.task_id,
+            status,
+            schema_version: GTM_SCHEMA_VERSION.to_string(),
+            output_payload: output,
+            emitted_events: events,
+            confidence: match status {
+                TaskStatus::Failed => 0.3,
+                TaskStatus::NeedsHuman => 0.68,
+                _ => 0.84,
+            },
+            evidence_refs: vec![format!(
+                "segment:{}",
+                input.outbound_input.message_bundle.segment_id
+            )],
+            next_action: match status {
+                TaskStatus::Failed => {
+                    "Fix segment manifest and outbound sequence before creating manual tasks"
+                        .to_string()
+                }
+                TaskStatus::NeedsHuman => {
+                    "Complete manager approval, then reps execute LinkedIn sends and mark completion"
+                        .to_string()
+                }
+                _ => "Create HubSpot tasks and start manual LinkedIn execution".to_string(),
+            },
+            errors,
+        })
+    }
+
+    fn make_event<T: Serialize>(
+        &self,
+        envelope: &AgentTaskEnvelope,
+        producer: AgentId,
+        event_type: &str,
+        subject_type: SubjectType,
+        subject_id: Uuid,
+        payload: &T,
+    ) -> Result<EventEnvelope, ModeAAgentError> {
+        Ok(EventEnvelope {
+            event_id: Uuid::new_v4(),
+            event_type: event_type.to_string(),
+            occurred_at: Utc::now(),
+            producer,
+            tenant_id: envelope.tenant_id,
+            subject_type,
+            subject_id,
+            schema_version: GTM_SCHEMA_VERSION.to_string(),
+            trace_id: envelope.trace_id,
+            idempotency_key: format!(
+                "{}:{}:{}",
+                envelope.idempotency_key,
+                producer.as_str(),
+                event_type
+            ),
+            payload: serde_json::to_value(payload)?,
+        })
+    }
+}
+
+fn claim_risk_rank(risk: ClaimRisk) -> u8 {
+    match risk {
+        ClaimRisk::Low => 0,
+        ClaimRisk::Medium => 1,
+        ClaimRisk::High => 2,
+    }
+}
+
+fn synthesize_send_requests(input: &OutboundSdrInput, template_id: &str) -> Vec<SendRequest> {
+    input
+        .segment_manifest
+        .iter()
+        .enumerate()
+        .map(|(index, contact)| SendRequest {
+            recipient_id: contact.recipient_id,
+            template_id: template_id.to_string(),
+            send_at: Utc::now() + Duration::minutes((index as i64) * 5),
+        })
+        .collect()
+}
+
+fn normalize_non_empty(value: String, fallback: &str) -> String {
+    let trimmed = value.trim();
+    if trimmed.is_empty() {
+        fallback.to_string()
+    } else {
+        trimmed.to_string()
+    }
+}
+
+fn build_linkedin_instructions(
+    contact: &super::contracts::SegmentContact,
+    template_id: &str,
+) -> String {
+    let recipient = contact
+        .first_name
+        .as_deref()
+        .unwrap_or(contact.email.as_str());
+    let company = contact.company_name.as_deref().unwrap_or("target account");
+    format!(
+        "Find {} at {} on LinkedIn, send DM using template `{}`, then log outcome in HubSpot.",
+        recipient, company, template_id
+    )
+}
+
+fn channel_label(channel: GtmChannel) -> &'static str {
+    match channel {
+        GtmChannel::Email => "email",
+        GtmChannel::LinkedinAds => "linkedin_ads",
+        GtmChannel::HubspotWorkflow => "hubspot_workflow",
+        GtmChannel::LinkedinDm => "linkedin_dm",
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::contracts::{
+        AgentId, AgentTaskEnvelope, ChannelPolicy, MessageBundle, MessageVariant, OutboundSdrInput,
+        OutboundSdrOutput, SegmentContact, SequenceDraft, SequencePolicy, SequenceTouch,
+        TaskPriority,
+    };
+    use super::*;
+
+    fn base_envelope() -> AgentTaskEnvelope {
+        let mut envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator);
+        envelope.priority = TaskPriority::High;
+        envelope.policy_pack.allowed_channels =
+            vec![GtmChannel::LinkedinDm, GtmChannel::HubspotWorkflow];
+        envelope
+    }
+
+    fn outbound_input(risk: ClaimRisk) -> OutboundSdrInput {
+        OutboundSdrInput {
+            segment_manifest: vec![
+                SegmentContact {
+                    recipient_id: Uuid::new_v4(),
+                    account_id: Uuid::new_v4(),
+                    email: "alpha@example.com".to_string(),
+                    first_name:
Some("Avery".to_string()), + job_title: Some("Head of Growth".to_string()), + company_name: Some("Alpha".to_string()), + timezone: Some("America/Los_Angeles".to_string()), + }, + SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "bravo@example.com".to_string(), + first_name: Some("Bailey".to_string()), + job_title: Some("VP Marketing".to_string()), + company_name: Some("Bravo".to_string()), + timezone: Some("America/New_York".to_string()), + }, + ], + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "linkedin_dm_safe_v1".to_string(), + subject: "Idea for faster GTM execution".to_string(), + body: "Sharing a short playbook.".to_string(), + claim_risk: risk, + }], + }, + sequence_policy: SequencePolicy { + max_touches: 3, + cadence_days: 2, + stop_conditions: vec!["positive_reply".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: false, + linkedin_ads_enabled: false, + linkedin_dm_enabled: true, + }, + } + } + + fn outbound_output(input: &OutboundSdrInput, channel: GtmChannel) -> OutboundSdrOutput { + OutboundSdrOutput { + sequence_draft: Some(SequenceDraft { + sequence_id: format!("seq-{}", Uuid::new_v4().simple()), + touches: vec![SequenceTouch { + touch_number: 1, + offset_days: 0, + channel, + template_id: "linkedin_dm_safe_v1".to_string(), + }], + channel, + }), + personalization_fields_used: vec!["first_name".to_string()], + send_requests: input + .segment_manifest + .iter() + .enumerate() + .map(|(idx, contact)| SendRequest { + recipient_id: contact.recipient_id, + template_id: "linkedin_dm_safe_v1".to_string(), + send_at: Utc::now() + Duration::minutes((idx as i64) * 5), + }) + .collect(), + reply_classifications: Vec::new(), + handoffs: Vec::new(), + } + } + + #[test] + fn mode_a_dispatch_creates_approval_and_hubspot_drafts() { + let engine = ModeAAgentEngine; + let input = outbound_input(ClaimRisk::Low); + let output = 
outbound_output(&input, GtmChannel::LinkedinDm); + + let result = engine + .run_linkedin_dispatch( + base_envelope(), + ModeAOutboundDispatchInput { + outbound_input: input, + outbound_output: output, + assignee_team: "sdr_team".to_string(), + reviewer_group: "gtm_ops".to_string(), + approval_required: true, + }, + ) + .unwrap(); + + assert_eq!(result.status, TaskStatus::NeedsHuman); + assert_eq!(result.output_payload.manual_send_tasks.len(), 2); + assert_eq!(result.output_payload.hubspot_task_drafts.len(), 2); + assert_eq!(result.output_payload.hubspot_communication_drafts.len(), 2); + assert_eq!(result.output_payload.approval_queue.len(), 1); + assert!(result + .output_payload + .manual_send_tasks + .iter() + .all(|task| task.status == ManualDispatchStatus::PendingApproval)); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "mode_a.approval.queued")); + } + + #[test] + fn mode_a_dispatch_rejects_non_linkedin_channel() { + let engine = ModeAAgentEngine; + let input = outbound_input(ClaimRisk::Low); + let output = outbound_output(&input, GtmChannel::Email); + + let error = engine + .run_linkedin_dispatch( + base_envelope(), + ModeAOutboundDispatchInput { + outbound_input: input, + outbound_output: output, + assignee_team: "sdr_team".to_string(), + reviewer_group: "gtm_ops".to_string(), + approval_required: true, + }, + ) + .unwrap_err(); + + assert!(matches!(error, ModeAAgentError::UnsupportedChannel(_))); + } + + #[test] + fn mode_a_dispatch_can_prepare_ready_for_rep_tasks() { + let engine = ModeAAgentEngine; + let input = outbound_input(ClaimRisk::Low); + let mut output = outbound_output(&input, GtmChannel::LinkedinDm); + output.send_requests.clear(); + + let result = engine + .run_linkedin_dispatch( + base_envelope(), + ModeAOutboundDispatchInput { + outbound_input: input, + outbound_output: output, + assignee_team: "sdr_team".to_string(), + reviewer_group: "gtm_ops".to_string(), + approval_required: false, + }, + ) + .unwrap(); + 
+        assert_eq!(result.status, TaskStatus::Succeeded);
+        assert_eq!(result.output_payload.approval_queue.len(), 0);
+        assert_eq!(result.output_payload.manual_send_tasks.len(), 2);
+        assert!(result
+            .output_payload
+            .manual_send_tasks
+            .iter()
+            .all(|task| task.status == ManualDispatchStatus::ReadyForRep));
+    }
+}
diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/phase1.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/phase1.rs
new file mode 100644
index 00000000..6624359f
--- /dev/null
+++ b/DoWhiz_service/scheduler_module/src/gtm_agents/phase1.rs
@@ -0,0 +1,1235 @@
+use std::cmp::Reverse;
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+use chrono::{Duration, Utc};
+use serde::Serialize;
+use uuid::Uuid;
+
+use super::contracts::{
+    AccountSignal, AgentId, AgentTaskEnvelope, AgentTaskResult, ApprovalRequest, ClaimRisk,
+    DriftReport, EntityType, EventEnvelope, FeedbackPrdOutput, GtmChannel, IcpScore,
+    IcpScoutOutput, IcpTier, InsightCluster, JobStory, OrchestratorOutput, OutboundSdrInput,
+    OutboundSdrOutput, PrdDraft, PriorityScore, SegmentDefinition, SendRequest, SequenceDraft,
+    SequenceTouch, SubjectType, TaskAssignment, TaskStatus, WorkflowState, GTM_SCHEMA_VERSION,
+};
+use super::contracts::{FeedbackPrdInput, IcpScoutInput, OrchestratorInput};
+
+#[derive(Debug, thiserror::Error)]
+pub enum GtmAgentError {
+    #[error("event payload serialization failed: {0}")]
+    EventSerialization(#[from] serde_json::Error),
+}
+
+#[derive(Debug, Clone)]
+pub struct Phase1WorkflowInput {
+    pub base_envelope: AgentTaskEnvelope,
+    pub orchestrator: OrchestratorInput,
+    pub icp_scout: IcpScoutInput,
+    pub outbound_sdr: OutboundSdrInput,
+    pub feedback_prd: FeedbackPrdInput,
+}
+
+#[derive(Debug, Clone)]
+pub struct Phase1WorkflowResult {
+    pub orchestrator: AgentTaskResult<OrchestratorOutput>,
+    pub icp_scout: AgentTaskResult<IcpScoutOutput>,
+    pub outbound_sdr: AgentTaskResult<OutboundSdrOutput>,
+    pub feedback_prd: AgentTaskResult<FeedbackPrdOutput>,
+    pub events: Vec<EventEnvelope>,
+}
+
+#[derive(Debug, Default, Clone)]
+pub struct
Phase1AgentEngine; + +impl Phase1AgentEngine { + pub fn run_workflow( + &self, + input: Phase1WorkflowInput, + ) -> Result { + let orchestrator = self.run_orchestrator( + input.base_envelope.with_agent(AgentId::RachelOrchestrator), + input.orchestrator, + )?; + let icp_scout = self.run_icp_scout( + input.base_envelope.with_agent(AgentId::RachelIcpScout), + input.icp_scout, + )?; + let outbound_sdr = self.run_outbound_sdr( + input.base_envelope.with_agent(AgentId::RachelOutboundSdr), + input.outbound_sdr, + )?; + let feedback_prd = self.run_feedback_prd( + input + .base_envelope + .with_agent(AgentId::RachelFeedbackPrdSynthesizer), + input.feedback_prd, + )?; + + let mut events = Vec::new(); + events.extend(orchestrator.emitted_events.clone()); + events.extend(icp_scout.emitted_events.clone()); + events.extend(outbound_sdr.emitted_events.clone()); + events.extend(feedback_prd.emitted_events.clone()); + + Ok(Phase1WorkflowResult { + orchestrator, + icp_scout, + outbound_sdr, + feedback_prd, + events, + }) + } + + pub fn run_orchestrator( + &self, + envelope: AgentTaskEnvelope, + input: OrchestratorInput, + ) -> Result, GtmAgentError> { + let fallback_deadline = envelope.requested_at + Duration::hours(24); + let deadline = envelope.deadline_at.unwrap_or(fallback_deadline); + let stage = if !input.current_state.blockers.is_empty() { + "blocked" + } else if input.current_state.open_tasks > 0 { + "execution" + } else { + "planning" + }; + + let execution_plan = vec![ + super::contracts::ExecutionStep { + step_id: "phase1_icp".to_string(), + description: "Score accounts and promote ICP segments".to_string(), + depends_on: Vec::new(), + }, + super::contracts::ExecutionStep { + step_id: "phase1_outbound".to_string(), + description: "Draft outbound sequence for promoted segments".to_string(), + depends_on: vec!["phase1_icp".to_string()], + }, + super::contracts::ExecutionStep { + step_id: "phase1_feedback_prd".to_string(), + description: "Synthesize feedback into PRD 
drafts".to_string(), + depends_on: vec!["phase1_outbound".to_string()], + }, + ]; + + let task_assignments = vec![ + TaskAssignment { + task_type: "icp.score.refresh".to_string(), + agent_id: AgentId::RachelIcpScout, + deadline_at: Some(deadline - Duration::hours(16)), + input_refs: envelope.input_refs.clone(), + }, + TaskAssignment { + task_type: "outbound.sequence.prepare".to_string(), + agent_id: AgentId::RachelOutboundSdr, + deadline_at: Some(deadline - Duration::hours(8)), + input_refs: envelope.input_refs.clone(), + }, + TaskAssignment { + task_type: "feedback.prd.synthesize".to_string(), + agent_id: AgentId::RachelFeedbackPrdSynthesizer, + deadline_at: Some(deadline - Duration::hours(2)), + input_refs: envelope.input_refs.clone(), + }, + ]; + + let mut approval_requests = Vec::new(); + if envelope.policy_pack.human_approval_required { + approval_requests.push(ApprovalRequest { + reason: "Policy pack enforces human gate before external sends".to_string(), + risk_level: "medium".to_string(), + reviewer_group: "gtm_ops".to_string(), + }); + } + if input.resource_limits.human_review_capacity == 0 { + approval_requests.push(ApprovalRequest { + reason: "No human review capacity configured".to_string(), + risk_level: "high".to_string(), + reviewer_group: "revops".to_string(), + }); + } + + let workflow_state = WorkflowState { + stage: stage.to_string(), + progress_pct: match stage { + "blocked" => 35, + "execution" => 55, + _ => 20, + }, + eta_minutes: 90 + + input.current_state.open_tasks * 15 + + input.current_state.active_campaigns * 10, + }; + let output = OrchestratorOutput { + execution_plan, + task_assignments, + approval_requests, + workflow_state, + }; + + let mut events = Vec::new(); + events.push(self.make_event( + &envelope, + AgentId::RachelOrchestrator, + "orchestrator.workflow.updated", + SubjectType::Objective, + envelope.objective_id, + &output.workflow_state, + )?); + for assignment in &output.task_assignments { + events.push(self.make_event( + 
&envelope, + AgentId::RachelOrchestrator, + "orchestrator.task.assigned", + SubjectType::Objective, + envelope.objective_id, + assignment, + )?); + } + for approval in &output.approval_requests { + events.push(self.make_event( + &envelope, + AgentId::RachelOrchestrator, + "approval.requested", + SubjectType::Objective, + envelope.objective_id, + approval, + )?); + } + + let confidence = if input.current_state.blockers.is_empty() { + 0.84 + } else { + 0.68 + }; + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status: TaskStatus::Succeeded, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence, + evidence_refs: envelope.input_refs.clone(), + next_action: "Run ICP Scout assignments and publish promoted segments".to_string(), + errors: Vec::new(), + }) + } + + pub fn run_icp_scout( + &self, + envelope: AgentTaskEnvelope, + input: IcpScoutInput, + ) -> Result, GtmAgentError> { + let mut icp_scores = Vec::with_capacity(input.accounts.len()); + let mut industry_hits: HashMap = HashMap::new(); + + for account in &input.accounts { + let (score, top_drivers) = score_account(account); + let tier = tier_for_score(score); + if matches!(tier, IcpTier::A | IcpTier::B) { + *industry_hits + .entry(account.industry.to_ascii_lowercase()) + .or_insert(0) += 1; + } + icp_scores.push(IcpScore { + entity_id: account.entity_id, + entity_type: EntityType::Account, + score_0_100: score, + tier, + top_drivers, + }); + } + + icp_scores.sort_by_key(|score| Reverse(score.score_0_100)); + let sample_size = input.accounts.len(); + let sample_ratio = if input.min_sample_size == 0 { + 1.0 + } else { + (sample_size as f32 / input.min_sample_size as f32).min(1.2) + }; + let base_confidence = (0.45 + sample_ratio * 0.35).min(0.95); + + let mut segment_definitions = vec![SegmentDefinition { + segment_id: "tier_a_high_fit".to_string(), + rule_dsl: "icp_tier == 'A'".to_string(), + expected_lift: 1.35, + confidence: (base_confidence + 
0.05).min(0.95), + }]; + if let Some((industry, _)) = industry_hits.into_iter().max_by_key(|(_, count)| *count) { + segment_definitions.push(SegmentDefinition { + segment_id: format!("industry_{}_high_fit", slugify(&industry)), + rule_dsl: format!("industry == '{}' AND icp_tier IN ('A','B')", industry), + expected_lift: 1.22, + confidence: base_confidence, + }); + } + segment_definitions.push(SegmentDefinition { + segment_id: "fast_activation".to_string(), + rule_dsl: "activation_days <= 7 AND support_tickets_30d <= 2".to_string(), + expected_lift: 1.18, + confidence: (base_confidence - 0.05).max(0.3), + }); + + let anti_icp_rules = vec![ + "churned == true".to_string(), + "support_tickets_30d > 8".to_string(), + "activation_days > 30".to_string(), + ]; + + let promoted_segment_ids: HashSet = segment_definitions + .iter() + .map(|segment| segment.segment_id.clone()) + .collect(); + let drift_detected = !input.current_segment_ids.is_empty() + && input + .current_segment_ids + .iter() + .all(|segment| !promoted_segment_ids.contains(segment)); + let drift_report = DriftReport { + drift_detected, + drift_dimensions: if drift_detected { + vec![ + "segment_membership".to_string(), + "conversion_signal_weight".to_string(), + ] + } else { + Vec::new() + }, + recommended_retrain_date: Some(Utc::now() + Duration::days(30)), + }; + + let output = IcpScoutOutput { + icp_scores, + segment_definitions, + anti_icp_rules, + drift_report, + }; + let needs_human = sample_size < input.min_sample_size; + + let mut events = Vec::new(); + events.push(self.make_event( + &envelope, + AgentId::RachelIcpScout, + "icp.score.updated", + SubjectType::Objective, + envelope.objective_id, + &serde_json::json!({ + "score_count": output.icp_scores.len(), + "top_score": output.icp_scores.first().map(|score| score.score_0_100), + }), + )?); + for segment in &output.segment_definitions { + events.push(self.make_event( + &envelope, + AgentId::RachelIcpScout, + "icp.segment.promoted", + 
SubjectType::Objective, + envelope.objective_id, + segment, + )?); + } + events.push(self.make_event( + &envelope, + AgentId::RachelIcpScout, + "icp.anti_segment.updated", + SubjectType::Objective, + envelope.objective_id, + &serde_json::json!({ "rules": output.anti_icp_rules.clone() }), + )?); + if output.drift_report.drift_detected { + events.push(self.make_event( + &envelope, + AgentId::RachelIcpScout, + "icp.drift.detected", + SubjectType::Objective, + envelope.objective_id, + &output.drift_report, + )?); + } + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status: if needs_human { + TaskStatus::NeedsHuman + } else { + TaskStatus::Succeeded + }, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: if needs_human { + base_confidence.min(0.6) + } else { + base_confidence + }, + evidence_refs: envelope.input_refs.clone(), + next_action: if needs_human { + "Collect more won/lost and activation samples before promoting ICP tiers" + .to_string() + } else { + "Publish promoted segments to outbound sequencing".to_string() + }, + errors: if needs_human { + vec![format!( + "insufficient sample size: {} < {}", + sample_size, input.min_sample_size + )] + } else { + Vec::new() + }, + }) + } + + pub fn run_outbound_sdr( + &self, + envelope: AgentTaskEnvelope, + input: OutboundSdrInput, + ) -> Result, GtmAgentError> { + let selected_channel = select_channel(&envelope, &input); + let selected_variant = input + .message_bundle + .variants + .iter() + .min_by_key(|variant| claim_risk_rank(variant.claim_risk)) + .cloned(); + let risk_blocked = selected_variant + .as_ref() + .map(|variant| variant.claim_risk == ClaimRisk::High) + .unwrap_or(false); + + let mut errors = Vec::new(); + if selected_channel.is_none() { + errors.push( + "no outbound channel available after applying policy and channel settings" + .to_string(), + ); + } + if selected_variant.is_none() { + errors.push("message bundle has no 
variants".to_string()); + } + + let mut events = Vec::new(); + let no_channel_or_variant = selected_channel.is_none() || selected_variant.is_none(); + let output = if no_channel_or_variant { + OutboundSdrOutput { + sequence_draft: None, + personalization_fields_used: Vec::new(), + send_requests: Vec::new(), + reply_classifications: Vec::new(), + handoffs: Vec::new(), + } + } else { + let channel = selected_channel.expect("checked above"); + let variant = selected_variant.clone().expect("checked above"); + let touch_count = input.sequence_policy.max_touches.max(1).min(6); + let touches = (0..touch_count) + .map(|idx| SequenceTouch { + touch_number: idx + 1, + offset_days: idx as u16 * input.sequence_policy.cadence_days, + channel, + template_id: variant.template_id.clone(), + }) + .collect::>(); + let sequence_draft = SequenceDraft { + sequence_id: format!("seq-{}", envelope.task_id.simple()), + touches, + channel, + }; + + let risk_requires_approval = envelope.policy_pack.human_approval_required + || variant.claim_risk == ClaimRisk::High; + let send_requests = if risk_requires_approval { + Vec::new() + } else { + input + .segment_manifest + .iter() + .enumerate() + .map(|(idx, contact)| SendRequest { + recipient_id: contact.recipient_id, + template_id: variant.template_id.clone(), + send_at: Utc::now() + Duration::minutes((idx as i64) * 5), + }) + .collect() + }; + + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "outbound.sequence.drafted", + SubjectType::Campaign, + envelope.objective_id, + &sequence_draft, + )?); + if risk_requires_approval { + let approval = ApprovalRequest { + reason: if variant.claim_risk == ClaimRisk::High { + "Selected copy variant has high claim risk".to_string() + } else { + "Policy pack requires human approval before send".to_string() + }, + risk_level: "high".to_string(), + reviewer_group: "gtm_ops".to_string(), + }; + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + 
"approval.requested", + SubjectType::Campaign, + envelope.objective_id, + &approval, + )?); + } else { + events.push(self.make_event( + &envelope, + AgentId::RachelOutboundSdr, + "outbound.send.requested", + SubjectType::Campaign, + envelope.objective_id, + &serde_json::json!({ + "segment_size": input.segment_manifest.len(), + "send_request_count": send_requests.len(), + "channel": channel, + }), + )?); + } + + OutboundSdrOutput { + sequence_draft: Some(sequence_draft), + personalization_fields_used: vec![ + "first_name".to_string(), + "company_name".to_string(), + "job_title".to_string(), + ], + send_requests, + reply_classifications: Vec::new(), + handoffs: Vec::new(), + } + }; + + let status = if no_channel_or_variant { + TaskStatus::Failed + } else if envelope.policy_pack.human_approval_required || risk_blocked { + TaskStatus::NeedsHuman + } else { + TaskStatus::Succeeded + }; + + let confidence = match status { + TaskStatus::Failed => 0.25, + TaskStatus::NeedsHuman => 0.62, + _ => (0.6 + (input.segment_manifest.len().min(50) as f32 / 250.0)).min(0.9), + }; + let next_action = match status { + TaskStatus::Failed => { + "Fix channel policy and message variant configuration".to_string() + } + TaskStatus::NeedsHuman => { + "Wait for human approval, then enqueue outbound send requests".to_string() + } + _ => "Dispatch send requests and start reply classification loop".to_string(), + }; + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence, + evidence_refs: vec![format!("segment:{}", input.message_bundle.segment_id)], + next_action, + errors, + }) + } + + pub fn run_feedback_prd( + &self, + envelope: AgentTaskEnvelope, + input: FeedbackPrdInput, + ) -> Result, GtmAgentError> { + #[derive(Default)] + struct ClusterAccumulator { + frequency: u32, + segments: HashSet, + evidence_refs: Vec, + } + + let mut clusters_by_theme: BTreeMap = 
BTreeMap::new(); + for item in &input.feedback_items { + let theme = classify_feedback_theme(&item.text).to_string(); + let entry = clusters_by_theme.entry(theme).or_default(); + entry.frequency += 1; + if let Some(segment) = item.segment_id.as_deref() { + entry.segments.insert(segment.to_string()); + } + entry.evidence_refs.push( + item.evidence_ref + .clone() + .unwrap_or_else(|| format!("feedback:{}", item.feedback_id)), + ); + } + + let mut insight_clusters = clusters_by_theme + .into_iter() + .filter(|(_, acc)| acc.frequency as usize >= input.cluster_policy.min_cluster_size) + .map(|(theme, acc)| InsightCluster { + cluster_id: format!("cluster_{}", slugify(&theme)), + theme, + frequency: acc.frequency, + affected_segments: { + let mut segments = acc.segments.into_iter().collect::>(); + segments.sort(); + segments + }, + evidence_refs: acc.evidence_refs, + }) + .collect::>(); + insight_clusters.sort_by_key(|cluster| Reverse(cluster.frequency)); + + let mut job_stories = Vec::new(); + let mut prd_drafts = Vec::new(); + let mut priority_scores = Vec::new(); + + for cluster in &insight_clusters { + let persona = persona_for_theme(&cluster.theme); + let prd_id = format!("prd_{}", cluster.cluster_id); + let users = if cluster.affected_segments.is_empty() { + vec!["core_gtm_segment".to_string()] + } else { + cluster.affected_segments.clone() + }; + let effort = effort_for_theme(&cluster.theme); + let impact = ((cluster.frequency as f32 / 8.0) + 0.2).min(1.0); + let reach = ((users.len().max(1) as f32) / 4.0).min(1.0); + let confidence = + (0.5 + input.cluster_policy.recency_weight.clamp(0.0, 1.0) * 0.3).min(0.95); + let overall = (impact * reach * confidence) / (effort + 0.1); + + job_stories.push(JobStory { + as_persona: persona.to_string(), + when_context: format!("when {} keeps appearing in customer signals", cluster.theme), + i_want: format!("a clear fix plan for {}", cluster.theme), + so_i_can: "improve activation and pipeline quality".to_string(), + }); + 
prd_drafts.push(PrdDraft { + prd_id: prd_id.clone(), + problem: format!( + "{} occurred {} times across active segments", + cluster.theme, cluster.frequency + ), + users, + success_metrics: vec![ + "activation_rate_day_14".to_string(), + "meeting_to_sql_conversion".to_string(), + "support_ticket_volume".to_string(), + ], + scope: vec![ + format!("deliver solution for {}", cluster.theme), + "instrument KPI tracking".to_string(), + "publish rollout plan".to_string(), + ], + risks: derive_risks(&input, &cluster.theme), + }); + priority_scores.push(PriorityScore { + prd_id, + impact, + reach, + confidence, + effort, + overall, + }); + } + + let output = FeedbackPrdOutput { + insight_clusters, + job_stories, + prd_drafts, + priority_scores, + }; + + let mut events = Vec::new(); + events.push(self.make_event( + &envelope, + AgentId::RachelFeedbackPrdSynthesizer, + "feedback.cluster.updated", + SubjectType::Feature, + envelope.objective_id, + &serde_json::json!({ + "cluster_count": output.insight_clusters.len(), + "feedback_count": input.feedback_items.len(), + }), + )?); + for cluster in &output.insight_clusters { + events.push(self.make_event( + &envelope, + AgentId::RachelFeedbackPrdSynthesizer, + "feedback.insight.published", + SubjectType::Feature, + envelope.objective_id, + cluster, + )?); + } + for prd in &output.prd_drafts { + events.push(self.make_event( + &envelope, + AgentId::RachelFeedbackPrdSynthesizer, + "prd.draft.generated", + SubjectType::Feature, + envelope.objective_id, + prd, + )?); + } + + let partial = output.prd_drafts.is_empty(); + Ok(AgentTaskResult { + task_id: envelope.task_id, + status: if partial { + TaskStatus::Partial + } else { + TaskStatus::Succeeded + }, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: if partial { + 0.4 + } else { + (0.55 + (input.feedback_items.len().min(12) as f32 * 0.03)).min(0.9) + }, + evidence_refs: envelope.input_refs.clone(), + next_action: if 
partial { + "Collect more feedback data or lower min_cluster_size to generate PRDs".to_string() + } else { + "Prioritize generated PRDs and hand off to product planning".to_string() + }, + errors: if partial { + vec!["no clusters met the minimum cluster size".to_string()] + } else { + Vec::new() + }, + }) + } + + fn make_event( + &self, + envelope: &AgentTaskEnvelope, + producer: AgentId, + event_type: &str, + subject_type: SubjectType, + subject_id: Uuid, + payload: &T, + ) -> Result { + Ok(EventEnvelope { + event_id: Uuid::new_v4(), + event_type: event_type.to_string(), + occurred_at: Utc::now(), + producer, + tenant_id: envelope.tenant_id, + subject_type, + subject_id, + schema_version: GTM_SCHEMA_VERSION.to_string(), + trace_id: envelope.trace_id, + idempotency_key: format!( + "{}:{}:{}", + envelope.idempotency_key, + producer.as_str(), + event_type + ), + payload: serde_json::to_value(payload)?, + }) + } +} + +fn claim_risk_rank(risk: ClaimRisk) -> u8 { + match risk { + ClaimRisk::Low => 0, + ClaimRisk::Medium => 1, + ClaimRisk::High => 2, + } +} + +fn select_channel(envelope: &AgentTaskEnvelope, input: &OutboundSdrInput) -> Option { + let mut candidates = Vec::new(); + if input.channel_policy.email_enabled { + candidates.push(GtmChannel::Email); + } + if input.channel_policy.linkedin_ads_enabled { + candidates.push(GtmChannel::LinkedinAds); + } + if input.channel_policy.linkedin_dm_enabled { + candidates.push(GtmChannel::LinkedinDm); + } + candidates.push(GtmChannel::HubspotWorkflow); + + candidates + .into_iter() + .find(|channel| envelope.policy_pack.allowed_channels.contains(channel)) +} + +fn score_account(account: &AccountSignal) -> (u8, Vec) { + let mut score = 0_i32; + let mut drivers = Vec::new(); + + let usage_points = (account.product_events_14d.min(12) * 3) as i32; + score += usage_points; + if usage_points > 0 { + drivers.push("strong_product_activity".to_string()); + } + if account.won_deals_12m >= account.lost_deals_12m && account.won_deals_12m 
> 0 {
        // Net-positive win record is a strong fit signal.
        score += 15;
        drivers.push("win_rate_positive".to_string());
    } else if account.lost_deals_12m > account.won_deals_12m {
        score -= 6;
    }
    if account.churned {
        score -= 25;
        drivers.push("recent_churn_signal".to_string());
    } else {
        score += 12;
        drivers.push("retention_signal".to_string());
    }
    if account.activation_days <= 7 {
        score += 14;
        drivers.push("fast_activation".to_string());
    } else if account.activation_days <= 14 {
        score += 8;
        drivers.push("acceptable_activation".to_string());
    }
    if account.support_tickets_30d <= 2 {
        score += 10;
        drivers.push("low_support_load".to_string());
    } else if account.support_tickets_30d >= 8 {
        score -= 12;
        drivers.push("high_support_load".to_string());
    }
    if account.ltv_usd >= 5_000.0 {
        score += 10;
        drivers.push("high_ltv".to_string());
    }
    if account.company_size >= 50 && account.company_size <= 500 {
        score += 5;
        drivers.push("target_company_size".to_string());
    }

    // Scores are reported on a fixed 0-100 scale regardless of raw accumulation.
    let clamped = score.clamp(0, 100) as u8;
    (clamped, drivers)
}

/// Maps a 0-100 ICP score onto the coarse A-D tier used for segment routing.
fn tier_for_score(score: u8) -> IcpTier {
    match score {
        80..=100 => IcpTier::A,
        65..=79 => IcpTier::B,
        45..=64 => IcpTier::C,
        _ => IcpTier::D,
    }
}

/// Buckets raw feedback text into one of five fixed themes via
/// case-insensitive keyword matching; falls back to
/// "feature discoverability" when nothing matches.
fn classify_feedback_theme(text: &str) -> &'static str {
    let normalized = text.to_ascii_lowercase();
    if normalized.contains("onboard")
        || normalized.contains("activation")
        || normalized.contains("setup")
    {
        "onboarding friction"
    } else if normalized.contains("integrat")
        || normalized.contains("api")
        || normalized.contains("webhook")
        || normalized.contains("hubspot")
    {
        "integration gap"
    } else if normalized.contains("price")
        || normalized.contains("pricing")
        || normalized.contains("budget")
    {
        "pricing objection"
    } else if normalized.contains("deliverability")
        || normalized.contains("spam")
        || normalized.contains("unsubscribe")
    {
        "outbound quality"
    } else {
        "feature discoverability"
    }
}

/// Picks the persona a job story is written for, keyed on the theme labels
/// produced by `classify_feedback_theme`.
fn persona_for_theme(theme: &str) -> &'static str {
    match theme {
        "integration gap" => "operations lead",
        "pricing objection" => "budget owner",
        "onboarding friction" => "new champion",
        "outbound quality" => "demand generation manager",
        _ => "growth manager",
    }
}

/// Relative effort estimate (0.0-1.0) per theme, used as the divisor in the
/// PRD priority formula.
fn effort_for_theme(theme: &str) -> f32 {
    match theme {
        "integration gap" => 0.8,
        "onboarding friction" => 0.55,
        "pricing objection" => 0.45,
        "outbound quality" => 0.5,
        _ => 0.6,
    }
}

/// Builds the risk list for a PRD draft: up to two product constraints plus
/// two standing risks that apply to every theme.
fn derive_risks(input: &FeedbackPrdInput, theme: &str) -> Vec<String> {
    let mut risks = Vec::new();
    // Iterating an empty slice is a no-op, so no emptiness guard is needed.
    for constraint in input.product_context.constraints.iter().take(2) {
        risks.push(format!("constraint: {}", constraint));
    }
    risks.push(format!("insufficient telemetry for {}", theme));
    risks.push("cross-team dependency misalignment".to_string());
    risks
}

/// Converts an arbitrary label into a lowercase snake_case identifier:
/// ASCII alphanumerics are kept (lowercased), whitespace and hyphens become
/// underscores, everything else is dropped, runs of underscores collapse to
/// one, and leading/trailing underscores are trimmed.
///
/// Rewritten as a single pass: the previous `contains("__")`/`replace` loop
/// re-scanned and re-allocated the whole string per iteration (quadratic on
/// separator-heavy input); deduplicating while building is linear and
/// produces identical output.
fn slugify(value: &str) -> String {
    let mut out = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch.is_ascii_alphanumeric() {
            out.push(ch.to_ascii_lowercase());
        } else if (ch.is_ascii_whitespace() || ch == '-') && !out.ends_with('_') {
            // Collapse separator runs as we go instead of post-processing.
            out.push('_');
        }
    }
    out.trim_matches('_').to_string()
}

#[cfg(test)]
mod tests {
    use chrono::Utc;

    use super::super::contracts::{
        AgentId, AgentTaskEnvelope, BusinessContext, ChannelPolicy, ClaimRisk, ClusterPolicy,
        CurrentState, FeedbackItem, FeedbackPrdInput, FeedbackSource, MessageBundle,
        MessageVariant, Objective, OrchestratorInput, OutboundSdrInput, PolicyPack, ProductContext,
        ResourceLimits, SegmentContact, SequencePolicy, TaskPriority, TaskStatus,
    };
    use super::*;

    /// Shared fixture: a high-priority envelope with the default policy pack.
    fn base_envelope(agent_id: AgentId) -> AgentTaskEnvelope {
        let mut envelope = AgentTaskEnvelope::new(agent_id);
        envelope.priority = TaskPriority::High;
        envelope.policy_pack = PolicyPack::default();
        envelope.input_refs = vec!["warehouse://daily_snapshot".to_string()];
        envelope
    }

    #[test]
    fn 
orchestrator_assigns_three_phase1_agents() { + let engine = Phase1AgentEngine; + let envelope = base_envelope(AgentId::RachelOrchestrator); + let input = OrchestratorInput { + objective: Objective { + name: "Increase SQL quality".to_string(), + target_metric: "sql_conversion_rate".to_string(), + target_value: "0.24".to_string(), + due_date: None, + owner: "gtm_lead".to_string(), + }, + current_state: CurrentState::default(), + resource_limits: ResourceLimits { + daily_email_cap: 1000, + budget_cap_usd: 20_000, + human_review_capacity: 4, + }, + }; + + let result = engine.run_orchestrator(envelope, input).unwrap(); + assert_eq!(result.status, TaskStatus::Succeeded); + assert_eq!(result.output_payload.task_assignments.len(), 3); + assert!(result + .output_payload + .task_assignments + .iter() + .any(|assignment| assignment.agent_id == AgentId::RachelIcpScout)); + assert!(result + .output_payload + .task_assignments + .iter() + .any(|assignment| assignment.agent_id == AgentId::RachelOutboundSdr)); + assert!(result + .output_payload + .task_assignments + .iter() + .any(|assignment| assignment.agent_id == AgentId::RachelFeedbackPrdSynthesizer)); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "orchestrator.task.assigned")); + } + + #[test] + fn icp_scout_scores_accounts_and_promotes_segments() { + let engine = Phase1AgentEngine; + let envelope = base_envelope(AgentId::RachelIcpScout); + let input = IcpScoutInput { + accounts: vec![ + AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 120, + industry: "SaaS".to_string(), + region: "US".to_string(), + product_events_14d: 14, + support_tickets_30d: 1, + won_deals_12m: 5, + lost_deals_12m: 1, + churned: false, + activation_days: 4, + ltv_usd: 9000.0, + }, + AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 20, + industry: "SaaS".to_string(), + region: "US".to_string(), + product_events_14d: 6, + support_tickets_30d: 3, + won_deals_12m: 2, + lost_deals_12m: 2, + churned: 
false, + activation_days: 10, + ltv_usd: 3000.0, + }, + AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 8, + industry: "Agency".to_string(), + region: "US".to_string(), + product_events_14d: 1, + support_tickets_30d: 10, + won_deals_12m: 0, + lost_deals_12m: 4, + churned: true, + activation_days: 45, + ltv_usd: 500.0, + }, + ], + current_segment_ids: vec!["tier_a_high_fit".to_string()], + min_sample_size: 2, + }; + + let result = engine.run_icp_scout(envelope, input).unwrap(); + assert_eq!(result.status, TaskStatus::Succeeded); + assert_eq!(result.output_payload.icp_scores.len(), 3); + assert!(result + .output_payload + .icp_scores + .iter() + .any(|score| score.tier == IcpTier::A)); + assert!(result.output_payload.segment_definitions.len() >= 2); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "icp.segment.promoted")); + } + + #[test] + fn outbound_requires_human_review_for_high_risk_copy() { + let engine = Phase1AgentEngine; + let envelope = base_envelope(AgentId::RachelOutboundSdr); + let input = OutboundSdrInput { + segment_manifest: vec![SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "prospect@example.com".to_string(), + first_name: Some("Taylor".to_string()), + job_title: Some("VP Revenue".to_string()), + company_name: Some("Acme".to_string()), + timezone: Some("America/Los_Angeles".to_string()), + }], + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "v1".to_string(), + subject: "Guaranteed 4x pipeline in 2 weeks".to_string(), + body: "High-risk claim copy".to_string(), + claim_risk: ClaimRisk::High, + }], + }, + sequence_policy: SequencePolicy { + max_touches: 3, + cadence_days: 3, + stop_conditions: vec!["positive_reply".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: true, + linkedin_ads_enabled: false, + linkedin_dm_enabled: false, + }, + }; + + let result = 
engine.run_outbound_sdr(envelope, input).unwrap(); + assert_eq!(result.status, TaskStatus::NeedsHuman); + assert!(result.output_payload.send_requests.is_empty()); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "approval.requested")); + } + + #[test] + fn feedback_prd_generates_cluster_and_prd_draft() { + let engine = Phase1AgentEngine; + let envelope = base_envelope(AgentId::RachelFeedbackPrdSynthesizer); + let now = Utc::now(); + let input = FeedbackPrdInput { + feedback_items: vec![ + FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::SupportTicket, + segment_id: Some("tier_a_high_fit".to_string()), + text: "HubSpot integration keeps failing for custom fields".to_string(), + created_at: now, + evidence_ref: Some("ticket:123".to_string()), + }, + FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::SalesCall, + segment_id: Some("tier_a_high_fit".to_string()), + text: "Need better API integration for CRM sync".to_string(), + created_at: now, + evidence_ref: Some("call:456".to_string()), + }, + FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::OutboundReply, + segment_id: Some("fast_activation".to_string()), + text: "Your integration story is unclear".to_string(), + created_at: now, + evidence_ref: Some("reply:789".to_string()), + }, + ], + product_context: ProductContext { + roadmap_refs: vec!["roadmap://q2".to_string()], + constraints: vec!["single backend engineer".to_string()], + architecture_notes: vec!["legacy sync worker".to_string()], + }, + business_context: BusinessContext { + revenue_goal: "expand enterprise ARR".to_string(), + strategic_themes: vec!["expansion".to_string()], + }, + cluster_policy: ClusterPolicy { + min_cluster_size: 2, + recency_weight: 0.7, + }, + }; + + let result = engine.run_feedback_prd(envelope, input).unwrap(); + assert_eq!(result.status, TaskStatus::Succeeded); + assert!(!result.output_payload.insight_clusters.is_empty()); + 
assert!(!result.output_payload.prd_drafts.is_empty()); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "prd.draft.generated")); + } + + #[test] + fn phase1_workflow_runs_all_four_agents() { + let engine = Phase1AgentEngine; + let base = base_envelope(AgentId::RachelOrchestrator); + let workflow_input = Phase1WorkflowInput { + base_envelope: base, + orchestrator: OrchestratorInput { + objective: Objective { + name: "Improve pipeline quality".to_string(), + target_metric: "meeting_to_sql".to_string(), + target_value: "0.3".to_string(), + due_date: None, + owner: "gtm_lead".to_string(), + }, + current_state: CurrentState::default(), + resource_limits: ResourceLimits { + daily_email_cap: 500, + budget_cap_usd: 10_000, + human_review_capacity: 2, + }, + }, + icp_scout: IcpScoutInput { + accounts: vec![AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 75, + industry: "SaaS".to_string(), + region: "US".to_string(), + product_events_14d: 11, + support_tickets_30d: 1, + won_deals_12m: 3, + lost_deals_12m: 1, + churned: false, + activation_days: 6, + ltv_usd: 7000.0, + }], + current_segment_ids: Vec::new(), + min_sample_size: 1, + }, + outbound_sdr: OutboundSdrInput { + segment_manifest: vec![SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "prospect@company.com".to_string(), + first_name: Some("Alex".to_string()), + job_title: Some("Head of Marketing".to_string()), + company_name: Some("Company".to_string()), + timezone: None, + }], + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "safe-v1".to_string(), + subject: "Idea to reduce time-to-value".to_string(), + body: "Low risk copy".to_string(), + claim_risk: ClaimRisk::Low, + }], + }, + sequence_policy: SequencePolicy { + max_touches: 2, + cadence_days: 3, + stop_conditions: vec!["meeting_booked".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: 
true, + linkedin_ads_enabled: false, + linkedin_dm_enabled: false, + }, + }, + feedback_prd: FeedbackPrdInput { + feedback_items: vec![FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::Onboarding, + segment_id: Some("tier_a_high_fit".to_string()), + text: "Onboarding setup takes too long".to_string(), + created_at: Utc::now(), + evidence_ref: Some("onboarding:1".to_string()), + }], + product_context: ProductContext { + roadmap_refs: Vec::new(), + constraints: vec!["limited QA capacity".to_string()], + architecture_notes: Vec::new(), + }, + business_context: BusinessContext { + revenue_goal: "reduce churn".to_string(), + strategic_themes: vec!["activation".to_string()], + }, + cluster_policy: ClusterPolicy { + min_cluster_size: 1, + recency_weight: 0.5, + }, + }, + }; + + let result = engine.run_workflow(workflow_input).unwrap(); + assert_eq!(result.orchestrator.status, TaskStatus::Succeeded); + assert_eq!(result.icp_scout.status, TaskStatus::Succeeded); + assert_eq!(result.outbound_sdr.status, TaskStatus::Succeeded); + assert_eq!(result.feedback_prd.status, TaskStatus::Succeeded); + assert!(!result.events.is_empty()); + } +} diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/phase2.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/phase2.rs new file mode 100644 index 00000000..025c14cd --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/phase2.rs @@ -0,0 +1,555 @@ +use std::cmp::Reverse; + +use chrono::Utc; +use serde::Serialize; +use uuid::Uuid; + +use super::contracts::{ + AgentId, AgentTaskEnvelope, AgentTaskResult, ApprovalRequest, AssetChannel, ContentAsset, + ContentInput, ContentOutput, EventEnvelope, FunnelStage, MessageMap, PositioningBundle, + PositioningInput, PositioningOutput, SubjectType, TaskStatus, GTM_SCHEMA_VERSION, +}; + +#[derive(Debug, thiserror::Error)] +pub enum Phase2AgentError { + #[error("event payload serialization failed: {0}")] + EventSerialization(#[from] serde_json::Error), + 
#[error("data contracts are not stable; phase 2 rollout remains gated")] + DataContractsNotStable, +} + +#[derive(Debug, Clone)] +pub struct Phase2WorkflowInput { + pub base_envelope: AgentTaskEnvelope, + pub data_contracts_stable: bool, + pub positioning: PositioningInput, + pub content: ContentInput, +} + +#[derive(Debug, Clone)] +pub struct Phase2WorkflowResult { + pub positioning: AgentTaskResult, + pub content: AgentTaskResult, + pub events: Vec, +} + +#[derive(Debug, Default, Clone)] +pub struct Phase2AgentEngine; + +impl Phase2AgentEngine { + pub fn run_workflow( + &self, + input: Phase2WorkflowInput, + ) -> Result { + if !input.data_contracts_stable { + return Err(Phase2AgentError::DataContractsNotStable); + } + + let positioning = self.run_positioning_pmm( + input + .base_envelope + .with_agent(AgentId::RachelPositioningPmm), + input.positioning, + )?; + + let mut content_input = input.content; + content_input.positioning_bundle = positioning.output_payload.positioning_bundle.clone(); + let content = self.run_content_studio( + input.base_envelope.with_agent(AgentId::RachelContentStudio), + content_input, + )?; + + let mut events = Vec::new(); + events.extend(positioning.emitted_events.clone()); + events.extend(content.emitted_events.clone()); + + Ok(Phase2WorkflowResult { + positioning, + content, + events, + }) + } + + pub fn run_positioning_pmm( + &self, + envelope: AgentTaskEnvelope, + input: PositioningInput, + ) -> Result, Phase2AgentError> { + let top_themes = input + .insight_clusters + .iter() + .map(|cluster| (cluster.frequency, cluster.theme.clone())) + .collect::>(); + let mut top_themes = top_themes; + top_themes.sort_by_key(|(frequency, _)| Reverse(*frequency)); + let top_theme_labels = top_themes + .into_iter() + .take(3) + .map(|(_, theme)| theme) + .collect::>(); + let proof_points = input + .prd_drafts + .iter() + .take(3) + .map(|prd| format!("PRD {} targets {}", prd.prd_id, prd.problem)) + .collect::>(); + + let message_maps = input + 
.segment_definitions + .iter() + .map(|segment| MessageMap { + segment_id: segment.segment_id.clone(), + value_proposition: format!( + "Help {} segment convert faster with lower GTM friction", + segment.segment_id + ), + pains: if top_theme_labels.is_empty() { + vec!["feature discoverability".to_string()] + } else { + top_theme_labels.clone() + }, + proof_points: if proof_points.is_empty() { + vec!["No PRD draft linked yet; use baseline claim-safe messaging".to_string()] + } else { + proof_points.clone() + }, + objection_handling: vec![ + "Start with one workflow and expand after KPI verification".to_string(), + "Use explicit approval gates for risky claims".to_string(), + format!("Validated against contract {}", input.data_contract_version), + ], + funnel_stage: if segment.segment_id.contains("fast_activation") { + FunnelStage::Awareness + } else if segment.segment_id.contains("tier_a") { + FunnelStage::Decision + } else { + FunnelStage::Consideration + }, + }) + .collect::>(); + + let claim_safe_list = { + let mut safe_claims = vec![ + "improve activation visibility".to_string(), + "reduce manual handoff overhead".to_string(), + "faster campaign iteration loop".to_string(), + ]; + for theme in &input.strategic_themes { + safe_claims.push(format!("aligned with strategic theme: {}", theme)); + } + safe_claims + }; + + let bundle = PositioningBundle { + bundle_id: format!("positioning-{}", envelope.task_id.simple()), + message_maps: message_maps.clone(), + claim_safe_list, + generated_at: Utc::now(), + }; + let output = PositioningOutput { + positioning_bundle: bundle, + }; + + let mut events = Vec::new(); + for map in &message_maps { + events.push(self.make_event( + &envelope, + AgentId::RachelPositioningPmm, + "positioning.message_map.generated", + SubjectType::Feature, + envelope.objective_id, + map, + )?); + } + events.push(self.make_event( + &envelope, + AgentId::RachelPositioningPmm, + "positioning.bundle.published", + SubjectType::Feature, + 
envelope.objective_id, + &output.positioning_bundle, + )?); + + let partial = output.positioning_bundle.message_maps.is_empty(); + Ok(AgentTaskResult { + task_id: envelope.task_id, + status: if partial { + TaskStatus::Partial + } else { + TaskStatus::Succeeded + }, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: if partial { + 0.35 + } else { + (0.58 + (input.segment_definitions.len().min(5) as f32 * 0.06)).min(0.9) + }, + evidence_refs: envelope.input_refs.clone(), + next_action: if partial { + "Need at least one promoted segment to generate message maps".to_string() + } else { + "Publish positioning bundle for content generation".to_string() + }, + errors: if partial { + vec!["no segment definitions available for positioning".to_string()] + } else { + Vec::new() + }, + }) + } + + pub fn run_content_studio( + &self, + envelope: AgentTaskEnvelope, + input: ContentInput, + ) -> Result, Phase2AgentError> { + let no_channels = input.channels.is_empty(); + let no_message_maps = input.positioning_bundle.message_maps.is_empty(); + let asset_limit = input.max_assets_per_channel.clamp(1, 4) as usize; + + let mut errors = Vec::new(); + if no_channels { + errors.push("content channels list is empty".to_string()); + } + if no_message_maps { + errors.push("positioning bundle has no message maps".to_string()); + } + + let mut assets = Vec::new(); + if !no_channels && !no_message_maps { + for channel in &input.channels { + for (idx, message_map) in input + .positioning_bundle + .message_maps + .iter() + .take(asset_limit) + .enumerate() + { + assets.push(build_asset( + &input.positioning_bundle.bundle_id, + message_map, + *channel, + idx + 1, + )); + } + } + } + + let publish_ready = + !input.requires_human_review && !envelope.policy_pack.human_approval_required; + let output = ContentOutput { + assets, + publish_ready, + }; + + let mut events = Vec::new(); + for asset in &output.assets { + 
events.push(self.make_event( + &envelope, + AgentId::RachelContentStudio, + "content.asset.drafted", + SubjectType::Campaign, + envelope.objective_id, + asset, + )?); + } + if output.assets.is_empty() { + // no additional events + } else if output.publish_ready { + events.push(self.make_event( + &envelope, + AgentId::RachelContentStudio, + "content.asset.published", + SubjectType::Campaign, + envelope.objective_id, + &serde_json::json!({ + "asset_count": output.assets.len(), + "bundle_id": input.positioning_bundle.bundle_id, + }), + )?); + } else { + let approval = ApprovalRequest { + reason: "Content assets require human review before publishing".to_string(), + risk_level: "medium".to_string(), + reviewer_group: "content_ops".to_string(), + }; + events.push(self.make_event( + &envelope, + AgentId::RachelContentStudio, + "approval.requested", + SubjectType::Campaign, + envelope.objective_id, + &approval, + )?); + } + + let status = if no_channels || no_message_maps { + TaskStatus::Failed + } else if output.publish_ready { + TaskStatus::Succeeded + } else { + TaskStatus::NeedsHuman + }; + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: match status { + TaskStatus::Failed => 0.3, + TaskStatus::NeedsHuman => 0.65, + _ => 0.82, + }, + evidence_refs: envelope.input_refs.clone(), + next_action: match status { + TaskStatus::Failed => { + "Provide valid channels and positioning message maps for content generation" + .to_string() + } + TaskStatus::NeedsHuman => { + "Collect human review approval then publish drafted assets".to_string() + } + _ => "Distribute published content assets by channel".to_string(), + }, + errors, + }) + } + + fn make_event( + &self, + envelope: &AgentTaskEnvelope, + producer: AgentId, + event_type: &str, + subject_type: SubjectType, + subject_id: Uuid, + payload: &T, + ) -> Result { + Ok(EventEnvelope { + event_id: 
Uuid::new_v4(),
            event_type: event_type.to_string(),
            occurred_at: Utc::now(),
            producer,
            tenant_id: envelope.tenant_id,
            subject_type,
            subject_id,
            schema_version: GTM_SCHEMA_VERSION.to_string(),
            trace_id: envelope.trace_id,
            // Deterministic per (task, producer, event type) so downstream
            // consumers can deduplicate replays.
            idempotency_key: format!(
                "{}:{}:{}",
                envelope.idempotency_key,
                producer.as_str(),
                event_type
            ),
            payload: serde_json::to_value(payload)?,
        })
    }
}

/// Renders one content asset for a (channel, message map) pair.
///
/// `ordinal` is a 1-based position used only to keep generated asset ids
/// unique within a channel; `bundle_id` ties the asset back to the
/// positioning bundle it was derived from.
fn build_asset(
    bundle_id: &str,
    message_map: &MessageMap,
    channel: AssetChannel,
    ordinal: usize,
) -> ContentAsset {
    // Short channel slug embedded in the generated asset id.
    let channel_name = match channel {
        AssetChannel::Email => "email",
        AssetChannel::LandingPage => "landing",
        AssetChannel::LinkedinAd => "linkedin-ad",
        AssetChannel::SalesOnePager => "sales-one-pager",
    };

    let title = match channel {
        AssetChannel::Email => format!("{}: concise intro", message_map.segment_id),
        AssetChannel::LandingPage => format!("{}: value narrative", message_map.segment_id),
        AssetChannel::LinkedinAd => format!("{}: social proof angle", message_map.segment_id),
        AssetChannel::SalesOnePager => format!("{}: enablement brief", message_map.segment_id),
    };
    // Body uses only the first pain / proof point; explicit placeholders are
    // emitted instead of empty sections when the lists are empty.
    let body = format!(
        "Value: {}.\nPain: {}.\nProof: {}.",
        message_map.value_proposition,
        message_map
            .pains
            .first()
            .cloned()
            .unwrap_or_else(|| "unspecified".to_string()),
        message_map
            .proof_points
            .first()
            .cloned()
            .unwrap_or_else(|| "pending proof point".to_string())
    );

    ContentAsset {
        asset_id: format!(
            "{}-{}-{}-{}",
            bundle_id, channel_name, message_map.segment_id, ordinal
        ),
        channel,
        segment_id: message_map.segment_id.clone(),
        title,
        body,
        cta: "Book a 20-minute workflow fit session".to_string(),
    }
}

#[cfg(test)]
mod tests {
    use chrono::Utc;

    use super::super::contracts::{
        AgentId, AgentTaskEnvelope, AssetChannel, ContentInput, FunnelStage, InsightCluster,
        MessageMap, PolicyPack, PositioningBundle, PositioningInput, PrdDraft, SegmentDefinition,
        TaskPriority, TaskStatus,
    };
    use 
super::*; + + fn base_envelope(agent_id: AgentId) -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(agent_id); + envelope.priority = TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.input_refs = vec!["warehouse://contract_snapshot".to_string()]; + envelope + } + + fn positioning_input() -> PositioningInput { + PositioningInput { + segment_definitions: vec![ + SegmentDefinition { + segment_id: "tier_a_high_fit".to_string(), + rule_dsl: "icp_tier == 'A'".to_string(), + expected_lift: 1.32, + confidence: 0.81, + }, + SegmentDefinition { + segment_id: "fast_activation".to_string(), + rule_dsl: "activation_days <= 7".to_string(), + expected_lift: 1.2, + confidence: 0.75, + }, + ], + insight_clusters: vec![InsightCluster { + cluster_id: "cluster_integration_gap".to_string(), + theme: "integration gap".to_string(), + frequency: 4, + affected_segments: vec!["tier_a_high_fit".to_string()], + evidence_refs: vec!["ticket:123".to_string()], + }], + prd_drafts: vec![PrdDraft { + prd_id: "prd_cluster_integration_gap".to_string(), + problem: "integration gap occurred often".to_string(), + users: vec!["tier_a_high_fit".to_string()], + success_metrics: vec!["activation_rate_day_14".to_string()], + scope: vec!["hubspot sync reliability".to_string()], + risks: vec!["dependency risk".to_string()], + }], + strategic_themes: vec!["activation".to_string()], + data_contract_version: "1.0".to_string(), + } + } + + #[test] + fn positioning_generates_bundle_and_events() { + let engine = Phase2AgentEngine; + let envelope = base_envelope(AgentId::RachelPositioningPmm); + let result = engine + .run_positioning_pmm(envelope, positioning_input()) + .unwrap(); + assert_eq!(result.status, TaskStatus::Succeeded); + assert_eq!( + result.output_payload.positioning_bundle.message_maps.len(), + 2 + ); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "positioning.bundle.published")); + } + + #[test] + fn 
content_requires_human_when_policy_gate_enabled() { + let engine = Phase2AgentEngine; + let mut envelope = base_envelope(AgentId::RachelContentStudio); + envelope.policy_pack.human_approval_required = true; + let input = ContentInput { + positioning_bundle: PositioningBundle { + bundle_id: "positioning-x".to_string(), + message_maps: vec![MessageMap { + segment_id: "tier_a_high_fit".to_string(), + value_proposition: "Speed up GTM execution".to_string(), + pains: vec!["integration gap".to_string()], + proof_points: vec!["PRD linked".to_string()], + objection_handling: vec!["Start small".to_string()], + funnel_stage: FunnelStage::Decision, + }], + claim_safe_list: vec!["activation visibility".to_string()], + generated_at: Utc::now(), + }, + channels: vec![AssetChannel::Email], + max_assets_per_channel: 2, + requires_human_review: false, + }; + let result = engine.run_content_studio(envelope, input).unwrap(); + assert_eq!(result.status, TaskStatus::NeedsHuman); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "approval.requested")); + } + + #[test] + fn phase2_workflow_enforces_contract_stability_gate() { + let engine = Phase2AgentEngine; + let workflow = Phase2WorkflowInput { + base_envelope: base_envelope(AgentId::RachelOrchestrator), + data_contracts_stable: false, + positioning: positioning_input(), + content: ContentInput { + positioning_bundle: PositioningBundle { + bundle_id: "placeholder".to_string(), + message_maps: Vec::new(), + claim_safe_list: Vec::new(), + generated_at: Utc::now(), + }, + channels: vec![AssetChannel::Email], + max_assets_per_channel: 1, + requires_human_review: true, + }, + }; + let error = engine.run_workflow(workflow).unwrap_err(); + assert!(matches!(error, Phase2AgentError::DataContractsNotStable)); + } + + #[test] + fn phase2_workflow_runs_positioning_then_content() { + let engine = Phase2AgentEngine; + let workflow = Phase2WorkflowInput { + base_envelope: base_envelope(AgentId::RachelOrchestrator), + 
data_contracts_stable: true, + positioning: positioning_input(), + content: ContentInput { + positioning_bundle: PositioningBundle { + bundle_id: "placeholder".to_string(), + message_maps: Vec::new(), + claim_safe_list: Vec::new(), + generated_at: Utc::now(), + }, + channels: vec![AssetChannel::Email, AssetChannel::LinkedinAd], + max_assets_per_channel: 2, + requires_human_review: false, + }, + }; + + let result = engine.run_workflow(workflow).unwrap(); + assert_eq!(result.positioning.status, TaskStatus::Succeeded); + assert_eq!(result.content.status, TaskStatus::Succeeded); + assert!(!result.content.output_payload.assets.is_empty()); + assert!(result + .events + .iter() + .any(|event| event.event_type == "content.asset.published")); + } +} diff --git a/DoWhiz_service/scheduler_module/src/gtm_agents/phase3.rs b/DoWhiz_service/scheduler_module/src/gtm_agents/phase3.rs new file mode 100644 index 00000000..5633f41a --- /dev/null +++ b/DoWhiz_service/scheduler_module/src/gtm_agents/phase3.rs @@ -0,0 +1,578 @@ +use chrono::Utc; +use serde::Serialize; +use uuid::Uuid; + +use super::contracts::{ + AgentId, AgentTaskEnvelope, AgentTaskResult, ApprovalRequest, EventEnvelope, ExperimentDesign, + ExperimentInput, ExperimentOutput, ExperimentRecommendation, ExperimentResultSummary, + FeedbackItem, FeedbackSource, OnboardingInput, OnboardingMilestone, OnboardingOutput, + OnboardingRiskFlag, RiskSeverity, SubjectType, TaskStatus, GTM_SCHEMA_VERSION, +}; + +#[derive(Debug, thiserror::Error)] +pub enum Phase3AgentError { + #[error("event payload serialization failed: {0}")] + EventSerialization(#[from] serde_json::Error), + #[error("reliable metrics are required before phase 3 rollout")] + MetricsNotReliable, +} + +#[derive(Debug, Clone)] +pub struct Phase3WorkflowInput { + pub base_envelope: AgentTaskEnvelope, + pub metrics_reliable: bool, + pub onboarding: OnboardingInput, + pub experiment: ExperimentInput, +} + +#[derive(Debug, Clone)] +pub struct Phase3WorkflowResult { + pub 
onboarding: AgentTaskResult<OnboardingOutput>,
    pub experiment: AgentTaskResult<ExperimentOutput>,
    pub events: Vec<EventEnvelope>,
}

#[derive(Debug, Default, Clone)]
pub struct Phase3AgentEngine;

impl Phase3AgentEngine {
    /// Runs the phase-3 agents in order (onboarding CSM, then experiment
    /// analyst) and collects every event both agents emitted.
    ///
    /// # Errors
    /// Returns `Phase3AgentError::MetricsNotReliable` when the caller has not
    /// confirmed metric reliability — phase 3 stays gated until then.
    pub fn run_workflow(
        &self,
        input: Phase3WorkflowInput,
    ) -> Result<Phase3WorkflowResult, Phase3AgentError> {
        if !input.metrics_reliable {
            return Err(Phase3AgentError::MetricsNotReliable);
        }

        let onboarding = self.run_onboarding_csm(
            input.base_envelope.with_agent(AgentId::RachelOnboardingCsm),
            input.onboarding,
        )?;
        let experiment = self.run_experiment_analyst(
            input
                .base_envelope
                .with_agent(AgentId::RachelExperimentAnalyst),
            input.experiment,
        )?;

        // Surface a single flat event stream alongside the per-agent results.
        let mut events = Vec::new();
        events.extend(onboarding.emitted_events.clone());
        events.extend(experiment.emitted_events.clone());

        Ok(Phase3WorkflowResult {
            onboarding,
            experiment,
            events,
        })
    }

    /// Builds the three-milestone onboarding plan (kickoff, activation,
    /// first value review), flags activation risks, and captures known
    /// blockers as feedback items for the PRD synthesizer.
    pub fn run_onboarding_csm(
        &self,
        envelope: AgentTaskEnvelope,
        input: OnboardingInput,
    ) -> Result<AgentTaskResult<OnboardingOutput>, Phase3AgentError> {
        // Milestone ids are keyed to the customer id for traceability.
        let onboarding_plan = vec![
            OnboardingMilestone {
                milestone_id: format!("{}-kickoff", input.customer_id.simple()),
                name: "Kickoff and success criteria alignment".to_string(),
                due_in_days: 2,
                owner_role: "csm".to_string(),
                success_criteria: "Mutual action plan approved with target KPI and stakeholders"
                    .to_string(),
            },
            OnboardingMilestone {
                milestone_id: format!("{}-activation", input.customer_id.simple()),
                name: "Activation workflow completion".to_string(),
                due_in_days: 7,
                owner_role: "implementation_specialist".to_string(),
                success_criteria: "Primary workflow active and first business outcome observed"
                    .to_string(),
            },
            OnboardingMilestone {
                milestone_id: format!("{}-qbr", input.customer_id.simple()),
                name: "First value review".to_string(),
                due_in_days: 21,
                owner_role: "csm".to_string(),
                success_criteria: "QBR completed with next-quarter expansion hypothesis"
                    .to_string(),
            },
        ];

        let mut activation_risk_flags = Vec::new();
        let activation_gap = 
input.target_activation_rate - input.current_activation_rate; + if activation_gap > 0.2 { + activation_risk_flags.push(OnboardingRiskFlag { + code: "activation_gap_high".to_string(), + severity: RiskSeverity::High, + summary: format!( + "Activation rate ({:.2}) is far below target ({:.2})", + input.current_activation_rate, input.target_activation_rate + ), + mitigation: + "Run weekly unblock review and fast-track implementation specialist support" + .to_string(), + }); + } else if activation_gap > 0.1 { + activation_risk_flags.push(OnboardingRiskFlag { + code: "activation_gap_moderate".to_string(), + severity: RiskSeverity::Medium, + summary: format!( + "Activation rate ({:.2}) is below target ({:.2})", + input.current_activation_rate, input.target_activation_rate + ), + mitigation: "Prioritize setup checklist and tighten onboarding cadence".to_string(), + }); + } + for blocker in &input.known_blockers { + activation_risk_flags.push(OnboardingRiskFlag { + code: format!("blocker_{}", slugify(blocker)), + severity: RiskSeverity::Medium, + summary: blocker.clone(), + mitigation: "Assign owner and due date in weekly onboarding stand-up".to_string(), + }); + } + + let mut captured_feedback = Vec::new(); + for blocker in input.known_blockers.iter().take(5) { + captured_feedback.push(FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::Onboarding, + segment_id: Some(input.segment_id.clone()), + text: blocker.clone(), + created_at: Utc::now(), + evidence_ref: Some(format!("onboarding://{}", slugify(blocker))), + }); + } + if let Some(handoff) = input.handoff_summary.as_deref() { + captured_feedback.push(FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::Onboarding, + segment_id: Some(input.segment_id.clone()), + text: format!("handoff context: {}", handoff), + created_at: Utc::now(), + evidence_ref: Some("handoff://outbound_or_sales".to_string()), + }); + } + + let qbr_summary = format!( + "Account {} onboarding focus: {}. 
Goals: {}.", + input.account_name, + if activation_risk_flags.is_empty() { + "stabilize and expand usage" + } else { + "close activation gap and remove blockers" + }, + if input.customer_goals.is_empty() { + "no explicit goals captured".to_string() + } else { + input.customer_goals.join("; ") + } + ); + + let output = OnboardingOutput { + onboarding_plan, + activation_risk_flags, + captured_feedback, + qbr_summary, + }; + + let mut events = Vec::new(); + events.push(self.make_event( + &envelope, + AgentId::RachelOnboardingCsm, + "onboarding.plan.started", + SubjectType::Account, + input.customer_id, + &serde_json::json!({ + "milestone_count": output.onboarding_plan.len(), + "segment_id": input.segment_id, + }), + )?); + for risk in &output.activation_risk_flags { + events.push(self.make_event( + &envelope, + AgentId::RachelOnboardingCsm, + "onboarding.risk.flagged", + SubjectType::Account, + input.customer_id, + risk, + )?); + } + for feedback in &output.captured_feedback { + events.push(self.make_event( + &envelope, + AgentId::RachelOnboardingCsm, + "onboarding.feedback.captured", + SubjectType::Account, + input.customer_id, + feedback, + )?); + } + + let status = if output.onboarding_plan.is_empty() { + TaskStatus::Failed + } else if !output.activation_risk_flags.is_empty() + && output + .activation_risk_flags + .iter() + .any(|risk| risk.severity == RiskSeverity::High) + { + TaskStatus::NeedsHuman + } else { + TaskStatus::Succeeded + }; + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: match status { + TaskStatus::Failed => 0.3, + TaskStatus::NeedsHuman => 0.64, + _ => 0.83, + }, + evidence_refs: envelope.input_refs.clone(), + next_action: match status { + TaskStatus::Failed => { + "Provide onboarding inputs and retry plan generation".to_string() + } + TaskStatus::NeedsHuman => { + "Escalate high-risk onboarding account for manual 
intervention".to_string() + } + _ => "Execute onboarding milestones and monitor activation weekly".to_string(), + }, + errors: Vec::new(), + }) + } + + pub fn run_experiment_analyst( + &self, + envelope: AgentTaskEnvelope, + input: ExperimentInput, + ) -> Result, Phase3AgentError> { + let safe_baseline = input.baseline_value.max(0.0001); + let uplift_ratio = (input.observed_value - safe_baseline) / safe_baseline; + let sample_ready = input.sample_size >= input.min_sample_size; + let confidence = input.confidence_estimate.clamp(0.0, 1.0); + let reliable = sample_ready && confidence >= 0.7; + + let experiment_design = ExperimentDesign { + experiment_id: format!("exp-{}", envelope.task_id.simple()), + hypothesis: format!( + "Improving {} should increase {}", + input.experiment_name, input.primary_metric + ), + success_metric: input.primary_metric.clone(), + guardrails: vec![ + "do not increase unsubscribe_rate".to_string(), + "hold support_ticket_volume within baseline tolerance".to_string(), + "keep CAC within approved budget band".to_string(), + ], + segments: input.segment_ids.clone(), + }; + + let result_summary = ExperimentResultSummary { + experiment_id: experiment_design.experiment_id.clone(), + uplift_ratio, + statistically_reliable: reliable, + confidence_estimate: confidence, + sample_size: input.sample_size, + }; + + let mut recommendations = Vec::new(); + if reliable && uplift_ratio > 0.05 { + recommendations.push(ExperimentRecommendation { + action: "scale_winning_variant".to_string(), + owner: "growth_ops".to_string(), + rationale: "Reliable positive uplift observed with sufficient sample".to_string(), + expected_impact: format!("{:.1}% primary metric uplift", uplift_ratio * 100.0), + }); + } else if reliable && uplift_ratio < -0.03 { + recommendations.push(ExperimentRecommendation { + action: "rollback_and_retest".to_string(), + owner: "pmm".to_string(), + rationale: "Reliable negative movement against baseline".to_string(), + expected_impact: "Recover 
baseline conversion performance".to_string(), + }); + } else { + recommendations.push(ExperimentRecommendation { + action: "continue_data_collection".to_string(), + owner: "experiment_analyst".to_string(), + rationale: "Need stronger statistical signal or larger sample".to_string(), + expected_impact: "Increase confidence for next decision cycle".to_string(), + }); + } + if !input.adoption_signals.is_empty() { + let avg_delta = input + .adoption_signals + .iter() + .map(|signal| signal.after_rate - signal.before_rate) + .sum::() + / input.adoption_signals.len() as f32; + recommendations.push(ExperimentRecommendation { + action: "feature_adoption_followup".to_string(), + owner: "product_growth".to_string(), + rationale: "Adoption deltas provide additional context for GTM impact".to_string(), + expected_impact: format!("{:.1}% mean adoption delta", avg_delta * 100.0), + }); + } + + let output = ExperimentOutput { + experiment_design, + result_summary, + recommendations, + }; + + let mut events = Vec::new(); + events.push(self.make_event( + &envelope, + AgentId::RachelExperimentAnalyst, + "experiment.started", + SubjectType::Campaign, + envelope.objective_id, + &output.experiment_design, + )?); + events.push(self.make_event( + &envelope, + AgentId::RachelExperimentAnalyst, + "experiment.result.published", + SubjectType::Campaign, + envelope.objective_id, + &output.result_summary, + )?); + for recommendation in &output.recommendations { + events.push(self.make_event( + &envelope, + AgentId::RachelExperimentAnalyst, + "experiment.recommendation.issued", + SubjectType::Campaign, + envelope.objective_id, + recommendation, + )?); + } + + let needs_human = !output.result_summary.statistically_reliable; + if needs_human { + let approval = ApprovalRequest { + reason: "Metrics reliability below threshold for autonomous scaling".to_string(), + risk_level: "medium".to_string(), + reviewer_group: "revops".to_string(), + }; + events.push(self.make_event( + &envelope, + 
AgentId::RachelExperimentAnalyst, + "approval.requested", + SubjectType::Campaign, + envelope.objective_id, + &approval, + )?); + } + + let status = if needs_human { + TaskStatus::NeedsHuman + } else { + TaskStatus::Succeeded + }; + + Ok(AgentTaskResult { + task_id: envelope.task_id, + status, + schema_version: GTM_SCHEMA_VERSION.to_string(), + output_payload: output, + emitted_events: events, + confidence: if needs_human { + 0.62 + } else { + confidence.max(0.75) + }, + evidence_refs: envelope.input_refs.clone(), + next_action: if needs_human { + "Review experiment reliability and decide whether to extend sample window" + .to_string() + } else { + "Apply experiment recommendation and schedule next validation cycle".to_string() + }, + errors: Vec::new(), + }) + } + + fn make_event( + &self, + envelope: &AgentTaskEnvelope, + producer: AgentId, + event_type: &str, + subject_type: SubjectType, + subject_id: Uuid, + payload: &T, + ) -> Result { + Ok(EventEnvelope { + event_id: Uuid::new_v4(), + event_type: event_type.to_string(), + occurred_at: Utc::now(), + producer, + tenant_id: envelope.tenant_id, + subject_type, + subject_id, + schema_version: GTM_SCHEMA_VERSION.to_string(), + trace_id: envelope.trace_id, + idempotency_key: format!( + "{}:{}:{}", + envelope.idempotency_key, + producer.as_str(), + event_type + ), + payload: serde_json::to_value(payload)?, + }) + } +} + +fn slugify(value: &str) -> String { + let mut out = String::with_capacity(value.len()); + for ch in value.chars() { + if ch.is_ascii_alphanumeric() { + out.push(ch.to_ascii_lowercase()); + } else if ch.is_ascii_whitespace() || ch == '-' { + out.push('_'); + } + } + while out.contains("__") { + out = out.replace("__", "_"); + } + out.trim_matches('_').to_string() +} + +#[cfg(test)] +mod tests { + use super::super::contracts::{ + AgentId, AgentTaskEnvelope, CampaignPerformance, ExperimentInput, FeatureAdoptionSignal, + OnboardingInput, PolicyPack, RiskSeverity, TaskPriority, TaskStatus, + }; + use 
super::*; + + fn base_envelope(agent_id: AgentId) -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(agent_id); + envelope.priority = TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.input_refs = vec!["warehouse://metrics_snapshot".to_string()]; + envelope + } + + fn onboarding_input() -> OnboardingInput { + OnboardingInput { + customer_id: Uuid::new_v4(), + account_name: "Acme Corp".to_string(), + segment_id: "tier_a_high_fit".to_string(), + customer_goals: vec![ + "launch first outbound workflow".to_string(), + "reach activation milestone in two weeks".to_string(), + ], + known_blockers: vec!["CRM field mapping incomplete".to_string()], + current_activation_rate: 0.52, + target_activation_rate: 0.72, + handoff_summary: Some("High intent from outbound SDR handoff".to_string()), + } + } + + fn experiment_input() -> ExperimentInput { + ExperimentInput { + experiment_name: "new onboarding sequence".to_string(), + primary_metric: "activation_rate_day_14".to_string(), + baseline_value: 0.42, + observed_value: 0.50, + sample_size: 320, + min_sample_size: 200, + confidence_estimate: 0.83, + segment_ids: vec!["tier_a_high_fit".to_string()], + campaign_results: vec![CampaignPerformance { + campaign_id: "cmp-001".to_string(), + segment_id: "tier_a_high_fit".to_string(), + spend_usd: 4500.0, + impressions: 120_000, + clicks: 3_200, + meetings: 210, + sqls: 62, + }], + adoption_signals: vec![FeatureAdoptionSignal { + feature_name: "workflow_builder".to_string(), + before_rate: 0.33, + after_rate: 0.49, + }], + } + } + + #[test] + fn onboarding_generates_plan_and_risk_flags() { + let engine = Phase3AgentEngine; + let result = engine + .run_onboarding_csm( + base_envelope(AgentId::RachelOnboardingCsm), + onboarding_input(), + ) + .unwrap(); + assert_eq!(result.status, TaskStatus::NeedsHuman); + assert!(!result.output_payload.onboarding_plan.is_empty()); + assert!(result + .output_payload + .activation_risk_flags + .iter() + 
.any(|risk| risk.severity == RiskSeverity::Medium)); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "onboarding.plan.started")); + } + + #[test] + fn experiment_publishes_recommendations() { + let engine = Phase3AgentEngine; + let result = engine + .run_experiment_analyst( + base_envelope(AgentId::RachelExperimentAnalyst), + experiment_input(), + ) + .unwrap(); + assert_eq!(result.status, TaskStatus::Succeeded); + assert!(result.output_payload.result_summary.statistically_reliable); + assert!(result + .emitted_events + .iter() + .any(|event| event.event_type == "experiment.result.published")); + } + + #[test] + fn phase3_workflow_requires_reliable_metrics_gate() { + let engine = Phase3AgentEngine; + let workflow = Phase3WorkflowInput { + base_envelope: base_envelope(AgentId::RachelOrchestrator), + metrics_reliable: false, + onboarding: onboarding_input(), + experiment: experiment_input(), + }; + let error = engine.run_workflow(workflow).unwrap_err(); + assert!(matches!(error, Phase3AgentError::MetricsNotReliable)); + } + + #[test] + fn phase3_workflow_runs_onboarding_and_experiment() { + let engine = Phase3AgentEngine; + let workflow = Phase3WorkflowInput { + base_envelope: base_envelope(AgentId::RachelOrchestrator), + metrics_reliable: true, + onboarding: onboarding_input(), + experiment: experiment_input(), + }; + + let result = engine.run_workflow(workflow).unwrap(); + assert!(!result.events.is_empty()); + assert!(result + .events + .iter() + .any(|event| event.event_type == "onboarding.feedback.captured")); + assert!(result + .events + .iter() + .any(|event| event.event_type == "experiment.recommendation.issued")); + } +} diff --git a/DoWhiz_service/scheduler_module/src/lib.rs b/DoWhiz_service/scheduler_module/src/lib.rs index 39b1a9fa..1757d0d8 100644 --- a/DoWhiz_service/scheduler_module/src/lib.rs +++ b/DoWhiz_service/scheduler_module/src/lib.rs @@ -8,6 +8,7 @@ pub mod env_alias; pub mod google_auth; pub mod 
google_docs_poller; pub mod google_workspace_poller; +pub mod gtm_agents; pub mod ingestion; pub mod ingestion_queue; pub mod mailbox; diff --git a/DoWhiz_service/scheduler_module/src/service/auth.rs b/DoWhiz_service/scheduler_module/src/service/auth.rs index bd41296d..a1edad45 100644 --- a/DoWhiz_service/scheduler_module/src/service/auth.rs +++ b/DoWhiz_service/scheduler_module/src/service/auth.rs @@ -4,7 +4,8 @@ use axum::response::{IntoResponse, Redirect}; use axum::routing::{delete, get, post}; use axum::{Json, Router}; use base64::Engine; -use jsonwebtoken::{decode, DecodingKey, Validation, Algorithm}; +use chrono::Utc; +use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use tokio::task; @@ -13,6 +14,12 @@ use uuid::Uuid; use crate::account_store::{AccountStore, AccountStoreError}; use crate::blob_store::BlobStore; +use crate::gtm_agents::{ + AccountSignal, AgentId, AgentTaskEnvelope, ChannelPolicy, ClaimRisk, GtmChannel, + HubspotModeAExecutor, IcpScoutInput, IcpTier, MessageBundle, MessageVariant, ModeAAgentEngine, + ModeAOutboundDispatchInput, ModeAOutboundDispatchOutput, OutboundSdrInput, Phase1AgentEngine, + PolicyPack, SegmentContact, SequencePolicy, TaskPriority, +}; use crate::user_store::UserStore; use crate::{load_tasks_with_status, TaskStatusSummary}; @@ -40,8 +47,8 @@ pub struct AuthState { /// JWT Claims from Supabase token #[derive(Debug, Deserialize)] struct JwtClaims { - sub: Uuid, // User ID - exp: usize, // Expiration time + sub: Uuid, // User ID + exp: usize, // Expiration time #[serde(default)] aud: Option, // Audience (optional) #[serde(default)] @@ -144,6 +151,62 @@ fn extract_bearer_token(headers: &HeaderMap) -> Option { .map(|s| s.to_string()) } +async fn resolve_authenticated_account_id( + state: &AuthState, + headers: &HeaderMap, +) -> Result { + let token = match extract_bearer_token(headers) { + Some(t) => t, + None => { + return Err(( + StatusCode::UNAUTHORIZED, 
+ Json(serde_json::json!({ + "error": "Missing Authorization header" + })), + ) + .into_response()); + } + }; + + let auth_user = match validate_supabase_token(&state.supabase_url, &token).await { + Ok(user) => user, + Err((status, msg)) => { + return Err((status, Json(serde_json::json!({ "error": msg }))).into_response()); + } + }; + + let store = state.account_store.clone(); + let account_lookup = task::spawn_blocking(move || store.get_account_by_auth_user(auth_user.id)) + .await + .map_err(|err| { + error!("spawn_blocking panicked: {}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ "error": "Internal error" })), + ) + .into_response() + })?; + + match account_lookup { + Ok(Some(account)) => Ok(account.id), + Ok(None) => Err(( + StatusCode::NOT_FOUND, + Json(serde_json::json!({ + "error": "No DoWhiz account found. Complete sign-in again to provision account." + })), + ) + .into_response()), + Err(err) => { + error!("Failed to resolve account: {}", err); + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ "error": "Database error" })), + ) + .into_response()) + } + } +} + // ============================================================================ // Signup // ============================================================================ @@ -503,17 +566,16 @@ pub async fn link_identifier( let frontend_url = state.frontend_url.clone(); // Create verification token - let token_result = task::spawn_blocking(move || { - store.create_email_verification_token(account_id, &email) - }) - .await - .map_err(|e| { - error!("spawn_blocking panicked: {}", e); - ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(serde_json::json!({ "error": "Internal error" })), - ) - }); + let token_result = + task::spawn_blocking(move || store.create_email_verification_token(account_id, &email)) + .await + .map_err(|e| { + error!("spawn_blocking panicked: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ "error": "Internal 
error" })), + ) + }); let verification_token = match token_result { Ok(Ok(token)) => token, @@ -1373,7 +1435,10 @@ pub async fn discord_oauth_callback( .get("https://discord.com/api/users/@me") .header( "Authorization", - format!("{} {}", discord_token.token_type, discord_token.access_token), + format!( + "{} {}", + discord_token.token_type, discord_token.access_token + ), ) .send() .await; @@ -1424,10 +1489,9 @@ pub async fn discord_oauth_callback( // Link Discord ID to account let store = state.account_store.clone(); let discord_id = discord_user.id.clone(); - let link_result = task::spawn_blocking(move || { - store.create_identifier(account.id, "discord", &discord_id) - }) - .await; + let link_result = + task::spawn_blocking(move || store.create_identifier(account.id, "discord", &discord_id)) + .await; match link_result { Ok(Ok(_identifier)) => { @@ -1610,7 +1674,9 @@ pub async fn slack_oauth_callback( // Check if Slack returned an error if !slack_response.ok { - let error_msg = slack_response.error.unwrap_or_else(|| "unknown".to_string()); + let error_msg = slack_response + .error + .unwrap_or_else(|| "unknown".to_string()); error!("Slack OAuth error: {}", error_msg); return redirect_to(&format!( "/auth/index.html?slack=error&reason={}", @@ -1655,10 +1721,8 @@ pub async fn slack_oauth_callback( // Link Slack ID to account let store = state.account_store.clone(); let slack_id = slack_user.id.clone(); - let link_result = task::spawn_blocking(move || { - store.create_identifier(account.id, "slack", &slack_id) - }) - .await; + let link_result = + task::spawn_blocking(move || store.create_identifier(account.id, "slack", &slack_id)).await; match link_result { Ok(Ok(_identifier)) => { @@ -1684,11 +1748,14 @@ pub async fn slack_oauth_callback( // ============================================================================ /// Send a verification email with a magic link -async fn send_verification_email(email: &str, verify_url: &str) -> Result<(), Box> { +async fn 
send_verification_email( + email: &str, + verify_url: &str, +) -> Result<(), Box> { let postmark_token = std::env::var("POSTMARK_SERVER_TOKEN") .map_err(|_| "POSTMARK_SERVER_TOKEN not configured")?; - let from_email = std::env::var("POSTMARK_FROM_EMAIL") - .unwrap_or_else(|_| "noreply@dowhiz.com".to_string()); + let from_email = + std::env::var("POSTMARK_FROM_EMAIL").unwrap_or_else(|_| "noreply@dowhiz.com".to_string()); let html_body = format!( r#" @@ -1780,9 +1847,7 @@ pub async fn verify_email( error!("Failed to verify email: {}", e); redirect_to("/auth/index.html?email_verified=error&reason=database_error") } - Err(_) => { - redirect_to("/auth/index.html?email_verified=error&reason=internal_error") - } + Err(_) => redirect_to("/auth/index.html?email_verified=error&reason=internal_error"), } } @@ -1892,6 +1957,462 @@ pub async fn get_tasks( (StatusCode::OK, Json(TasksResponse { tasks })).into_response() } +// ============================================================================ +// GTM Mode A API +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct GtmIcpPreviewRequest { + pub leads: Vec, + pub min_sample_size: Option, +} + +#[derive(Debug, Deserialize)] +pub struct GtmLeadInput { + pub recipient_id: Option, + pub account_id: Option, + pub email: String, + pub first_name: Option, + pub job_title: Option, + pub company_name: Option, + pub timezone: Option, + pub company_size: Option, + pub industry: Option, + pub region: Option, + pub product_events_14d: Option, + pub support_tickets_30d: Option, + pub won_deals_12m: Option, + pub lost_deals_12m: Option, + pub churned: Option, + pub activation_days: Option, + pub ltv_usd: Option, +} + +#[derive(Debug, Serialize)] +pub struct GtmIcpPreviewLead { + pub recipient_id: Uuid, + pub account_id: Uuid, + pub email: String, + pub first_name: Option, + pub job_title: Option, + pub company_name: Option, + pub timezone: Option, + pub company_size: 
u32, + pub industry: String, + pub region: String, + pub score_0_100: u8, + pub tier: IcpTier, + pub recommended: bool, + pub top_drivers: Vec, +} + +#[derive(Debug, Serialize)] +pub struct GtmIcpPreviewResponse { + pub status: String, + pub errors: Vec, + pub segment_definitions: Vec, + pub anti_icp_rules: Vec, + pub leads: Vec, +} + +#[derive(Debug, Deserialize)] +pub struct GtmOutboundPlanRequest { + pub leads: Vec, + pub segment_id: Option, + pub message_subject: String, + pub message_body: String, + pub claim_risk: Option, + pub max_touches: Option, + pub cadence_days: Option, + pub stop_conditions: Option>, + pub approval_required: Option, + pub assignee_team: Option, + pub reviewer_group: Option, +} + +#[derive(Debug, Deserialize)] +pub struct GtmOutboundLead { + #[serde(default = "default_true")] + pub selected: bool, + pub recipient_id: Option, + pub account_id: Option, + pub email: String, + pub first_name: Option, + pub job_title: Option, + pub company_name: Option, + pub timezone: Option, +} + +#[derive(Debug, Serialize)] +pub struct GtmOutboundPlanResponse { + pub outbound_status: String, + pub outbound_errors: Vec, + pub dispatch_status: String, + pub dispatch_errors: Vec, + pub mode_a_output: ModeAOutboundDispatchOutput, +} + +#[derive(Debug, Deserialize)] +pub struct GtmHubspotPushRequest { + pub mode_a_output: ModeAOutboundDispatchOutput, +} + +#[derive(Debug, Serialize)] +pub struct GtmHubspotPushResponse { + pub report: crate::gtm_agents::HubspotDispatchReport, +} + +pub async fn gtm_icp_preview( + State(state): State, + headers: HeaderMap, + Json(payload): Json, +) -> impl IntoResponse { + let account_id = match resolve_authenticated_account_id(&state, &headers).await { + Ok(id) => id, + Err(response) => return response, + }; + + if payload.leads.is_empty() { + return ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({ + "error": "At least one lead is required" + })), + ) + .into_response(); + } + + let engine = Phase1AgentEngine; + let mut 
envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + envelope.tenant_id = account_id; + envelope.priority = TaskPriority::High; + envelope.input_refs = vec!["ui://gtm_linkedin_console".to_string()]; + + let mut leads = Vec::new(); + let mut accounts = Vec::new(); + for lead in payload.leads { + let normalized_email = lead.email.trim().to_lowercase(); + if normalized_email.is_empty() { + continue; + } + let account_id = lead.account_id.unwrap_or_else(Uuid::new_v4); + let recipient_id = lead.recipient_id.unwrap_or_else(Uuid::new_v4); + let company_size = lead.company_size.unwrap_or(50); + let industry = lead.industry.unwrap_or_else(|| "unknown".to_string()); + let region = lead.region.unwrap_or_else(|| "US".to_string()); + let product_events_14d = lead.product_events_14d.unwrap_or(3); + let support_tickets_30d = lead.support_tickets_30d.unwrap_or(1); + let won_deals_12m = lead.won_deals_12m.unwrap_or(1); + let lost_deals_12m = lead.lost_deals_12m.unwrap_or(1); + let churned = lead.churned.unwrap_or(false); + let activation_days = lead.activation_days.unwrap_or(14); + let ltv_usd = lead.ltv_usd.unwrap_or(1500.0); + + leads.push(GtmIcpPreviewLead { + recipient_id, + account_id, + email: normalized_email.clone(), + first_name: lead.first_name, + job_title: lead.job_title, + company_name: lead.company_name, + timezone: lead.timezone, + company_size, + industry: industry.clone(), + region: region.clone(), + score_0_100: 0, + tier: IcpTier::D, + recommended: false, + top_drivers: Vec::new(), + }); + + accounts.push(AccountSignal { + entity_id: account_id, + company_size, + industry, + region, + product_events_14d, + support_tickets_30d, + won_deals_12m, + lost_deals_12m, + churned, + activation_days, + ltv_usd, + }); + } + + if accounts.is_empty() { + return ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({ + "error": "No valid leads provided" + })), + ) + .into_response(); + } + + let min_sample_size = payload + .min_sample_size + .unwrap_or_else(|| 
accounts.len().min(25).max(2)); + let result = match engine.run_icp_scout( + envelope.with_agent(AgentId::RachelIcpScout), + IcpScoutInput { + accounts, + current_segment_ids: Vec::new(), + min_sample_size, + }, + ) { + Ok(value) => value, + Err(err) => { + error!("GTM ICP preview failed: {}", err); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ + "error": "Failed to run ICP preview" + })), + ) + .into_response(); + } + }; + + let scores_by_entity = result + .output_payload + .icp_scores + .iter() + .map(|score| (score.entity_id, score)) + .collect::>(); + for lead in &mut leads { + if let Some(score) = scores_by_entity.get(&lead.account_id) { + lead.score_0_100 = score.score_0_100; + lead.tier = score.tier; + lead.top_drivers = score.top_drivers.clone(); + lead.recommended = matches!(score.tier, IcpTier::A | IcpTier::B); + } + } + + let status = match result.status { + crate::gtm_agents::TaskStatus::Succeeded => "succeeded", + crate::gtm_agents::TaskStatus::NeedsHuman => "needs_human", + crate::gtm_agents::TaskStatus::Failed => "failed", + crate::gtm_agents::TaskStatus::Partial => "partial", + }; + + ( + StatusCode::OK, + Json(GtmIcpPreviewResponse { + status: status.to_string(), + errors: result.errors, + segment_definitions: result.output_payload.segment_definitions, + anti_icp_rules: result.output_payload.anti_icp_rules, + leads, + }), + ) + .into_response() +} + +pub async fn gtm_outbound_plan( + State(state): State, + headers: HeaderMap, + Json(payload): Json, +) -> impl IntoResponse { + let account_id = match resolve_authenticated_account_id(&state, &headers).await { + Ok(id) => id, + Err(response) => return response, + }; + + if payload.message_subject.trim().is_empty() || payload.message_body.trim().is_empty() { + return ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({ + "error": "message_subject and message_body are required" + })), + ) + .into_response(); + } + + let segment_manifest = payload + .leads + .iter() + 
.filter(|lead| lead.selected) + .filter_map(|lead| { + let email = lead.email.trim().to_lowercase(); + if email.is_empty() { + return None; + } + Some(SegmentContact { + recipient_id: lead.recipient_id.unwrap_or_else(Uuid::new_v4), + account_id: lead.account_id.unwrap_or_else(Uuid::new_v4), + email, + first_name: lead.first_name.clone(), + job_title: lead.job_title.clone(), + company_name: lead.company_name.clone(), + timezone: lead.timezone.clone(), + }) + }) + .collect::>(); + + if segment_manifest.is_empty() { + return ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({ + "error": "Select at least one lead to plan outbound outreach" + })), + ) + .into_response(); + } + + let claim_risk = payload.claim_risk.unwrap_or(ClaimRisk::Low); + let segment_id = payload + .segment_id + .clone() + .unwrap_or_else(|| "linkedin_icp_selected".to_string()); + let outbound_input = OutboundSdrInput { + segment_manifest, + message_bundle: MessageBundle { + segment_id: segment_id.clone(), + variants: vec![MessageVariant { + template_id: format!("linkedin_dm_{}", Utc::now().timestamp()), + subject: payload.message_subject.clone(), + body: payload.message_body.clone(), + claim_risk, + }], + }, + sequence_policy: SequencePolicy { + max_touches: payload.max_touches.unwrap_or(2).clamp(1, 6), + cadence_days: payload.cadence_days.unwrap_or(2), + stop_conditions: payload + .stop_conditions + .clone() + .unwrap_or_else(|| vec!["positive_reply".to_string()]), + }, + channel_policy: ChannelPolicy { + email_enabled: false, + linkedin_ads_enabled: false, + linkedin_dm_enabled: true, + }, + }; + + let approval_required = payload.approval_required.unwrap_or(true); + let mut base_envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + base_envelope.tenant_id = account_id; + base_envelope.priority = TaskPriority::High; + base_envelope.input_refs = vec![format!("ui://gtm/segment/{}", segment_id)]; + base_envelope.policy_pack = PolicyPack::default(); + 
base_envelope.policy_pack.human_approval_required = approval_required; + base_envelope.policy_pack.allowed_channels = + vec![GtmChannel::LinkedinDm, GtmChannel::HubspotWorkflow]; + + let phase1 = Phase1AgentEngine; + let outbound_result = match phase1.run_outbound_sdr( + base_envelope.with_agent(AgentId::RachelOutboundSdr), + outbound_input.clone(), + ) { + Ok(value) => value, + Err(err) => { + error!("Failed to run outbound planner: {}", err); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ + "error": "Failed to generate outbound sequence" + })), + ) + .into_response(); + } + }; + + let mode_a = ModeAAgentEngine; + let dispatch = match mode_a.run_workflow(crate::gtm_agents::ModeAWorkflowInput { + base_envelope, + dispatch: ModeAOutboundDispatchInput { + outbound_input, + outbound_output: outbound_result.output_payload, + assignee_team: payload + .assignee_team + .clone() + .unwrap_or_else(|| "sdr_team".to_string()), + reviewer_group: payload + .reviewer_group + .clone() + .unwrap_or_else(|| "gtm_ops".to_string()), + approval_required, + }, + }) { + Ok(value) => value, + Err(err) => { + error!("Failed to run Mode A dispatch planner: {}", err); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({ + "error": "Failed to generate Mode A dispatch plan" + })), + ) + .into_response(); + } + }; + + let outbound_status = match outbound_result.status { + crate::gtm_agents::TaskStatus::Succeeded => "succeeded", + crate::gtm_agents::TaskStatus::NeedsHuman => "needs_human", + crate::gtm_agents::TaskStatus::Failed => "failed", + crate::gtm_agents::TaskStatus::Partial => "partial", + }; + let dispatch_status = match dispatch.dispatch.status { + crate::gtm_agents::TaskStatus::Succeeded => "succeeded", + crate::gtm_agents::TaskStatus::NeedsHuman => "needs_human", + crate::gtm_agents::TaskStatus::Failed => "failed", + crate::gtm_agents::TaskStatus::Partial => "partial", + }; + + ( + StatusCode::OK, + Json(GtmOutboundPlanResponse { + 
outbound_status: outbound_status.to_string(), + outbound_errors: outbound_result.errors, + dispatch_status: dispatch_status.to_string(), + dispatch_errors: dispatch.dispatch.errors, + mode_a_output: dispatch.dispatch.output_payload, + }), + ) + .into_response() +} + +pub async fn gtm_outbound_push_hubspot( + State(state): State, + headers: HeaderMap, + Json(payload): Json, +) -> impl IntoResponse { + if let Err(response) = resolve_authenticated_account_id(&state, &headers).await { + return response; + } + + let executor = match HubspotModeAExecutor::from_env() { + Ok(value) => value, + Err(err) => { + let status = if matches!( + err, + crate::gtm_agents::HubspotDispatchError::MissingAccessToken + ) { + StatusCode::SERVICE_UNAVAILABLE + } else { + StatusCode::INTERNAL_SERVER_ERROR + }; + return ( + status, + Json(serde_json::json!({ + "error": format!("HubSpot executor not available: {}", err) + })), + ) + .into_response(); + } + }; + + let report = executor.dispatch_mode_a_drafts(&payload.mode_a_output); + (StatusCode::OK, Json(GtmHubspotPushResponse { report })).into_response() +} + +fn default_true() -> bool { + true +} + // ============================================================================ // Router // ============================================================================ @@ -1910,5 +2431,11 @@ pub fn auth_router(state: AuthState) -> Router { .route("/auth/slack", get(slack_oauth_start)) .route("/auth/slack/callback", get(slack_oauth_callback)) .route("/api/tasks", get(get_tasks)) + .route("/api/gtm/icp/preview", post(gtm_icp_preview)) + .route("/api/gtm/outbound/plan", post(gtm_outbound_plan)) + .route( + "/api/gtm/outbound/push-hubspot", + post(gtm_outbound_push_hubspot), + ) .with_state(state) } diff --git a/DoWhiz_service/scheduler_module/tests/gtm_mode_a_e2e.rs b/DoWhiz_service/scheduler_module/tests/gtm_mode_a_e2e.rs new file mode 100644 index 00000000..a085e3f8 --- /dev/null +++ b/DoWhiz_service/scheduler_module/tests/gtm_mode_a_e2e.rs @@ 
-0,0 +1,151 @@ +use scheduler_module::gtm_agents::{ + AgentId, AgentTaskEnvelope, ChannelPolicy, ClaimRisk, GtmChannel, ManualDispatchStatus, + MessageBundle, MessageVariant, ModeAAgentEngine, ModeAOutboundDispatchInput, + ModeAWorkflowInput, OutboundSdrInput, Phase1AgentEngine, PolicyPack, SegmentContact, + SequencePolicy, TaskPriority, TaskStatus, +}; +use uuid::Uuid; + +fn base_envelope() -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + envelope.priority = TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.policy_pack.allowed_channels = + vec![GtmChannel::LinkedinDm, GtmChannel::HubspotWorkflow]; + envelope +} + +fn outbound_input() -> OutboundSdrInput { + OutboundSdrInput { + segment_manifest: vec![ + SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "alpha@example.com".to_string(), + first_name: Some("Avery".to_string()), + job_title: Some("Head of Growth".to_string()), + company_name: Some("Alpha".to_string()), + timezone: Some("America/Los_Angeles".to_string()), + }, + SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "bravo@example.com".to_string(), + first_name: Some("Bailey".to_string()), + job_title: Some("VP Marketing".to_string()), + company_name: Some("Bravo".to_string()), + timezone: Some("America/New_York".to_string()), + }, + ], + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "linkedin_dm_safe_v1".to_string(), + subject: "Idea for faster GTM execution".to_string(), + body: "Sharing a short playbook.".to_string(), + claim_risk: ClaimRisk::Low, + }], + }, + sequence_policy: SequencePolicy { + max_touches: 2, + cadence_days: 2, + stop_conditions: vec!["positive_reply".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: false, + linkedin_ads_enabled: false, + linkedin_dm_enabled: true, + }, + } +} + 
+#[test] +fn mode_a_workflow_builds_approval_queue_and_hubspot_drafts_from_phase1_outbound() { + let phase1 = Phase1AgentEngine; + let mode_a = ModeAAgentEngine; + let envelope = base_envelope(); + let outbound_in = outbound_input(); + + let outbound_result = phase1 + .run_outbound_sdr( + envelope.with_agent(AgentId::RachelOutboundSdr), + outbound_in.clone(), + ) + .expect("phase1 outbound should run"); + assert_eq!(outbound_result.status, TaskStatus::Succeeded); + + let result = mode_a + .run_workflow(ModeAWorkflowInput { + base_envelope: envelope, + dispatch: ModeAOutboundDispatchInput { + outbound_input: outbound_in, + outbound_output: outbound_result.output_payload, + assignee_team: "sdr_team".to_string(), + reviewer_group: "gtm_ops".to_string(), + approval_required: true, + }, + }) + .expect("mode a workflow should run"); + + assert_eq!(result.dispatch.status, TaskStatus::NeedsHuman); + assert_eq!(result.dispatch.output_payload.approval_queue.len(), 1); + assert_eq!(result.dispatch.output_payload.manual_send_tasks.len(), 2); + assert_eq!(result.dispatch.output_payload.hubspot_task_drafts.len(), 2); + assert_eq!( + result + .dispatch + .output_payload + .hubspot_communication_drafts + .len(), + 2 + ); + assert!(result + .dispatch + .output_payload + .manual_send_tasks + .iter() + .all(|task| task.status == ManualDispatchStatus::PendingApproval)); + assert!(result + .events + .iter() + .any(|event| event.event_type == "mode_a.approval.queued")); +} + +#[test] +fn mode_a_workflow_can_prepare_ready_for_rep_tasks() { + let phase1 = Phase1AgentEngine; + let mode_a = ModeAAgentEngine; + let envelope = base_envelope(); + let outbound_in = outbound_input(); + + let outbound_result = phase1 + .run_outbound_sdr( + envelope.with_agent(AgentId::RachelOutboundSdr), + outbound_in.clone(), + ) + .expect("phase1 outbound should run"); + assert_eq!(outbound_result.status, TaskStatus::Succeeded); + + let result = mode_a + .run_workflow(ModeAWorkflowInput { + base_envelope: 
envelope, + dispatch: ModeAOutboundDispatchInput { + outbound_input: outbound_in, + outbound_output: outbound_result.output_payload, + assignee_team: "sdr_team".to_string(), + reviewer_group: "gtm_ops".to_string(), + approval_required: false, + }, + }) + .expect("mode a workflow should run"); + + assert_eq!(result.dispatch.status, TaskStatus::Succeeded); + assert!(result.dispatch.output_payload.approval_queue.is_empty()); + assert_eq!(result.dispatch.output_payload.manual_send_tasks.len(), 2); + assert!(result + .dispatch + .output_payload + .manual_send_tasks + .iter() + .all(|task| task.status == ManualDispatchStatus::ReadyForRep)); +} diff --git a/DoWhiz_service/scheduler_module/tests/gtm_phase1_e2e.rs b/DoWhiz_service/scheduler_module/tests/gtm_phase1_e2e.rs new file mode 100644 index 00000000..c75bb2be --- /dev/null +++ b/DoWhiz_service/scheduler_module/tests/gtm_phase1_e2e.rs @@ -0,0 +1,261 @@ +use chrono::Utc; +use scheduler_module::gtm_agents::{ + AccountSignal, AgentId, AgentTaskEnvelope, BusinessContext, ChannelPolicy, ClaimRisk, + ClusterPolicy, CurrentState, FeedbackItem, FeedbackPrdInput, FeedbackSource, IcpScoutInput, + MessageBundle, MessageVariant, Objective, OrchestratorInput, OutboundSdrInput, + Phase1AgentEngine, Phase1WorkflowInput, PolicyPack, ProductContext, ResourceLimits, + SegmentContact, SequencePolicy, TaskPriority, TaskStatus, +}; +use uuid::Uuid; + +fn base_envelope() -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + envelope.priority = TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.input_refs = vec!["warehouse://daily_snapshot".to_string()]; + envelope +} + +#[test] +fn phase1_workflow_end_to_end_emits_cross_agent_events() { + let engine = Phase1AgentEngine; + let workflow = Phase1WorkflowInput { + base_envelope: base_envelope(), + orchestrator: OrchestratorInput { + objective: Objective { + name: "Increase high-fit meetings".to_string(), + target_metric: 
"meeting_rate".to_string(), + target_value: "0.18".to_string(), + due_date: None, + owner: "gtm_owner".to_string(), + }, + current_state: CurrentState::default(), + resource_limits: ResourceLimits { + daily_email_cap: 300, + budget_cap_usd: 15_000, + human_review_capacity: 3, + }, + }, + icp_scout: IcpScoutInput { + accounts: vec![ + AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 90, + industry: "SaaS".to_string(), + region: "US".to_string(), + product_events_14d: 10, + support_tickets_30d: 1, + won_deals_12m: 4, + lost_deals_12m: 1, + churned: false, + activation_days: 5, + ltv_usd: 8_000.0, + }, + AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 120, + industry: "SaaS".to_string(), + region: "US".to_string(), + product_events_14d: 8, + support_tickets_30d: 2, + won_deals_12m: 3, + lost_deals_12m: 2, + churned: false, + activation_days: 7, + ltv_usd: 6_000.0, + }, + ], + current_segment_ids: Vec::new(), + min_sample_size: 2, + }, + outbound_sdr: OutboundSdrInput { + segment_manifest: vec![ + SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "alpha@example.com".to_string(), + first_name: Some("Avery".to_string()), + job_title: Some("Head of Growth".to_string()), + company_name: Some("Alpha".to_string()), + timezone: Some("America/Los_Angeles".to_string()), + }, + SegmentContact { + recipient_id: Uuid::new_v4(), + account_id: Uuid::new_v4(), + email: "bravo@example.com".to_string(), + first_name: Some("Bailey".to_string()), + job_title: Some("VP Marketing".to_string()), + company_name: Some("Bravo".to_string()), + timezone: Some("America/New_York".to_string()), + }, + ], + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "safe_email_v1".to_string(), + subject: "Could this shorten your onboarding time?".to_string(), + body: "Low-risk variant focused on activation metrics.".to_string(), + claim_risk: ClaimRisk::Low, + }], + }, + 
sequence_policy: SequencePolicy { + max_touches: 3, + cadence_days: 2, + stop_conditions: vec!["positive_reply".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: true, + linkedin_ads_enabled: false, + linkedin_dm_enabled: false, + }, + }, + feedback_prd: FeedbackPrdInput { + feedback_items: vec![ + FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::SupportTicket, + segment_id: Some("tier_a_high_fit".to_string()), + text: "HubSpot integration misses custom properties".to_string(), + created_at: Utc::now(), + evidence_ref: Some("ticket:1001".to_string()), + }, + FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::OutboundReply, + segment_id: Some("tier_a_high_fit".to_string()), + text: "Need better integration for existing CRM workflows".to_string(), + created_at: Utc::now(), + evidence_ref: Some("reply:2002".to_string()), + }, + ], + product_context: ProductContext { + roadmap_refs: vec!["roadmap://q3".to_string()], + constraints: vec!["single platform squad".to_string()], + architecture_notes: vec!["legacy ETL path".to_string()], + }, + business_context: BusinessContext { + revenue_goal: "increase enterprise pipeline".to_string(), + strategic_themes: vec!["activation".to_string(), "expansion".to_string()], + }, + cluster_policy: ClusterPolicy { + min_cluster_size: 1, + recency_weight: 0.6, + }, + }, + }; + + let result = engine + .run_workflow(workflow) + .expect("phase1 workflow should run"); + assert_eq!(result.orchestrator.status, TaskStatus::Succeeded); + assert_eq!(result.icp_scout.status, TaskStatus::Succeeded); + assert_eq!(result.outbound_sdr.status, TaskStatus::Succeeded); + assert_eq!(result.feedback_prd.status, TaskStatus::Succeeded); + assert!(result + .events + .iter() + .any(|event| event.event_type == "orchestrator.task.assigned")); + assert!(result + .events + .iter() + .any(|event| event.event_type == "icp.segment.promoted")); + assert!(result + .events + .iter() + .any(|event| 
event.event_type == "outbound.send.requested")); + assert!(result + .events + .iter() + .any(|event| event.event_type == "prd.draft.generated")); +} + +#[test] +fn phase1_workflow_requires_human_when_icp_sample_too_small() { + let engine = Phase1AgentEngine; + let workflow = Phase1WorkflowInput { + base_envelope: base_envelope(), + orchestrator: OrchestratorInput { + objective: Objective { + name: "Validate niche segment".to_string(), + target_metric: "meeting_rate".to_string(), + target_value: "0.2".to_string(), + due_date: None, + owner: "gtm_owner".to_string(), + }, + current_state: CurrentState::default(), + resource_limits: ResourceLimits { + daily_email_cap: 100, + budget_cap_usd: 5_000, + human_review_capacity: 1, + }, + }, + icp_scout: IcpScoutInput { + accounts: vec![AccountSignal { + entity_id: Uuid::new_v4(), + company_size: 40, + industry: "Fintech".to_string(), + region: "US".to_string(), + product_events_14d: 4, + support_tickets_30d: 1, + won_deals_12m: 1, + lost_deals_12m: 0, + churned: false, + activation_days: 9, + ltv_usd: 3_500.0, + }], + current_segment_ids: Vec::new(), + min_sample_size: 3, + }, + outbound_sdr: OutboundSdrInput { + segment_manifest: Vec::new(), + message_bundle: MessageBundle { + segment_id: "tier_a_high_fit".to_string(), + variants: vec![MessageVariant { + template_id: "safe_email_v1".to_string(), + subject: "Quick question".to_string(), + body: "Short low-risk copy.".to_string(), + claim_risk: ClaimRisk::Low, + }], + }, + sequence_policy: SequencePolicy { + max_touches: 1, + cadence_days: 2, + stop_conditions: vec!["positive_reply".to_string()], + }, + channel_policy: ChannelPolicy { + email_enabled: true, + linkedin_ads_enabled: false, + linkedin_dm_enabled: false, + }, + }, + feedback_prd: FeedbackPrdInput { + feedback_items: vec![FeedbackItem { + feedback_id: Uuid::new_v4(), + source: FeedbackSource::Onboarding, + segment_id: Some("tier_a_high_fit".to_string()), + text: "Onboarding is confusing for admins".to_string(), + 
created_at: Utc::now(), + evidence_ref: Some("onboarding:88".to_string()), + }], + product_context: ProductContext { + roadmap_refs: Vec::new(), + constraints: vec!["limited eng bandwidth".to_string()], + architecture_notes: Vec::new(), + }, + business_context: BusinessContext { + revenue_goal: "reduce churn".to_string(), + strategic_themes: vec!["activation".to_string()], + }, + cluster_policy: ClusterPolicy { + min_cluster_size: 1, + recency_weight: 0.5, + }, + }, + }; + + let result = engine + .run_workflow(workflow) + .expect("phase1 workflow should run"); + assert_eq!(result.icp_scout.status, TaskStatus::NeedsHuman); + assert!(!result.icp_scout.errors.is_empty()); +} diff --git a/DoWhiz_service/scheduler_module/tests/gtm_phase2_e2e.rs b/DoWhiz_service/scheduler_module/tests/gtm_phase2_e2e.rs new file mode 100644 index 00000000..9de1c04d --- /dev/null +++ b/DoWhiz_service/scheduler_module/tests/gtm_phase2_e2e.rs @@ -0,0 +1,134 @@ +use chrono::Utc; +use scheduler_module::gtm_agents::{ + AgentId, AgentTaskEnvelope, AssetChannel, ContentInput, FunnelStage, InsightCluster, + MessageMap, Phase2AgentEngine, Phase2WorkflowInput, PolicyPack, PositioningBundle, + PositioningInput, PrdDraft, SegmentDefinition, TaskPriority, TaskStatus, +}; + +fn base_envelope() -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + envelope.priority = TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.input_refs = vec!["warehouse://stable_contracts_v1".to_string()]; + envelope +} + +fn positioning_input() -> PositioningInput { + PositioningInput { + segment_definitions: vec![ + SegmentDefinition { + segment_id: "tier_a_high_fit".to_string(), + rule_dsl: "icp_tier == 'A'".to_string(), + expected_lift: 1.35, + confidence: 0.84, + }, + SegmentDefinition { + segment_id: "industry_saas_high_fit".to_string(), + rule_dsl: "industry == 'saas'".to_string(), + expected_lift: 1.22, + confidence: 0.78, + }, + ], + 
insight_clusters: vec![ + InsightCluster { + cluster_id: "cluster_integration_gap".to_string(), + theme: "integration gap".to_string(), + frequency: 6, + affected_segments: vec!["tier_a_high_fit".to_string()], + evidence_refs: vec!["ticket:991".to_string()], + }, + InsightCluster { + cluster_id: "cluster_onboarding_friction".to_string(), + theme: "onboarding friction".to_string(), + frequency: 4, + affected_segments: vec!["tier_a_high_fit".to_string()], + evidence_refs: vec!["onboarding:122".to_string()], + }, + ], + prd_drafts: vec![PrdDraft { + prd_id: "prd_integration_gap".to_string(), + problem: "integration reliability impacts activation".to_string(), + users: vec!["tier_a_high_fit".to_string()], + success_metrics: vec!["activation_rate_day_14".to_string()], + scope: vec!["stabilize hubspot sync pipeline".to_string()], + risks: vec!["cross-team dependency".to_string()], + }], + strategic_themes: vec!["activation".to_string(), "expansion".to_string()], + data_contract_version: "1.0".to_string(), + } +} + +#[test] +fn phase2_workflow_generates_positioning_and_publishable_assets() { + let engine = Phase2AgentEngine; + let workflow = Phase2WorkflowInput { + base_envelope: base_envelope(), + data_contracts_stable: true, + positioning: positioning_input(), + content: ContentInput { + positioning_bundle: PositioningBundle { + bundle_id: "placeholder".to_string(), + message_maps: vec![MessageMap { + segment_id: "placeholder".to_string(), + value_proposition: "placeholder".to_string(), + pains: vec!["placeholder".to_string()], + proof_points: vec!["placeholder".to_string()], + objection_handling: vec!["placeholder".to_string()], + funnel_stage: FunnelStage::Awareness, + }], + claim_safe_list: vec!["placeholder".to_string()], + generated_at: Utc::now(), + }, + channels: vec![ + AssetChannel::Email, + AssetChannel::LandingPage, + AssetChannel::SalesOnePager, + ], + max_assets_per_channel: 2, + requires_human_review: false, + }, + }; + + let result = 
engine.run_workflow(workflow).expect("phase2 should run"); + assert_eq!(result.positioning.status, TaskStatus::Succeeded); + assert_eq!(result.content.status, TaskStatus::Succeeded); + assert!(result.content.output_payload.publish_ready); + assert!(result.content.output_payload.assets.len() >= 3); + assert!(result + .events + .iter() + .any(|event| event.event_type == "positioning.bundle.published")); + assert!(result + .events + .iter() + .any(|event| event.event_type == "content.asset.published")); +} + +#[test] +fn phase2_content_requests_review_when_explicitly_required() { + let engine = Phase2AgentEngine; + let workflow = Phase2WorkflowInput { + base_envelope: base_envelope(), + data_contracts_stable: true, + positioning: positioning_input(), + content: ContentInput { + positioning_bundle: PositioningBundle { + bundle_id: "placeholder".to_string(), + message_maps: Vec::new(), + claim_safe_list: Vec::new(), + generated_at: Utc::now(), + }, + channels: vec![AssetChannel::LinkedinAd], + max_assets_per_channel: 1, + requires_human_review: true, + }, + }; + + let result = engine.run_workflow(workflow).expect("phase2 should run"); + assert_eq!(result.content.status, TaskStatus::NeedsHuman); + assert!(result + .content + .emitted_events + .iter() + .any(|event| event.event_type == "approval.requested")); +} diff --git a/DoWhiz_service/scheduler_module/tests/gtm_phase3_e2e.rs b/DoWhiz_service/scheduler_module/tests/gtm_phase3_e2e.rs new file mode 100644 index 00000000..438272f0 --- /dev/null +++ b/DoWhiz_service/scheduler_module/tests/gtm_phase3_e2e.rs @@ -0,0 +1,126 @@ +use scheduler_module::gtm_agents::{ + AgentId, AgentTaskEnvelope, CampaignPerformance, ExperimentInput, FeatureAdoptionSignal, + OnboardingInput, Phase3AgentEngine, Phase3WorkflowInput, PolicyPack, TaskPriority, TaskStatus, +}; +use uuid::Uuid; + +fn base_envelope() -> AgentTaskEnvelope { + let mut envelope = AgentTaskEnvelope::new(AgentId::RachelOrchestrator); + envelope.priority = 
TaskPriority::High; + envelope.policy_pack = PolicyPack::default(); + envelope.input_refs = vec!["warehouse://reliable_metrics_weekly".to_string()]; + envelope +} + +fn onboarding_input() -> OnboardingInput { + OnboardingInput { + customer_id: Uuid::new_v4(), + account_name: "Northstar AI".to_string(), + segment_id: "tier_a_high_fit".to_string(), + customer_goals: vec![ + "shorten onboarding cycle".to_string(), + "ship first campaign this month".to_string(), + ], + known_blockers: vec![ + "email domain warmup incomplete".to_string(), + "lead source mapping unclear".to_string(), + ], + current_activation_rate: 0.48, + target_activation_rate: 0.72, + handoff_summary: Some("handoff from outbound SDR with high purchase intent".to_string()), + } +} + +fn experiment_input() -> ExperimentInput { + ExperimentInput { + experiment_name: "guided onboarding playbook".to_string(), + primary_metric: "activation_rate_day_14".to_string(), + baseline_value: 0.41, + observed_value: 0.50, + sample_size: 410, + min_sample_size: 250, + confidence_estimate: 0.82, + segment_ids: vec!["tier_a_high_fit".to_string(), "fast_activation".to_string()], + campaign_results: vec![ + CampaignPerformance { + campaign_id: "cmp_100".to_string(), + segment_id: "tier_a_high_fit".to_string(), + spend_usd: 5200.0, + impressions: 138_000, + clicks: 3520, + meetings: 220, + sqls: 72, + }, + CampaignPerformance { + campaign_id: "cmp_101".to_string(), + segment_id: "fast_activation".to_string(), + spend_usd: 2100.0, + impressions: 62_000, + clicks: 1510, + meetings: 88, + sqls: 25, + }, + ], + adoption_signals: vec![ + FeatureAdoptionSignal { + feature_name: "workflow_builder".to_string(), + before_rate: 0.34, + after_rate: 0.52, + }, + FeatureAdoptionSignal { + feature_name: "sequence_analytics".to_string(), + before_rate: 0.21, + after_rate: 0.33, + }, + ], + } +} + +#[test] +fn phase3_workflow_produces_onboarding_and_experiment_outputs() { + let engine = Phase3AgentEngine; + let workflow = 
Phase3WorkflowInput { + base_envelope: base_envelope(), + metrics_reliable: true, + onboarding: onboarding_input(), + experiment: experiment_input(), + }; + + let result = engine.run_workflow(workflow).expect("phase3 should run"); + assert_eq!(result.onboarding.status, TaskStatus::NeedsHuman); + assert_eq!(result.experiment.status, TaskStatus::Succeeded); + assert!(!result.onboarding.output_payload.onboarding_plan.is_empty()); + assert!(!result.experiment.output_payload.recommendations.is_empty()); + assert!(result + .events + .iter() + .any(|event| event.event_type == "onboarding.plan.started")); + assert!(result + .events + .iter() + .any(|event| event.event_type == "experiment.result.published")); +} + +#[test] +fn phase3_experiment_requests_human_review_when_signal_is_weak() { + let engine = Phase3AgentEngine; + let mut weak = experiment_input(); + weak.sample_size = 80; + weak.min_sample_size = 250; + weak.confidence_estimate = 0.55; + + let workflow = Phase3WorkflowInput { + base_envelope: base_envelope(), + metrics_reliable: true, + onboarding: onboarding_input(), + experiment: weak, + }; + + let result = engine.run_workflow(workflow).expect("phase3 should run"); + assert_eq!(result.experiment.status, TaskStatus::NeedsHuman); + assert!(result + .experiment + .emitted_events + .iter() + .any(|event| event.event_type == "approval.requested")); +} diff --git a/website/public/auth/index.html b/website/public/auth/index.html index 0ff0170d..6b22c196 100644 --- a/website/public/auth/index.html +++ b/website/public/auth/index.html @@ -92,6 +92,11 @@ border: 1px solid rgba(34, 197, 94, 0.3); color: #22c55e; } + .api-target-info { + margin-bottom: 1rem; + font-size: 0.8rem; + color: var(--text-muted, #888); + } .auth-toggle { text-align: center; margin-top: 1rem; @@ -286,6 +291,116 @@ font-size: 0.85rem; } + .gtm-panel-grid { + display: grid; + gap: 1rem; + } + + .gtm-controls { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; + align-items: center; + } + + 
.gtm-status { + font-size: 0.85rem; + color: var(--text-muted, #888); + } + + .gtm-table-wrap { + overflow-x: auto; + border: 1px solid var(--border, #333); + border-radius: 8px; + } + + .gtm-table { + width: 100%; + border-collapse: collapse; + min-width: 920px; + } + + .gtm-table th, + .gtm-table td { + padding: 0.55rem 0.65rem; + border-bottom: 1px solid var(--border, #333); + text-align: left; + vertical-align: middle; + font-size: 0.85rem; + } + + .gtm-table th { + background: rgba(56, 189, 248, 0.12); + font-weight: 600; + } + + .gtm-table tr:last-child td { + border-bottom: none; + } + + .gtm-table input[type="text"] { + width: 100%; + padding: 0.35rem 0.45rem; + border: 1px solid var(--border, #333); + border-radius: 6px; + background: var(--surface, #1a1a1a); + color: var(--text, #fff); + } + + .gtm-tier-pill { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 0.2rem 0.5rem; + border-radius: 999px; + font-size: 0.75rem; + font-weight: 700; + min-width: 2rem; + } + + .gtm-tier-a { + background: rgba(34, 197, 94, 0.2); + color: #22c55e; + } + + .gtm-tier-b { + background: rgba(59, 130, 246, 0.2); + color: #3b82f6; + } + + .gtm-tier-c { + background: rgba(245, 158, 11, 0.2); + color: #f59e0b; + } + + .gtm-tier-d { + background: rgba(239, 68, 68, 0.2); + color: #ef4444; + } + + .gtm-plan-list { + list-style: none; + padding: 0; + margin: 0; + display: grid; + gap: 0.5rem; + } + + .gtm-plan-item { + border: 1px solid var(--border, #333); + border-radius: 8px; + padding: 0.75rem; + background: var(--surface, #1a1a1a); + } + + .gtm-inline-code { + font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace; + font-size: 0.8rem; + background: rgba(148, 163, 184, 0.18); + padding: 0.15rem 0.35rem; + border-radius: 4px; + } + .memo-actions { display: flex; gap: 0.5rem; @@ -506,6 +621,7 @@

Sign in to DoWhiz

+