Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions app/ai-service/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@
# API Keys (at least one is required for AI features)
OPENAI_API_KEY=your_openai_api_key_here
GROQ_API_KEY=your_groq_api_key_here
OPENAI_MODEL=gpt-4o-mini
GROQ_MODEL=llama-3.3-70b-versatile
LLM_TIMEOUT_SECONDS=30

# Application Settings
APP_ENV=development
Expand Down
34 changes: 34 additions & 0 deletions app/ai-service/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,40 @@ Response body:
### OCR Processing
- **POST** `/ai/ocr` - Identity document OCR with field extraction

### Humanitarian Verification
- **POST** `/ai/humanitarian/verify` - Standardized humanitarian claim verification (Sphere criteria + context factors + provider fallback)

Request body:

```json
{
"aid_claim": "Relief teams delivered hygiene kits to all registered households in Sector B.",
"supporting_evidence": ["Distribution list #B-17", "Field monitor report"],
"context_factors": {
"security_status": "stable",
"weather": "heavy_rain",
"displacement_level": "moderate"
},
"provider_preference": "auto"
}
```

Response body:

```json
{
"success": true,
"provider": "openai",
"model": "gpt-4o-mini",
"prompt_variant": "primary",
"verification": {
"verdict": "credible",
"confidence": 0.86,
"summary": "Evidence aligns with claim across key criteria"
}
}
```

```bash
curl -X POST "http://localhost:8000/ai/ocr" -F "image=@document.jpg"
```
Expand Down
17 changes: 12 additions & 5 deletions app/ai-service/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
Handles environment variables and API key management
"""

from pydantic_settings import BaseSettings
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import Optional
import logging

Expand All @@ -17,6 +17,9 @@ class Settings(BaseSettings):
Environment Variables:
OPENAI_API_KEY: OpenAI API key for AI model access
GROQ_API_KEY: Groq API key for AI model access (alternative to OpenAI)
OPENAI_MODEL: Default OpenAI model for humanitarian verification
GROQ_MODEL: Default Groq model for humanitarian verification
LLM_TIMEOUT_SECONDS: Timeout for LLM API requests
APP_ENV: Application environment (development, staging, production)
LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
HOST: Server host (default: 0.0.0.0)
Expand All @@ -30,6 +33,9 @@ class Settings(BaseSettings):
# API Keys
openai_api_key: Optional[str] = None
groq_api_key: Optional[str] = None
openai_model: str = "gpt-4o-mini"
groq_model: str = "llama-3.3-70b-versatile"
llm_timeout_seconds: int = 30

# Application settings
app_env: str = "development"
Expand All @@ -47,10 +53,11 @@ class Settings(BaseSettings):
proof_of_life_confidence_threshold: float = 0.65
proof_of_life_min_face_size: int = 80

class Config:
env_file = ".env"
env_file_encoding = "utf-8"
case_sensitive = False
model_config = SettingsConfigDict(
env_file=".env",
env_file_encoding="utf-8",
case_sensitive=False,
)

def validate_api_keys(self) -> bool:
"""
Expand Down
24 changes: 24 additions & 0 deletions app/ai-service/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,11 @@
from config import settings
import tasks
from proof_of_life import ProofOfLifeAnalyzer, ProofOfLifeConfig
from schemas.humanitarian import (
HumanitarianVerificationRequest,
HumanitarianVerificationResponse,
)
from services.humanitarian_verification import HumanitarianVerificationService

limiter = Limiter(key_func=get_remote_address)

Expand Down Expand Up @@ -60,6 +65,7 @@ async def lifespan(app: FastAPI):
min_face_size=settings.proof_of_life_min_face_size,
)
)
humanitarian_verification_service = HumanitarianVerificationService()


# Request/Response models
Expand Down Expand Up @@ -229,6 +235,24 @@ async def analyze_proof_of_life(request: ProofOfLifeRequest):
)


@app.post("/ai/humanitarian/verify", response_model=HumanitarianVerificationResponse)
async def verify_humanitarian_claim(request: HumanitarianVerificationRequest):
    """Verify an aid claim against standardized humanitarian criteria.

    Delegates to the module-level ``humanitarian_verification_service``,
    which builds a Sphere-criteria prompt and queries the configured
    LLM provider(s).

    Args:
        request: Validated payload carrying the claim text, an optional
            evidence list, backend-supplied context factors, and a
            provider preference ("auto", "openai", or "groq").

    Returns:
        HumanitarianVerificationResponse: ``success=True`` with the
        service's result fields spread into the model on success;
        ``success=False`` plus the error message on any failure — the
        endpoint reports failures in-band rather than raising, so
        clients always receive HTTP 200.
    """
    logger.info("Processing humanitarian verification request")

    try:
        # NOTE(review): verify_claim looks like a synchronous call made from an
        # async handler — if it performs blocking LLM/network I/O it will stall
        # the event loop; consider run_in_executor. TODO confirm against the
        # service implementation.
        result = humanitarian_verification_service.verify_claim(
            aid_claim=request.aid_claim,
            supporting_evidence=request.supporting_evidence,
            context_factors=request.context_factors,
            provider_preference=request.provider_preference,
        )
        # Service result keys (provider, model, prompt_variant, verification)
        # are splatted directly into the response envelope.
        return HumanitarianVerificationResponse(success=True, **result)
    except Exception as e:
        # Broad catch is deliberate: every failure mode is surfaced via the
        # response envelope's error field instead of an HTTP error status.
        logger.error("Humanitarian verification failed: %s", str(e), exc_info=True)
        return HumanitarianVerificationResponse(success=False, error=str(e))


@app.get("/ai/status/{task_id}", response_model=TaskStatusResponse)
async def get_task_status(task_id: str):
"""
Expand Down
3 changes: 3 additions & 0 deletions app/ai-service/pytest.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[pytest]
filterwarnings =
ignore:.*pkgutil.find_loader.*:DeprecationWarning:pytesseract.pytesseract
19 changes: 19 additions & 0 deletions app/ai-service/schemas/humanitarian.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from typing import Any, Dict, List, Literal, Optional

from pydantic import BaseModel, Field


class HumanitarianVerificationRequest(BaseModel):
    """Request payload for POST /ai/humanitarian/verify."""

    # Free-text claim to evaluate; min_length guards against empty/trivial input.
    aid_claim: str = Field(min_length=10, description="Aid claim to verify")
    # Zero or more evidence strings (e.g. report names, distribution lists).
    supporting_evidence: List[str] = Field(default_factory=list)
    # Open-schema context supplied by the backend (e.g. security_status,
    # weather); values are rendered into the prompt as-is.
    context_factors: Dict[str, Any] = Field(default_factory=dict)
    # "auto" lets the service choose / fall back between configured providers.
    provider_preference: Literal["auto", "openai", "groq"] = "auto"


class HumanitarianVerificationResponse(BaseModel):
    """Response envelope for POST /ai/humanitarian/verify.

    All fields except ``success`` are optional: on success the provider,
    model, prompt_variant, and verification payload are populated; on
    failure only ``error`` is set.
    """

    # True when verification completed; False means error carries the reason.
    success: bool
    # LLM provider actually used (e.g. "openai" or "groq").
    provider: Optional[str] = None
    # Concrete model identifier the provider ran.
    model: Optional[str] = None
    # Which prompt template produced the result (e.g. "primary" or fallback).
    prompt_variant: Optional[str] = None
    # Parsed verification JSON (verdict, confidence, summary, ...).
    verification: Optional[Dict[str, Any]] = None
    # Human-readable failure reason when success is False.
    error: Optional[str] = None
133 changes: 133 additions & 0 deletions app/ai-service/services/humanitarian_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
"""
Prompt templating for humanitarian aid claim verification.

This module standardizes prompt construction across providers and model families
(OpenAI/Groq-compatible APIs) to keep scoring objective and reproducible.
"""

from typing import Any, Dict, List


# Sphere Handbook criteria grouped by sector. Rendered verbatim into the
# primary prompt so scoring stays anchored to one fixed, reproducible standard.
SPHERE_HANDBOOK_CRITERIA: Dict[str, List[str]] = {
    "water_supply_sanitation_hygiene": [
        "Minimum daily water access is sufficient and equitable.",
        "Sanitation facilities are safe, accessible, and culturally appropriate.",
        "Hygiene support (soap, menstrual hygiene, handwashing) is consistently available.",
    ],
    "food_security_nutrition": [
        "Food assistance is adequate in quantity, quality, and nutritional value.",
        "Distribution is regular, impartial, and reaches vulnerable groups.",
        "Nutrition-sensitive support addresses children, pregnant, and lactating women.",
    ],
    "shelter_settlement": [
        "Shelter provides safety, privacy, weather protection, and dignity.",
        "Settlement planning reduces overcrowding and health risks.",
        "Shelter materials and design align with local context and inclusion needs.",
    ],
    "health": [
        "Essential health services are accessible without discrimination.",
        "Disease prevention and outbreak readiness are in place.",
        "Referral pathways and continuity of care are functioning.",
    ],
    "protection_inclusion_accountability": [
        "Assistance is impartial and minimizes protection risks.",
        "Affected people can provide feedback and raise complaints safely.",
        "Data and decision-making include age, gender, disability, and risk context.",
    ],
}


class HumanitarianPromptEngine:
    """Builds standardized humanitarian verification prompts.

    Each ``build_*`` method returns a ``{"system": ..., "user": ...}`` pair
    suitable for chat-completion style APIs (OpenAI/Groq-compatible).
    """

    def build_primary_prompt(
        self,
        aid_claim: str,
        supporting_evidence: List[str],
        context_factors: Dict[str, Any],
    ) -> Dict[str, str]:
        """Return the full Sphere-criteria verification prompt pair."""
        # The user prompt is a fixed sequence of sections joined by blank lines.
        sections = [
            "Humanitarian Standard Verification Task",
            (
                "Assess whether the aid claim is credible, partially credible, "
                "inconclusive, or not credible. Your analysis must map to Sphere "
                "Handbook criteria and explain uncertainty."
            ),
            f"Sphere Criteria:\n{self._format_sphere_criteria()}",
            f"Aid Claim:\n{aid_claim}",
            f"Supporting Evidence:\n{self._format_evidence(supporting_evidence)}",
            f"Context Factors (from backend):\n{self._format_context_factors(context_factors)}",
            (
                # Exact output contract — the caller parses this as strict JSON.
                "Output JSON schema exactly:\n"
                "{\n"
                " \"verdict\": \"credible|partially_credible|inconclusive|not_credible\",\n"
                " \"confidence\": 0.0,\n"
                " \"summary\": \"short neutral summary\",\n"
                " \"criteria_assessment\": [\n"
                " {\"criterion\": \"string\", \"status\": \"met|partially_met|not_met|unknown\", \"reason\": \"string\"}\n"
                " ],\n"
                " \"risk_flags\": [\"string\"],\n"
                " \"missing_information\": [\"string\"],\n"
                " \"recommended_next_steps\": [\"string\"]\n"
                "}"
            ),
        ]

        return {
            "system": (
                "You are an objective humanitarian verification analyst. "
                "Evaluate aid claims only from provided evidence and context. "
                "Apply a Humanitarian Standard grounded in Sphere criteria. "
                "Do not infer facts that are not explicitly present. "
                "Return valid JSON only."
            ),
            "user": "\n\n".join(sections),
        }

    def build_fallback_prompt(
        self,
        aid_claim: str,
        supporting_evidence: List[str],
        context_factors: Dict[str, Any],
    ) -> Dict[str, str]:
        """Return the compact fallback prompt pair (no criteria catalogue)."""
        user_lines = [
            "Fallback Humanitarian Verification",
            "",
            f"Claim: {aid_claim}",
            f"Evidence: {self._format_evidence(supporting_evidence)}",
            f"Context: {self._format_context_factors(context_factors)}",
            "",
            "Respond with JSON only:",
            (
                "{\"verdict\":\"credible|partially_credible|inconclusive|not_credible\","
                "\"confidence\":0.0,\"summary\":\"\","
                "\"risk_flags\":[],\"missing_information\":[],\"recommended_next_steps\":[]}"
            ),
        ]

        return {
            "system": (
                "You verify humanitarian aid claims conservatively. "
                "Use only supplied inputs. Return strict JSON only."
            ),
            "user": "\n".join(user_lines),
        }

    def _format_sphere_criteria(self) -> str:
        """Render the criteria catalogue as a nested bullet list."""
        rendered: List[str] = []
        for section, items in SPHERE_HANDBOOK_CRITERIA.items():
            rendered.append(f"- {section}:")
            rendered.extend(f" * {item}" for item in items)
        return "\n".join(rendered)

    def _format_evidence(self, supporting_evidence: List[str]) -> str:
        """Render evidence entries as bullets, with an explicit empty marker."""
        if supporting_evidence:
            return "\n".join(f"- {entry}" for entry in supporting_evidence)
        return "- No supporting evidence provided"

    def _format_context_factors(self, context_factors: Dict[str, Any]) -> str:
        """Render context factors as key-sorted bullets for reproducibility."""
        if not context_factors:
            return "- No context factors provided"
        return "\n".join(
            f"- {key}: {context_factors[key]}" for key in sorted(context_factors)
        )
Loading
Loading