Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .env-example
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,6 @@ MODEL_CACHE_SIZE=1000 # Number of responses to cache

# Logging Configuration
ENVIRONMENT=development
LOG_LEVEL=INFO # DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_LEVEL=INFO
LOG_FORMAT=plain
# LOG_FILE=/path/to/log/file.log # Uncomment to enable file logging
# LOG_FILE=/path/to/log/file.log
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,10 @@ pyrightconfig.json

# Cursor
.cursorrules
.cursor
mcp.json

#chainlit
.chainlit/

src/utils/test_real_data.py
5 changes: 4 additions & 1 deletion app.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,4 +54,7 @@ async def progress_callback(msg, update):

@cl.on_chat_start
async def on_chat_start():
await cl.Message(content="👋 Welcome to VeriFact! The system is up and running. Type your claim or question to get started.").send()
await cl.Message(content="👋 Welcome to VeriFact! The system is up and running. Type your claim or question to get started.").send()

if __name__ == "__main__":
cl.run() # Let Chainlit handle it
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ dependencies = [
"chainlit",
"openai-agents>=0.0.15",
"serpapi>=0.1.5",
"supabase>=2.0.0",
]

[project.optional-dependencies]
Expand Down
2 changes: 1 addition & 1 deletion src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from fastapi import FastAPI

from api.factcheck import router as factcheck_router
from utils.logging.logging_config import setup_logging
from utils.logging_utils.logging_config import setup_logging

load_dotenv()
setup_logging()
Expand Down
131 changes: 131 additions & 0 deletions src/tests/test_database.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
"""
Database test script for VeriFact
Test all database operations including vector similarity search.
"""

import asyncio
import sys
from pathlib import Path

# Add the project root to Python path
project_root = Path(__file__).resolve().parent.parent.parent
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))

from dotenv import load_dotenv
from src.utils.db import db_manager
from src.verifact_agents.claim_detector import Claim
from src.verifact_agents.evidence_hunter import Evidence
from src.verifact_agents.verdict_writer import Verdict
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Move imports to the top of the file.

Module-level imports should be at the top to comply with PEP 8 standards.

-# Add the project root to Python path
-project_root = Path(__file__).resolve().parent.parent.parent
-if str(project_root) not in sys.path:
-    sys.path.insert(0, str(project_root))
-
-from dotenv import load_dotenv
-from src.utils.db import db_manager
-from src.verifact_agents.claim_detector import Claim
-from src.verifact_agents.evidence_hunter import Evidence
-from src.verifact_agents.verdict_writer import Verdict
+from dotenv import load_dotenv
+from src.utils.db import db_manager
+from src.verifact_agents.claim_detector import Claim
+from src.verifact_agents.evidence_hunter import Evidence
+from src.verifact_agents.verdict_writer import Verdict
+
+# Add the project root to Python path
+project_root = Path(__file__).resolve().parent.parent.parent
+if str(project_root) not in sys.path:
+    sys.path.insert(0, str(project_root))
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Add the project root to Python path
project_root = Path(__file__).resolve().parent.parent.parent
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
from dotenv import load_dotenv
from src.utils.db import db_manager
from src.verifact_agents.claim_detector import Claim
from src.verifact_agents.evidence_hunter import Evidence
from src.verifact_agents.verdict_writer import Verdict
from dotenv import load_dotenv
from src.utils.db import db_manager
from src.verifact_agents.claim_detector import Claim
from src.verifact_agents.evidence_hunter import Evidence
from src.verifact_agents.verdict_writer import Verdict
# Add the project root to Python path
project_root = Path(__file__).resolve().parent.parent.parent
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
🧰 Tools
🪛 GitHub Actions: CI

[error] 15-15: Ruff E402: Module level import not at top of file for 'from dotenv import load_dotenv'.


[error] 17-17: Ruff E402: Module level import not at top of file for 'from src.utils.db import db_manager'.


[error] 18-18: Ruff E402: Module level import not at top of file for 'from src.verifact_agents.claim_detector import Claim'.


[error] 19-19: Ruff E402: Module level import not at top of file for 'from src.verifact_agents.evidence_hunter import Evidence'.


[error] 20-20: Ruff E402: Module level import not at top of file for 'from src.verifact_agents.verdict_writer import Verdict'.

🤖 Prompt for AI Agents
In src/tests/test_database.py around lines 11 to 20, the imports are placed
after some code that modifies sys.path. To comply with PEP 8 standards, move all
module-level import statements to the very top of the file, before any other
code, including the sys.path modification. You can keep the sys.path
modification after the imports if necessary, but ideally, adjust the code so
that imports are at the top.


async def test_embedding():
    """Exercise embedding generation through db_manager.

    Generates an embedding for a fixed sample sentence and prints its
    dimension and a few values for manual inspection.

    Returns:
        bool: True if an embedding was produced, False otherwise.
    """
    print("\n0. Testing embedding generation...")

    text = "The sky is blue"
    print(f"Testing embedding for: '{text}'")

    embedding = await db_manager.generate_embedding(text)

    # Guard clause: bail out early on failure instead of nesting the
    # success path under an if/else.
    if not embedding:
        print("❌ Failed to generate embedding")
        return False

    # Plain string literal — the original was an f-string with no
    # placeholders (Ruff F541).
    print("✅ Embedding generated successfully")
    print(f"📏 Embedding dimension: {len(embedding)}")
    print(f"🔢 First 5 values: {embedding[:5]}")
    print(f"🔢 Last 5 values: {embedding[-5:]}")
    return True

async def _store_test_claim():
    """Store a known test claim; return its ID (falsy on failure)."""
    print("\n1. Testing claim storage...")
    test_claim = Claim(
        text="The Earth is flat",
        check_worthiness_score=0.9,
        specificity_score=0.8,
    )

    claim_id = await db_manager.store_claim(test_claim)
    if claim_id:
        print(f"✅ Claim stored successfully with ID: {claim_id}")
    else:
        print("❌ Failed to store claim")
    return claim_id


async def _store_test_evidence(claim_id):
    """Store one contradicting evidence item for the given claim."""
    print("\n2. Testing evidence storage...")
    test_evidence = [
        Evidence(
            content="NASA has provided extensive evidence that Earth is spherical",
            source="https://nasa.gov",
            relevance=0.9,
            stance="contradicting",
        )
    ]

    evidence_ids = await db_manager.store_evidence(claim_id, test_evidence)
    if evidence_ids:
        print(f"✅ Evidence stored successfully: {len(evidence_ids)} items")
    else:
        # Non-fatal: the original flow continues even if evidence storage fails.
        print("❌ Failed to store evidence")


async def _store_test_verdict(claim_id):
    """Store a 'false' verdict for the given claim."""
    print("\n3. Testing verdict storage...")
    test_verdict = Verdict(
        claim="The Earth is flat",
        verdict="false",
        confidence=0.95,
        explanation="The claim that Earth is flat is contradicted by overwhelming scientific evidence",
        sources=["https://nasa.gov", "https://scientific-american.com"],
    )

    verdict_id = await db_manager.store_verdict(claim_id, test_verdict)
    if verdict_id:
        print(f"✅ Verdict stored successfully with ID: {verdict_id}")
    else:
        # Non-fatal: reported but does not abort the remaining tests.
        print("❌ Failed to store verdict")


async def _search_similar_claims():
    """Run a vector similarity search and print any matches found."""
    print("\n4. Testing similar claims search...")
    similar_claims = await db_manager.find_similar_claims(
        "The Earth is not round",
        similarity_threshold=0.7,
        limit=3,
    )

    if similar_claims:
        print(f"✅ Found {len(similar_claims)} similar claims")
        for i, result in enumerate(similar_claims, 1):
            print(f" {i}. Similarity: {result.similarity_score:.3f}")
            print(f" Claim: {result.claim.text[:50]}...")
            if result.verdict:
                print(f" Verdict: {result.verdict.verdict}")
    else:
        # An empty result is expected on a freshly created database.
        print("ℹ️ No similar claims found (this is normal for a new database)")


async def test_database_operations():
    """Test all database operations.

    Runs, in order: embedding generation, claim storage, evidence
    storage, verdict storage, and vector similarity search. Decomposed
    into private helpers to keep each step small (the monolithic
    version tripped Codacy's length and complexity limits).

    Returns:
        bool: True if the embedding test and claim storage succeeded and
        no exception was raised; False otherwise.
    """
    load_dotenv()

    print("🧪 Testing VeriFact database operations...")
    print("=" * 50)

    try:
        # Test 0: embedding generation is a prerequisite for everything below.
        embedding_success = await test_embedding()
        if not embedding_success:
            print("❌ Embedding test failed - skipping database tests")
            return False

        # Test 1: claim storage — its ID is required by the later steps.
        claim_id = await _store_test_claim()
        if not claim_id:
            return False

        # Tests 2-4: evidence, verdict, and similarity search (non-fatal).
        await _store_test_evidence(claim_id)
        await _store_test_verdict(claim_id)
        await _search_similar_claims()

        print("\n✅ All database tests completed successfully!")
        return True

    except Exception as e:
        # Broad catch is deliberate: this is a top-level smoke-test script
        # that must report failure rather than crash with a traceback.
        print(f"\n❌ Database test failed: {e}")
        return False

if __name__ == "__main__":
    # Run the async test suite and translate its boolean result into a
    # process exit code (0 = success, 1 = failure) for CI consumption.
    ok = asyncio.run(test_database_operations())
    sys.exit(0 if ok else 1)
Loading
Loading