diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..a2c41a0 --- /dev/null +++ b/.env.example @@ -0,0 +1,16 @@ +# === Payments (x402 on Base) === +PAYMENTS_RECEIVABLE_ADDRESS= # Your USDC wallet address on Base Mainnet +FACILITATOR_URL=https://facilitator.daydreams.systems # x402 facilitator endpoint +NETWORK=base # Blockchain network (base = Base Mainnet) + +# === API Keys === +BRAVE_API_KEY= # Brave Search API key (https://api.search.brave.com) +OPENAI_API_KEY= # OpenAI API key for GPT-4o-mini synthesis + +# === Server === +PORT=3000 # HTTP port (Railway sets this automatically) +CACHE_TTL_SECONDS=300 # In-memory cache TTL in seconds (default: 5 min) + +# === Optional === +# PRIVATE_KEY= # Agent wallet private key (for outbound payments) +# LOG_LEVEL=info # Logging level (debug|info|warn|error) diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d77474a --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +node_modules/ +bun.lock diff --git a/DEPLOY.md b/DEPLOY.md new file mode 100644 index 0000000..2a01aa7 --- /dev/null +++ b/DEPLOY.md @@ -0,0 +1,72 @@ +# Deploying Queryx on Railway + +## Prerequisites +- [Railway CLI](https://docs.railway.app/guides/cli) installed +- A Railway account +- Environment variables ready (see `.env.example`) + +## Quick Deploy + +### 1. Login & Init +```bash +railway login +railway init # creates a new project +railway link # or link to existing project +``` + +### 2. 
Set Environment Variables +Via Railway dashboard → your project → Variables tab, set: + +| Variable | Required | Description | +|----------|----------|-------------| +| `PAYMENTS_RECEIVABLE_ADDRESS` | ✅ | USDC wallet on Base | +| `FACILITATOR_URL` | ✅ | `https://facilitator.daydreams.systems` | +| `NETWORK` | ✅ | `base` | +| `BRAVE_API_KEY` | ✅ | Brave Search API key | +| `OPENAI_API_KEY` | ✅ | OpenAI API key | +| `PORT` | ❌ | Auto-set by Railway | +| `CACHE_TTL_SECONDS` | ❌ | Default: 300 | + +Or via CLI: +```bash +railway variables set BRAVE_API_KEY=your_key +railway variables set OPENAI_API_KEY=your_key +railway variables set PAYMENTS_RECEIVABLE_ADDRESS=0x... +railway variables set FACILITATOR_URL=https://facilitator.daydreams.systems +railway variables set NETWORK=base +``` + +### 3. Deploy +```bash +railway up +``` + +Railway auto-detects the `Dockerfile` and builds. + +### 4. Custom Domain (Optional) +1. Railway dashboard → Settings → Domains +2. Add custom domain: `queryx.run` +3. Add CNAME record pointing to Railway's domain +4. Wait for SSL provisioning (~2 min) + +### 5. Verify +```bash +# Health check +curl https://your-app.up.railway.app/health + +# Should return 402 (no payment) +curl https://your-app.up.railway.app/v1/search?q=test + +# Run smoke test +./scripts/smoke-test.sh https://your-app.up.railway.app +``` + +## Local Docker Test +```bash +docker build -t queryx . +docker run -p 3000:3000 --env-file .env queryx +curl http://localhost:3000/health +``` + +## CI +GitHub Actions runs on every push to `main` and on PRs. See `.github/workflows/ci.yml`. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..bee5b1c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,9 @@ +FROM oven/bun:1 AS base +WORKDIR /app +COPY package.json bun.lock* ./ +RUN bun install --frozen-lockfile || bun install +COPY . . 
+EXPOSE 3000 +HEALTHCHECK --interval=30s --timeout=5s --start-period=10s \ + CMD curl -f http://localhost:3000/health || exit 1 +CMD ["bun", "run", "src/index.ts"] diff --git a/README.md b/README.md index 76ec10f..3e86647 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # Queryx 🔍 +![CI](https://github.com/langoustine69/queryx/actions/workflows/ci.yml/badge.svg) +![Network: Base](https://img.shields.io/badge/network-Base-blue) +![Payment: x402](https://img.shields.io/badge/payment-x402%20USDC-green) + > Agent-native search API. Pay per query in USDC via x402. No accounts. No subscriptions. Structured JSON. **5-14x cheaper than Perplexity. Native x402 payments. Zero friction for agents.** @@ -37,6 +41,13 @@ curl -H "PAYMENT-SIGNATURE: " \ | No account needed | ❌ | ❌ | ✅ | | Agent JSON output | ❌ | ✅ | ✅ | +## Documentation + +- [OpenAPI Spec](./openapi.json) — Full API specification +- [Agent Guide](./docs/AGENT_GUIDE.md) — How to use Queryx as an AI agent +- [Pricing](./docs/PRICING.md) — Detailed pricing and comparisons +- [Deploy Guide](./DEPLOY.md) — Railway deployment from zero + ## License MIT diff --git a/bun.lock b/bun.lock deleted file mode 100644 index 70b0f21..0000000 --- a/bun.lock +++ /dev/null @@ -1,21 +0,0 @@ -{ - "lockfileVersion": 1, - "configVersion": 1, - "workspaces": { - "": { - "name": "queryx", - "devDependencies": { - "@types/bun": "latest", - }, - }, - }, - "packages": { - "@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="], - - "@types/node": ["@types/node@25.3.2", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-RpV6r/ij22zRRdyBPcxDeKAzH43phWVKEjL2iksqo1Vz3CuBUrgmPpPhALKiRfU7OMCmeeO9vECBMsV0hMTG8Q=="], - - "bun-types": ["bun-types@1.3.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg=="], - - 
"undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="], - } -} diff --git a/docs/AGENT_GUIDE.md b/docs/AGENT_GUIDE.md new file mode 100644 index 0000000..0752a68 --- /dev/null +++ b/docs/AGENT_GUIDE.md @@ -0,0 +1,85 @@ +# Queryx Agent Guide + +> This guide is for AI agents consuming the Queryx API programmatically. + +## Overview + +Queryx is a web search API that returns AI-synthesized answers with source citations. Payment is per-query via the [x402 protocol](https://x402.org) using USDC on Base Mainnet. + +## Endpoints + +| Endpoint | Method | Cost | Description | +|----------|--------|------|-------------| +| `/v1/search?q=...` | GET | $0.001 USDC | Web search + synthesis | +| `/v1/search/news?q=...` | GET | $0.001 USDC | News-focused search | +| `/v1/search/deep` | POST | $0.005 USDC | Multi-source deep research | +| `/health` | GET | Free | Health check | + +## x402 Payment Flow + +1. **Make a request without payment** → Server returns `HTTP 402` +2. **Parse the 402 response** — headers contain: + - `X-PAYMENT-REQUIRED`: JSON with payment requirements + - Includes: `receiverAddress`, `amount`, `network`, `facilitatorUrl` +3. **Construct payment proof** — Sign a USDC transfer on Base using your wallet +4. **Retry with `X-PAYMENT` header** — Include the signed payment proof +5. 
**Server validates via facilitator** → Returns `200` with search results + +### Example (pseudo-code) + +``` +# Step 1: Initial request +GET /v1/search?q=quantum+computing HTTP/1.1 +→ 402 Payment Required +→ X-PAYMENT-REQUIRED: {"amount":"0.001","currency":"USDC","network":"base",...} + +# Step 2: Retry with payment +GET /v1/search?q=quantum+computing HTTP/1.1 +X-PAYMENT: +→ 200 OK +→ {"query":"quantum computing","answer":"...","sources":[...],...} +``` + +### Using `@x402/fetch` + +```typescript +import { createX402Fetch } from "@x402/fetch"; + +const x402Fetch = createX402Fetch({ privateKey: "0x..." }); +const res = await x402Fetch("https://queryx.run/v1/search?q=test"); +const data = await res.json(); +``` + +## Response Format + +All paid endpoints return: + +```json +{ + "query": "your query", + "answer": "Synthesized answer from multiple sources...", + "sources": [ + { "title": "Source Title", "url": "https://...", "snippet": "...", "published": "2026-02-27T..." } + ], + "confidence": 0.87, + "freshness": { "fetchedAt": "2026-02-27T10:00:00Z", "resultsAge": "4h" }, + "model": "queryx-fast-v1", + "tokens": { "in": 312, "out": 187 } +} +``` + +## Error Handling + +| Status | Meaning | Action | +|--------|---------|--------| +| 400 | Invalid query/body | Fix request parameters | +| 402 | Payment required | Construct and include x402 payment | +| 429 | Rate limited | Back off and retry | +| 500 | Server error | Retry with exponential backoff | + +## Best Practices + +- **Cache responses** client-side for repeated queries (server caches for 5 min) +- **Use `/v1/search`** for quick lookups, `/v1/search/deep` for research tasks +- **Check `confidence`** — below 0.5 may indicate low-quality sources +- **Use `count` parameter** — fewer sources = faster + cheaper synthesis diff --git a/docs/PRICING.md b/docs/PRICING.md new file mode 100644 index 0000000..5e1e5f3 --- /dev/null +++ b/docs/PRICING.md @@ -0,0 +1,46 @@ +# Queryx Pricing + +## Per-Query Costs + +| Endpoint | 
Cost (USDC) | Cost (USD) | +|----------|-------------|------------| +| `/v1/search` | 0.001 | $0.001 | +| `/v1/search/news` | 0.001 | $0.001 | +| `/v1/search/deep` | 0.005 | $0.005 | +| `/health` | Free | Free | + +All payments are in USDC on Base Mainnet via the x402 protocol. + +## Monthly Cost Estimates + +| Monthly Queries | Search Only | Deep Only | Mixed (80/20) | +|-----------------|-------------|-----------|----------------| +| 1,000 | $1.00 | $5.00 | $1.80 | +| 10,000 | $10.00 | $50.00 | $18.00 | +| 100,000 | $100.00 | $500.00 | $180.00 | + +*Mixed assumes 80% standard search, 20% deep research.* + +## Comparison + +| Provider | Search Cost | Includes AI Synthesis | Payment | +|----------|-----------|----------------------|---------| +| **Queryx** | $0.001/query | ✅ | USDC (x402) | +| Perplexity API | $0.005/query | ✅ | Credit card | +| Tavily | $0.001/query | Partial | Credit card | +| Brave Search API | Free tier / $0.003 | ❌ | Credit card | +| SerpAPI | $0.0125/query | ❌ | Credit card | + +### Why Queryx? + +- **No signup** — pay with USDC, no API key management +- **Agent-native** — designed for AI agent consumption +- **Transparent** — pay exactly per query, no subscriptions +- **Decentralized payments** — x402 on Base, no intermediary + +## Facilitator + +All payments are validated through the x402 facilitator: +- **URL:** `https://facilitator.daydreams.systems` +- **Network:** Base Mainnet +- **Currency:** USDC (0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913) diff --git a/openapi.json b/openapi.json new file mode 100644 index 0000000..3424b09 --- /dev/null +++ b/openapi.json @@ -0,0 +1,166 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Queryx", + "version": "0.1.0", + "description": "Agent-native web search API. 
Pay per query in USDC via x402 on Base.", + "contact": { "url": "https://queryx.run" }, + "license": { "name": "MIT" } + }, + "servers": [ + { "url": "https://queryx.run", "description": "Production" }, + { "url": "http://localhost:3000", "description": "Local development" } + ], + "security": [{ "x402": [] }], + "components": { + "securitySchemes": { + "x402": { + "type": "apiKey", + "in": "header", + "name": "X-PAYMENT", + "description": "x402 USDC payment proof on Base Mainnet. See https://x402.org for the protocol spec. The server returns HTTP 402 with payment requirements in the response headers if this is missing or invalid." + } + }, + "schemas": { + "Source": { + "type": "object", + "required": ["title", "url", "snippet"], + "properties": { + "title": { "type": "string" }, + "url": { "type": "string", "format": "uri" }, + "snippet": { "type": "string" }, + "published": { "type": "string", "format": "date-time" } + } + }, + "Freshness": { + "type": "object", + "required": ["fetchedAt", "resultsAge"], + "properties": { + "fetchedAt": { "type": "string", "format": "date-time" }, + "resultsAge": { "type": "string", "examples": ["4h", "2d", "<1h"] } + } + }, + "Tokens": { + "type": "object", + "required": ["in", "out"], + "properties": { + "in": { "type": "integer" }, + "out": { "type": "integer" } + } + }, + "SearchResponse": { + "type": "object", + "required": ["query", "answer", "sources", "confidence", "freshness", "model", "tokens"], + "properties": { + "query": { "type": "string" }, + "answer": { "type": "string" }, + "sources": { "type": "array", "items": { "$ref": "#/components/schemas/Source" } }, + "confidence": { "type": "number", "minimum": 0, "maximum": 1 }, + "freshness": { "$ref": "#/components/schemas/Freshness" }, + "model": { "type": "string" }, + "tokens": { "$ref": "#/components/schemas/Tokens" } + } + }, + "Error": { + "type": "object", + "required": ["error", "code", "status"], + "properties": { + "error": { "type": "string" }, + "code": { 
"type": "string" }, + "status": { "type": "integer" } + } + }, + "Health": { + "type": "object", + "required": ["status", "version", "uptime"], + "properties": { + "status": { "type": "string", "enum": ["ok"] }, + "version": { "type": "string" }, + "uptime": { "type": "integer", "description": "Uptime in seconds" } + } + } + } + }, + "paths": { + "/v1/search": { + "get": { + "operationId": "search", + "summary": "Web search + AI synthesis", + "description": "Search the web and get an AI-synthesized answer with sources. Costs $0.001 USDC per call.", + "parameters": [ + { "name": "q", "in": "query", "required": true, "schema": { "type": "string" }, "example": "latest developments in quantum computing" }, + { "name": "count", "in": "query", "required": false, "schema": { "type": "integer", "minimum": 1, "maximum": 20, "default": 5 } } + ], + "responses": { + "200": { + "description": "Successful search response", + "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SearchResponse" } } } + }, + "400": { "description": "Invalid query", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Error" } } } }, + "402": { "description": "Payment required — include x402 payment header" }, + "429": { "description": "Rate limit exceeded" }, + "500": { "description": "Internal server error" } + } + } + }, + "/v1/search/news": { + "get": { + "operationId": "searchNews", + "summary": "News-focused search + AI synthesis", + "description": "Search recent news and get an AI-synthesized answer. 
Costs $0.001 USDC per call.", + "parameters": [ + { "name": "q", "in": "query", "required": true, "schema": { "type": "string" }, "example": "AI regulation Europe 2026" }, + { "name": "count", "in": "query", "required": false, "schema": { "type": "integer", "minimum": 1, "maximum": 20, "default": 5 } } + ], + "responses": { + "200": { "description": "Successful news search", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SearchResponse" } } } }, + "400": { "description": "Invalid query" }, + "402": { "description": "Payment required" }, + "429": { "description": "Rate limit exceeded" }, + "500": { "description": "Internal server error" } + } + } + }, + "/v1/search/deep": { + "post": { + "operationId": "searchDeep", + "summary": "Multi-source deep research", + "description": "Deep research across multiple sources with comprehensive synthesis. Costs $0.005 USDC per call.", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": ["query"], + "properties": { + "query": { "type": "string", "example": "comparison of vector databases for production RAG systems" }, + "sources": { "type": "integer", "minimum": 1, "maximum": 10, "default": 5 } + } + } + } + } + }, + "responses": { + "200": { "description": "Successful deep research", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SearchResponse" } } } }, + "400": { "description": "Invalid request body" }, + "402": { "description": "Payment required" }, + "422": { "description": "Unprocessable query" }, + "429": { "description": "Rate limit exceeded" }, + "500": { "description": "Internal server error" } + } + } + }, + "/health": { + "get": { + "operationId": "health", + "summary": "Health check (free)", + "description": "Returns server health status. 
No payment required.", + "security": [], + "responses": { + "200": { "description": "Healthy", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Health" } } } } + } + } + } + } +} diff --git a/package.json b/package.json index d5cd7f0..401ab01 100644 --- a/package.json +++ b/package.json @@ -8,5 +8,14 @@ }, "devDependencies": { "@types/bun": "latest" + }, + "dependencies": { + "@lucid-agents/core": "^2.5.0", + "@lucid-agents/hono": "^0.9.6", + "@lucid-agents/http": "^1.10.2", + "@lucid-agents/payments": "^2.5.0", + "@lucid-agents/wallet": "^0.6.2", + "hono": "^4.12.3", + "zod": "^3.25.0-beta.20250519T094321" } } diff --git a/railway.json b/railway.json new file mode 100644 index 0000000..e1c0d59 --- /dev/null +++ b/railway.json @@ -0,0 +1,10 @@ +{ + "$schema": "https://railway.app/railway.schema.json", + "build": { "builder": "DOCKERFILE" }, + "deploy": { + "healthcheckPath": "/health", + "healthcheckTimeout": 10, + "restartPolicyType": "ON_FAILURE", + "restartPolicyMaxRetries": 3 + } +} diff --git a/scripts/smoke-test.sh b/scripts/smoke-test.sh new file mode 100755 index 0000000..c127a9d --- /dev/null +++ b/scripts/smoke-test.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Queryx post-deploy smoke test +# Usage: ./scripts/smoke-test.sh [BASE_URL] + +set -euo pipefail + +BASE_URL="${1:-http://localhost:3000}" +PASS=0 +FAIL=0 + +check() { + local desc="$1" expected="$2" actual="$3" + if [ "$expected" = "$actual" ]; then + echo "✅ $desc (got $actual)" + ((PASS++)) + else + echo "❌ $desc (expected $expected, got $actual)" + ((FAIL++)) + fi +} + +echo "🔍 Smoke testing $BASE_URL" +echo "---" + +# 1. Health returns 200 +STATUS=$(curl -s -o /dev/null -w '%{http_code}' "$BASE_URL/health") +check "/health returns 200" "200" "$STATUS" + +# 2. 
Health body has status: ok +BODY=$(curl -s "$BASE_URL/health") +if echo "$BODY" | grep -q '"status":"ok"'; then + echo "✅ /health body contains status:ok" + ((PASS++)) +else + echo "❌ /health body missing status:ok — got: $BODY" + ((FAIL++)) +fi + +# 3. Search returns 402 without payment +STATUS=$(curl -s -o /dev/null -w '%{http_code}' "$BASE_URL/v1/search?q=test") +check "/v1/search returns 402 without payment" "402" "$STATUS" + +# 4. News returns 402 +STATUS=$(curl -s -o /dev/null -w '%{http_code}' "$BASE_URL/v1/search/news?q=test") +check "/v1/search/news returns 402 without payment" "402" "$STATUS" + +# 5. Deep returns 402 +STATUS=$(curl -s -o /dev/null -w '%{http_code}' -X POST -H 'Content-Type: application/json' -d '{"query":"test"}' "$BASE_URL/v1/search/deep") +check "/v1/search/deep returns 402 without payment" "402" "$STATUS" + +echo "---" +echo "Results: $PASS passed, $FAIL failed" + +[ "$FAIL" -eq 0 ] && exit 0 || exit 1 diff --git a/src/agent.ts b/src/agent.ts new file mode 100644 index 0000000..33770fc --- /dev/null +++ b/src/agent.ts @@ -0,0 +1,58 @@ +/** + * Queryx Lucid Agent — x402 paid API with TDD. + * Core agent setup with extensions and entrypoints. 
+ */ +import { createAgent } from "@lucid-agents/core"; +import { payments, paymentsFromEnv } from "@lucid-agents/payments"; +import { z } from "zod"; +import { + SearchQuerySchema, + DeepSearchBodySchema, + SearchResponseSchema, +} from "./schemas"; + +const NETWORK = process.env.NETWORK || "base"; + +const paymentsConfig = paymentsFromEnv({ + network: NETWORK, +}); + +const paymentsExt = payments({ config: paymentsConfig }); + +export const runtime = await createAgent({ + name: "queryx", + url: `http://localhost:${process.env.PORT || 3000}`, + version: "0.1.0", + description: + "AI-powered web search agent accepting x402 USDC micropayments on Base.", + capabilities: { + streaming: false, + pushNotifications: false, + }, +}) + .use(paymentsExt) + .addEntrypoint({ + key: "search", + title: "Web Search", + description: "Web search + AI synthesis", + input: SearchQuerySchema, + output: SearchResponseSchema, + invoke: { price: { amount: "0.001", currency: "USDC" } }, + }) + .addEntrypoint({ + key: "search-news", + title: "News Search", + description: "News-focused search + AI synthesis", + input: SearchQuerySchema, + output: SearchResponseSchema, + invoke: { price: { amount: "0.001", currency: "USDC" } }, + }) + .addEntrypoint({ + key: "search-deep", + title: "Deep Research", + description: "Multi-source deep research + AI synthesis", + input: DeepSearchBodySchema, + output: SearchResponseSchema, + invoke: { price: { amount: "0.005", currency: "USDC" } }, + }) + .build(); diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000..1f4b825 --- /dev/null +++ b/src/index.ts @@ -0,0 +1,37 @@ +/** + * Queryx server entrypoint. + * Uses Lucid Agents Hono adapter with x402 payment middleware. 
+ */ +import { createAgentApp } from "@lucid-agents/hono"; +import { Hono } from "hono"; +import { runtime } from "./agent"; +import searchRoute from "./routes/search"; +import searchNewsRoute from "./routes/search-news"; +import searchDeepRoute from "./routes/search-deep"; + +const startTime = Date.now(); + +const { app } = await createAgentApp(runtime, { + afterMount(honoApp: Hono) { + // Mount custom routes after agent routes + honoApp.route("/v1/search/news", searchNewsRoute); + honoApp.route("/v1/search/deep", searchDeepRoute); + honoApp.route("/v1/search", searchRoute); + + // Health endpoint (free, no payment required) + honoApp.get("/health", (c) => + c.json({ + status: "ok" as const, + version: "0.1.0", + uptime: Math.floor((Date.now() - startTime) / 1000), + }) + ); + }, +}); + +const port = Number(process.env.PORT || 3000); + +export default { + port, + fetch: app.fetch, +}; diff --git a/src/logic/search.ts b/src/logic/search.ts new file mode 100644 index 0000000..1a104e3 --- /dev/null +++ b/src/logic/search.ts @@ -0,0 +1,84 @@ +/** + * Query handling + source coordination. + * Orchestrates brave search, ranking, synthesis, and caching. 
+ */ +import { braveSearch, type SearchResult, type BraveSearchOptions } from "./brave"; +import { rank as rankAndDeduplicate } from "./rank"; +import { synthesise as synthesize } from "./synth"; +import { Cache } from "./cache"; +import type { SearchResponse } from "../schemas"; + +const cache = new Cache( + Number(process.env.CACHE_TTL_SECONDS || 300) * 1000 +); + + +export function normalizeQuery(q: string): string { + return q.trim().toLowerCase().replace(/\s+/g, " "); +} + +function computeResultsAge(sources: SearchResult[]): string { + if (!sources.length) return "unknown"; + const now = Date.now(); + const published = sources + .filter((s) => s.published) + .map((s) => new Date(s.published!).getTime()) + .filter((t) => !isNaN(t)); + if (!published.length) return "unknown"; + const newest = Math.max(...published); + const diffMs = now - newest; + const hours = Math.round(diffMs / 3600000); + if (hours < 1) return "<1h"; + if (hours < 24) return `${hours}h`; + return `${Math.round(hours / 24)}d`; +} + +export interface SearchOptions { + type?: "web" | "news"; + count?: number; + deep?: boolean; +} + +export async function search( + query: string, + options: SearchOptions = {} +): Promise { + const normalized = normalizeQuery(query); + const cacheKey = `${options.type || "web"}:${options.deep ? "deep:" : ""}${normalized}`; + + const cached = cache.get(cacheKey); + if (cached) return cached.value; + + const braveOpts: BraveSearchOptions = { + count: options.count || 5, + type: options.type || "web", + }; + if (options.type === "news") braveOpts.freshness = "day"; + + const rawResults = await braveSearch(normalized, braveOpts); + const ranked = rankAndDeduplicate(rawResults); + const synthResult = await synthesize(normalized, ranked); + + const response: SearchResponse = { + query, + answer: synthResult.answer, + sources: ranked.map((r) => ({ + title: r.title, + url: r.url, + snippet: r.snippet, + ...(r.published ? 
{ published: r.published } : {}), + })), + confidence: synthResult.confidence, + freshness: { + fetchedAt: new Date().toISOString(), + resultsAge: computeResultsAge(ranked), + }, + model: synthResult.model, + tokens: synthResult.tokens, + }; + + cache.set(cacheKey, response); + return response; +} + +export { cache }; diff --git a/src/routes/search-deep.ts b/src/routes/search-deep.ts new file mode 100644 index 0000000..5ea24bf --- /dev/null +++ b/src/routes/search-deep.ts @@ -0,0 +1,32 @@ +/** + * POST /v1/search/deep — multi-source deep research + */ +import { Hono } from "hono"; +import { DeepSearchBodySchema } from "../schemas"; +import { search } from "../logic/search"; + +const app = new Hono(); + +app.post("/", async (c) => { + const body = await c.req.json().catch(() => null); + if (!body) { + return c.json( + { error: "Invalid JSON body", code: "INVALID_BODY", status: 400 }, + 400 + ); + } + const parsed = DeepSearchBodySchema.safeParse(body); + if (!parsed.success) { + return c.json( + { error: "Invalid request", code: "INVALID_BODY", status: 400 }, + 400 + ); + } + const result = await search(parsed.data.query, { + deep: true, + count: parsed.data.sources, + }); + return c.json(result); +}); + +export default app; diff --git a/src/routes/search-news.ts b/src/routes/search-news.ts new file mode 100644 index 0000000..7c84250 --- /dev/null +++ b/src/routes/search-news.ts @@ -0,0 +1,28 @@ +/** + * GET /v1/search/news — news-focused search + */ +import { Hono } from "hono"; +import { SearchQuerySchema } from "../schemas"; +import { search } from "../logic/search"; + +const app = new Hono(); + +app.get("/", async (c) => { + const parsed = SearchQuerySchema.safeParse({ + q: c.req.query("q"), + count: c.req.query("count"), + }); + if (!parsed.success) { + return c.json( + { error: "Invalid query", code: "INVALID_QUERY", status: 400 }, + 400 + ); + } + const result = await search(parsed.data.q, { + type: "news", + count: parsed.data.count, + }); + return 
c.json(result); +}); + +export default app; diff --git a/src/routes/search.ts b/src/routes/search.ts new file mode 100644 index 0000000..467f684 --- /dev/null +++ b/src/routes/search.ts @@ -0,0 +1,25 @@ +/** + * GET /v1/search — web search + AI synthesis + */ +import { Hono } from "hono"; +import { SearchQuerySchema } from "../schemas"; +import { search } from "../logic/search"; + +const app = new Hono(); + +app.get("/", async (c) => { + const parsed = SearchQuerySchema.safeParse({ + q: c.req.query("q"), + count: c.req.query("count"), + }); + if (!parsed.success) { + return c.json( + { error: "Invalid query", code: "INVALID_QUERY", status: 400 }, + 400 + ); + } + const result = await search(parsed.data.q, { count: parsed.data.count }); + return c.json(result); +}); + +export default app; diff --git a/src/schemas/index.ts b/src/schemas/index.ts new file mode 100644 index 0000000..3caa3f9 --- /dev/null +++ b/src/schemas/index.ts @@ -0,0 +1,57 @@ +/** + * Zod v4 schemas for all Queryx API endpoints. 
+ */ +import { z } from "zod"; + +export const SourceSchema = z.object({ + title: z.string(), + url: z.string().url(), + snippet: z.string(), + published: z.string().optional(), +}); + +export const FreshnessSchema = z.object({ + fetchedAt: z.string(), + resultsAge: z.string(), +}); + +export const TokensSchema = z.object({ + in: z.number().int().nonnegative(), + out: z.number().int().nonnegative(), +}); + +export const SearchResponseSchema = z.object({ + query: z.string(), + answer: z.string(), + sources: z.array(SourceSchema), + confidence: z.number().min(0).max(1), + freshness: FreshnessSchema, + model: z.string(), + tokens: TokensSchema, +}); + +export const SearchQuerySchema = z.object({ + q: z.string().min(1), + count: z.coerce.number().int().min(1).max(20).optional().default(5), +}); + +export const DeepSearchBodySchema = z.object({ + query: z.string().min(1), + sources: z.number().int().min(1).max(10).optional().default(5), +}); + +export const ErrorSchema = z.object({ + error: z.string(), + code: z.string(), + status: z.number(), +}); + +export const HealthSchema = z.object({ + status: z.literal("ok"), + version: z.string(), + uptime: z.number(), +}); + +export type SearchResponse = z.infer; +export type Source = z.infer; +export type ErrorResponse = z.infer; diff --git a/tests/contract/schemas.test.ts b/tests/contract/schemas.test.ts new file mode 100644 index 0000000..9fed958 --- /dev/null +++ b/tests/contract/schemas.test.ts @@ -0,0 +1,136 @@ +/** + * Contract tests — all request/response schemas, error envelopes. 
+ */ +import { describe, test, expect } from "bun:test"; +import { + SearchResponseSchema, + SearchQuerySchema, + DeepSearchBodySchema, + ErrorSchema, + HealthSchema, + SourceSchema, +} from "../../src/schemas"; + +describe("SearchQuerySchema", () => { + test("accepts valid query", () => { + const result = SearchQuerySchema.safeParse({ q: "test query" }); + expect(result.success).toBe(true); + }); + + test("rejects empty query", () => { + const result = SearchQuerySchema.safeParse({ q: "" }); + expect(result.success).toBe(false); + }); + + test("defaults count to 5", () => { + const result = SearchQuerySchema.parse({ q: "test" }); + expect(result.count).toBe(5); + }); + + test("coerces string count", () => { + const result = SearchQuerySchema.parse({ q: "test", count: "3" }); + expect(result.count).toBe(3); + }); +}); + +describe("DeepSearchBodySchema", () => { + test("accepts valid body", () => { + const result = DeepSearchBodySchema.safeParse({ query: "deep test" }); + expect(result.success).toBe(true); + }); + + test("defaults sources to 5", () => { + const result = DeepSearchBodySchema.parse({ query: "test" }); + expect(result.sources).toBe(5); + }); + + test("rejects sources > 10", () => { + const result = DeepSearchBodySchema.safeParse({ query: "t", sources: 15 }); + expect(result.success).toBe(false); + }); +}); + +describe("SearchResponseSchema", () => { + const validResponse = { + query: "test", + answer: "This is an answer.", + sources: [ + { title: "Source 1", url: "https://example.com", snippet: "snippet" }, + ], + confidence: 0.87, + freshness: { fetchedAt: "2026-02-27T10:00:00Z", resultsAge: "4h" }, + model: "queryx-fast-v1", + tokens: { in: 312, out: 187 }, + }; + + test("accepts valid response", () => { + const result = SearchResponseSchema.safeParse(validResponse); + expect(result.success).toBe(true); + }); + + test("rejects confidence > 1", () => { + const result = SearchResponseSchema.safeParse({ + ...validResponse, + confidence: 1.5, + }); + 
expect(result.success).toBe(false); + }); + + test("rejects missing freshness", () => { + const { freshness, ...rest } = validResponse; + const result = SearchResponseSchema.safeParse(rest); + expect(result.success).toBe(false); + }); +}); + +describe("SourceSchema", () => { + test("accepts source with published date", () => { + const result = SourceSchema.safeParse({ + title: "Test", + url: "https://example.com", + snippet: "A snippet", + published: "2026-02-27T10:00:00Z", + }); + expect(result.success).toBe(true); + }); + + test("accepts source without published date", () => { + const result = SourceSchema.safeParse({ + title: "Test", + url: "https://example.com", + snippet: "A snippet", + }); + expect(result.success).toBe(true); + }); +}); + +describe("ErrorSchema", () => { + test("accepts valid error", () => { + const result = ErrorSchema.safeParse({ + error: "Not found", + code: "NOT_FOUND", + status: 404, + }); + expect(result.success).toBe(true); + }); +}); + +describe("HealthSchema", () => { + test("accepts valid health", () => { + const result = HealthSchema.safeParse({ + status: "ok", + version: "0.1.0", + uptime: 3600, + }); + expect(result.success).toBe(true); + }); + + test("rejects wrong status literal", () => { + const result = HealthSchema.safeParse({ + status: "error", + version: "0.1.0", + uptime: 0, + }); + expect(result.success).toBe(false); + }); +}); diff --git a/tests/freshness/cache-freshness.test.ts b/tests/freshness/cache-freshness.test.ts new file mode 100644 index 0000000..0442475 --- /dev/null +++ b/tests/freshness/cache-freshness.test.ts @@ -0,0 +1,50 @@ +/** + * Freshness/cache tests — TTL behaviour, staleness metadata. + * Note: Cache constructor takes ttlSeconds (multiplied by 1000 internally). 
+ */
+import { describe, test, expect } from "bun:test";
+import { Cache } from "../../src/logic/cache";
+
+describe("Cache TTL", () => {
+  test("returns cached value within TTL", () => {
+    const cache = new Cache(60); // 60 seconds
+    cache.set("key", "value");
+    const result = cache.get("key");
+    expect(result).not.toBeNull();
+    expect(result!.value).toBe("value");
+    expect(result!.stale).toBe(false); // within TTL, entry must not be flagged stale
+  });
+
+  test("returns null after TTL expires", async () => {
+    const cache = new Cache(0.001); // 1ms = 0.001 seconds
+    cache.set("key", "value");
+    await new Promise((r) => setTimeout(r, 20)); // sleep well past the 1ms TTL so expiry is deterministic
+    expect(cache.get("key")).toBeNull();
+  });
+
+  test("tracks hit/miss stats", () => {
+    const cache = new Cache(60);
+    cache.set("a", "1");
+    cache.get("a"); // hit
+    cache.get("b"); // miss
+    const s = cache.stats();
+    expect(s.hits).toBe(1);
+    expect(s.misses).toBe(1);
+  });
+
+  test("clear empties cache", () => {
+    const cache = new Cache(60);
+    cache.set("a", "1");
+    cache.set("b", "2");
+    cache.clear();
+    expect(cache.get("a")).toBeNull();
+    expect(cache.stats().size).toBe(0); // clear() also resets reported size
+  });
+
+  test("normalizeKey produces stable keys", () => {
+    expect(Cache.normalizeKey("Hello World")).toBe("hello world"); // lowercased
+    expect(Cache.normalizeKey("test", { b: "2", a: "1" })).toBe( // params serialized in sorted key order
+      "test|a=1&b=2"
+    );
+  });
+});
diff --git a/tests/integration/endpoints.test.ts b/tests/integration/endpoints.test.ts
new file mode 100644
index 0000000..a6b4c9f
--- /dev/null
+++ b/tests/integration/endpoints.test.ts
@@ -0,0 +1,52 @@
+/**
+ * Integration tests — endpoint responses, error handling.
+ * Tests route handling without the payment middleware (unit-level integration).
+ */
+import { describe, test, expect } from "bun:test";
+import searchRoute from "../../src/routes/search";
+import searchNewsRoute from "../../src/routes/search-news";
+import searchDeepRoute from "../../src/routes/search-deep";
+
+describe("GET /v1/search", () => {
+  test("returns 400 for missing query", async () => {
+    const req = new Request("http://localhost/"); // no ?q= param
+    const res = await searchRoute.fetch(req); // routes expose a fetch(Request) handler — presumably a sub-app; confirm against src/routes
+    expect(res.status).toBe(400);
+    const body = await res.json();
+    expect(body.code).toBe("INVALID_QUERY");
+  });
+});
+
+describe("GET /v1/search/news", () => {
+  test("returns 400 for missing query", async () => {
+    const req = new Request("http://localhost/");
+    const res = await searchNewsRoute.fetch(req);
+    expect(res.status).toBe(400);
+    const body = await res.json();
+    expect(body.code).toBe("INVALID_QUERY");
+  });
+});
+
+describe("POST /v1/search/deep", () => {
+  test("returns 400 for invalid JSON", async () => {
+    const req = new Request("http://localhost/", {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: "not json", // unparseable body must surface as INVALID_BODY, not a 500
+    });
+    const res = await searchDeepRoute.fetch(req);
+    expect(res.status).toBe(400);
+    const body = await res.json();
+    expect(body.code).toBe("INVALID_BODY");
+  });
+
+  test("returns 400 for missing query field", async () => {
+    const req = new Request("http://localhost/", {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({ sources: 3 }), // valid JSON, but schema requires `query`
+    });
+    const res = await searchDeepRoute.fetch(req);
+    expect(res.status).toBe(400);
+  });
+});
diff --git a/tests/logic/search-logic.test.ts b/tests/logic/search-logic.test.ts
new file mode 100644
index 0000000..95102d3
--- /dev/null
+++ b/tests/logic/search-logic.test.ts
@@ -0,0 +1,27 @@
+/**
+ * Logic tests — query normalization, search coordination.
+ */
+import { describe, test, expect } from "bun:test";
+import { normalizeQuery } from "../../src/logic/search";
+
+describe("normalizeQuery", () => {
+  test("trims whitespace", () => {
+    expect(normalizeQuery(" hello ")).toBe("hello");
+  });
+
+  test("lowercases", () => {
+    expect(normalizeQuery("Hello World")).toBe("hello world");
+  });
+
+  test("collapses multiple spaces", () => { // input needs a run of >1 space, else this can't distinguish collapsing from identity
+    expect(normalizeQuery("hello   world")).toBe("hello world");
+  });
+
+  test("handles mixed", () => { // trim + lowercase together
+    expect(normalizeQuery(" Hello WORLD ")).toBe("hello world");
+  });
+
+  test("empty string stays empty", () => { // empty input is a no-op, not an error
+    expect(normalizeQuery("")).toBe("");
+  });
+});