-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env_coscientist
More file actions
126 lines (106 loc) · 3.02 KB
/
.env_coscientist
File metadata and controls
126 lines (106 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# Application
APP_NAME=AI-CoScientist
APP_VERSION=0.1.0
ENVIRONMENT=development
DEBUG=true
LOG_LEVEL=INFO
# API
API_HOST=0.0.0.0
API_PORT=8000
API_WORKERS=4
API_RELOAD=true
# Security
SECRET_KEY=ai-coscientist-secret-key-development-only-change-in-production-32chars
ALGORITHM=HS256
ACCESS_TOKEN_EXPIRE_MINUTES=60
REFRESH_TOKEN_EXPIRE_DAYS=7
# Database - Docker Compose Configuration
POSTGRES_USER=postgres
POSTGRES_PASSWORD=local_dev_password
POSTGRES_DB=ai_coscientist
POSTGRES_PORT=5434
DATABASE_URL=postgresql+asyncpg://postgres:local_dev_password@localhost:5434/ai_coscientist
DATABASE_ECHO=false
DATABASE_POOL_SIZE=5
DATABASE_MAX_OVERFLOW=10
# Redis - Docker Compose Configuration
REDIS_PORT=6380
REDIS_PASSWORD=
REDIS_URL=redis://localhost:6380/0
REDIS_CACHE_TTL=3600
REDIS_MAX_CONNECTIONS=10
# RabbitMQ
RABBITMQ_URL=amqp://guest:guest@localhost:5672/
RABBITMQ_EXCHANGE=ai_coscientist
RABBITMQ_QUEUE=tasks
# LLM Providers
# SECURITY: a real key was committed here — rotate it immediately and load the
# replacement from a secrets manager or an untracked local override, never from VCS.
OPENAI_API_KEY=REPLACE_WITH_YOUR_OPENAI_API_KEY
OPENAI_MODEL=gpt-5-pro
OPENAI_MAX_TOKENS=4000
OPENAI_TEMPERATURE=0.7
# LLM Providers - Google Gemini
# SECURITY: real keys were committed here — rotate both immediately and load the
# replacements from a secrets manager or an untracked local override, never from VCS.
GOOGLE_API_KEY=REPLACE_WITH_YOUR_GOOGLE_API_KEY
GEMINI_API_KEY=REPLACE_WITH_YOUR_GEMINI_API_KEY
GEMINI_MODEL=gemini-3-flash-preview
GEMINI_FALLBACK_MODEL=gemini-2.0-flash-exp
# Gemini 3.0 supports 8K+ output tokens. Comment kept on its own line: some dotenv
# parsers do not strip inline comments and would include them in the value.
GEMINI_MAX_TOKENS=8192
# LLM Configuration
LLM_PRIMARY_PROVIDER=gemini
LLM_FALLBACK_PROVIDER=openai
LLM_CACHE_ENABLED=true
LLM_CACHE_TTL=3600
LLM_MAX_RETRIES=3
LLM_TIMEOUT=60
# Vector Database
CHROMADB_HOST=localhost
CHROMADB_PORT=8001
CHROMADB_COLLECTION=scientific_papers
EMBEDDING_MODEL=allenai/scibert_scivocab_uncased
EMBEDDING_DIMENSION=384
# External APIs
SEMANTIC_SCHOLAR_API_KEY=
CROSSREF_EMAIL=research@ai-coscientist.com
# Celery
CELERY_BROKER_URL=redis://localhost:6380/1
CELERY_RESULT_BACKEND=redis://localhost:6380/2
CELERY_TASK_ALWAYS_EAGER=false
# Monitoring
PROMETHEUS_PORT=9090
ENABLE_METRICS=true
GRAFANA_PORT=3001
GRAFANA_USER=admin
GRAFANA_PASSWORD=local_dev_grafana
GRAFANA_ROOT_URL=http://localhost:3001
GRAFANA_PLUGINS=
# CORS (using defaults from config.py)
# CORS_ORIGINS=["http://localhost:3001","http://localhost:8000"]
# CORS_ALLOW_CREDENTIALS=true
# CORS_ALLOW_METHODS=["*"]
# CORS_ALLOW_HEADERS=["*"]
# Rate Limiting
RATE_LIMIT_ENABLED=true
RATE_LIMIT_PER_MINUTE=100
# Storage
UPLOAD_DIR=./uploads
# 10 MB (10485760 bytes). Comment kept on its own line: some dotenv parsers do not
# strip inline comments and would include them in the value.
MAX_UPLOAD_SIZE=10485760
# RL System Configuration
RL_ENABLED=true
RL_AB_TESTING_ENABLED=true
RL_INITIAL_TRAFFIC_PCT=10
RL_MAX_TRAFFIC_PCT=90
RL_MODEL_PATH=./models/rl_agent_selection
RL_CONFIG_PATH=./config/rl/rl_system_config.yaml
RL_TRAINING_ENABLED=true
RL_PERFORMANCE_THRESHOLD=0.8
# Email (optional)
SMTP_HOST=smtp.gmail.com
SMTP_PORT=587
SMTP_USER=
SMTP_PASSWORD=
SMTP_FROM=noreply@ai-coscientist.com
# Tavily Search API
# SECURITY: a real key was committed here — rotate it immediately and load the
# replacement from a secrets manager or an untracked local override, never from VCS.
TAVILY_API_KEY=REPLACE_WITH_YOUR_TAVILY_API_KEY
# DeepSeek API
DEEPSEEK_MODEL=deepseek-chat
# SECURITY: a real key was committed here — rotate it immediately and load the
# replacement from a secrets manager or an untracked local override, never from VCS.
DEEPSEEK_API_KEY=REPLACE_WITH_YOUR_DEEPSEEK_API_KEY