@@ -669,7 +669,7 @@ def __init__(
         self.debugger = None

         # Initialize LLM client
-        # Note: Model selection is handled by LLMMultiAgentPolicy, so model should always be provided
+        # Note: API key validation is handled by LLMMultiAgentPolicy before creating agent policies
         if self.provider == "openai":
             from openai import OpenAI

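The client construction under the "openai" branch is cut off by the diff context. As a purely illustrative sketch (not the file's actual code), assuming the official `openai` and `anthropic` SDKs, the per-provider setup typically looks like the helper below; both clients read their API key from the environment (`OPENAI_API_KEY` / `ANTHROPIC_API_KEY`) by default, which is why the missing-key check in the next hunk can rely on environment variables alone. The real policy also supports local Ollama (see the `llm-ollama` hint in the messages below), which this sketch omits.

    def make_client(provider: str):
        """Hypothetical per-provider client setup (illustrative only, not the PR's code)."""
        if provider == "openai":
            from openai import OpenAI  # official SDK; reads OPENAI_API_KEY from the environment
            return OpenAI()
        if provider == "anthropic":
            from anthropic import Anthropic  # official SDK; reads ANTHROPIC_API_KEY from the environment
            return Anthropic()
        raise ValueError(f"Unsupported provider for this sketch: {provider}")
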
@@ -1170,6 +1170,30 @@ def __init__(
         self.context_window_size = context_window_size
         self.mg_cfg = mg_cfg

+        # Check API key before model selection for paid providers
+        if provider == "openai" and not os.getenv("OPENAI_API_KEY"):
+            print(
+                "\n\033[1;31mError:\033[0m OPENAI_API_KEY environment variable is not set.\n\n"
+                "To use OpenAI GPT models, you need to:\n"
+                "  1. Get an API key from https://platform.openai.com/api-keys\n"
+                "  2. Export it in your terminal:\n"
+                "     export OPENAI_API_KEY='your-api-key-here'\n\n"
+                "Alternatively, use local Ollama (free):\n"
+                "  cogames play -m <mission> -p class=llm-ollama\n"
+            )
+            sys.exit(1)
+        elif provider == "anthropic" and not os.getenv("ANTHROPIC_API_KEY"):
+            print(
+                "\n\033[1;31mError:\033[0m ANTHROPIC_API_KEY environment variable is not set.\n\n"
+                "To use Anthropic Claude models, you need to:\n"
+                "  1. Get an API key from https://console.anthropic.com/settings/keys\n"
+                "  2. Export it in your terminal:\n"
+                "     export ANTHROPIC_API_KEY='your-api-key-here'\n\n"
+                "Alternatively, use local Ollama (free):\n"
+                "  cogames play -m <mission> -p class=llm-ollama\n"
+            )
+            sys.exit(1)
+
         # Select model once for all agents if not specified
         if model is None:
             if provider == "openai":
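Both checks added above follow the same pattern (missing env var, readable error, `sys.exit(1)`). Placing them in `LLMMultiAgentPolicy.__init__`, before model selection and before the per-agent policies are created, means a missing key fails fast with a single clear message rather than surfacing later from inside the provider SDK. A hedged sketch of how the duplicated logic could be collapsed into one helper (the `ensure_api_key` name and `REQUIRED_KEYS` table are invented for illustration, not part of this change):

    import os
    import sys

    # Illustrative only: this helper and table are not in the PR; they just restate
    # the two inline checks above in one place.
    REQUIRED_KEYS = {
        "openai": ("OPENAI_API_KEY", "https://platform.openai.com/api-keys", "OpenAI GPT"),
        "anthropic": ("ANTHROPIC_API_KEY", "https://console.anthropic.com/settings/keys", "Anthropic Claude"),
    }

    def ensure_api_key(provider: str) -> None:
        """Exit with a readable error if a paid provider's API key is missing from the environment."""
        if provider not in REQUIRED_KEYS:
            return  # local providers such as Ollama need no key
        env_var, url, family = REQUIRED_KEYS[provider]
        if os.getenv(env_var):
            return
        print(
            f"\n\033[1;31mError:\033[0m {env_var} environment variable is not set.\n\n"
            f"To use {family} models, you need to:\n"
            f"  1. Get an API key from {url}\n"
            "  2. Export it in your terminal:\n"
            f"     export {env_var}='your-api-key-here'\n\n"
            "Alternatively, use local Ollama (free):\n"
            "  cogames play -m <mission> -p class=llm-ollama\n"
        )
        sys.exit(1)

With such a helper, the body of `__init__` would reduce to a single `ensure_api_key(provider)` call right before model selection.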