diff --git a/conf/openrouter_models.json b/conf/openrouter_models.json
index e3b929db6..7e3d878e8 100644
--- a/conf/openrouter_models.json
+++ b/conf/openrouter_models.json
@@ -471,6 +471,43 @@
       "description": "GPT-5.1 Codex mini (400K context, 128K output) - Cost-efficient Codex variant with streaming support",
       "intelligence_score": 16
     },
+    {
+      "model_name": "moonshotai/kimi-k2-thinking",
+      "aliases": [
+        "kimi",
+        "kimi-k2",
+        "kimi-thinking",
+        "kimi-k2-thinking"
+      ],
+      "context_window": 262144,
+      "max_output_tokens": 65536,
+      "supports_extended_thinking": false,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": false,
+      "max_image_size_mb": 0.0,
+      "allow_code_generation": true,
+      "description": "MoonshotAI Kimi K2 Thinking via OpenRouter (262,144 context)",
+      "intelligence_score": 15
+    },
+    {
+      "model_name": "z-ai/glm-4.7",
+      "aliases": [
+        "glm",
+        "glm-4.7",
+        "glm47"
+      ],
+      "context_window": 202752,
+      "max_output_tokens": 65536,
+      "supports_extended_thinking": false,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": false,
+      "max_image_size_mb": 0.0,
+      "allow_code_generation": true,
+      "description": "Z.AI GLM 4.7 via OpenRouter (202,752 context)",
+      "intelligence_score": 13
+    },
     {
       "model_name": "x-ai/grok-4",
       "aliases": [
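For context, here is a minimal sketch of how a registry file like this might be consumed. The `load_model_registry` and `resolve_model` helpers below are hypothetical illustrations, not functions from this repository; only the JSON fields (`model_name`, `aliases`, `context_window`, etc.) come from the entries added in the diff, and the top-level `"models"` key is an assumption about the file's schema.

```python
import json
from pathlib import Path


def load_model_registry(path: str = "conf/openrouter_models.json") -> list[dict]:
    """Load the OpenRouter model entries from the JSON config.

    Assumes the file holds a top-level "models" array; adjust the key
    if the actual schema differs.
    """
    data = json.loads(Path(path).read_text(encoding="utf-8"))
    return data.get("models", [])


def resolve_model(name: str, models: list[dict]) -> dict | None:
    """Resolve a user-supplied name or alias (case-insensitive) to its entry."""
    needle = name.lower()
    for entry in models:
        if entry["model_name"].lower() == needle:
            return entry
        if needle in (alias.lower() for alias in entry.get("aliases", [])):
            return entry
    return None


if __name__ == "__main__":
    models = load_model_registry()
    entry = resolve_model("kimi", models)
    if entry:
        # With the diff applied, this resolves the "kimi" alias to
        # moonshotai/kimi-k2-thinking with a 262144-token context window.
        print(entry["model_name"], entry["context_window"])
```

Under these assumptions, any of the aliases in the new entries ("kimi", "glm-4.7", "glm47", ...) would resolve to the full model record, giving callers access to capability flags such as `supports_function_calling` and `allow_code_generation`.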