From bb39424e99b7774f6748961dd8dd8d315cc92977 Mon Sep 17 00:00:00 2001 From: Richard Tang Date: Sun, 19 Apr 2026 15:19:26 -0700 Subject: [PATCH] chore: update model context config --- core/framework/llm/model_catalog.json | 31 +++++++++++---------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/core/framework/llm/model_catalog.json b/core/framework/llm/model_catalog.json index fca7b8c7..a88f5538 100644 --- a/core/framework/llm/model_catalog.json +++ b/core/framework/llm/model_catalog.json @@ -115,13 +115,6 @@ "max_tokens": 40960, "max_context_tokens": 131072 }, - { - "id": "llama3.1-8b", - "label": "Llama 3.1 8B - Fastest production", - "recommended": false, - "max_tokens": 8192, - "max_context_tokens": 32768 - }, { "id": "zai-glm-4.7", "label": "Z.ai GLM 4.7 - Strong coding preview", @@ -145,15 +138,15 @@ "id": "MiniMax-M2.7", "label": "MiniMax M2.7 - Best coding quality", "recommended": true, - "max_tokens": 32768, - "max_context_tokens": 204800 + "max_tokens": 40960, + "max_context_tokens": 180000 }, { "id": "MiniMax-M2.5", "label": "MiniMax M2.5 - Strong value", "recommended": false, - "max_tokens": 32768, - "max_context_tokens": 204800 + "max_tokens": 40960, + "max_context_tokens": 180000 } ] }, @@ -316,7 +309,7 @@ "label": "Qwen 3.6 Plus - Strong reasoning", "recommended": true, "max_tokens": 32768, - "max_context_tokens": 131072 + "max_context_tokens": 240000 }, { "id": "z-ai/glm-5v-turbo", @@ -329,15 +322,15 @@ "id": "z-ai/glm-5.1", "label": "GLM-5.1 - Better but Slower", "recommended": true, - "max_tokens": 32768, + "max_tokens": 40960, "max_context_tokens": 192000 }, { - "id": "x-ai/grok-4.20", - "label": "Grok 4.20 - xAI flagship", + "id": "minimax/minimax-m2.7", + "label": "Minimax M2.7 - Minimax flagship", "recommended": false, - "max_tokens": 32768, - "max_context_tokens": 131072 + "max_tokens": 40960, + "max_context_tokens": 180000 }, { "id": "xiaomi/mimo-v2-pro", @@ -375,8 +368,8 @@ "provider": "minimax", "api_key_env_var": 
"MINIMAX_API_KEY", "model": "MiniMax-M2.7", - "max_tokens": 32768, - "max_context_tokens": 204800, + "max_tokens": 40960, + "max_context_tokens": 180000, "api_base": "https://api.minimax.io/v1" }, "kimi_code": {