chore: update model context config

This commit is contained in:
Richard Tang
2026-04-19 15:19:26 -07:00
parent b27c7a029e
commit bb39424e99
+12 -19
View File
@@ -115,13 +115,6 @@
"max_tokens": 40960,
"max_context_tokens": 131072
},
{
"id": "llama3.1-8b",
"label": "Llama 3.1 8B - Fastest production",
"recommended": false,
"max_tokens": 8192,
"max_context_tokens": 32768
},
{
"id": "zai-glm-4.7",
"label": "Z.ai GLM 4.7 - Strong coding preview",
@@ -145,15 +138,15 @@
"id": "MiniMax-M2.7",
"label": "MiniMax M2.7 - Best coding quality",
"recommended": true,
"max_tokens": 32768,
"max_context_tokens": 204800
"max_tokens": 40960,
"max_context_tokens": 180000
},
{
"id": "MiniMax-M2.5",
"label": "MiniMax M2.5 - Strong value",
"recommended": false,
"max_tokens": 32768,
"max_context_tokens": 204800
"max_tokens": 40960,
"max_context_tokens": 180000
}
]
},
@@ -316,7 +309,7 @@
"label": "Qwen 3.6 Plus - Strong reasoning",
"recommended": true,
"max_tokens": 32768,
"max_context_tokens": 131072
"max_context_tokens": 240000
},
{
"id": "z-ai/glm-5v-turbo",
@@ -329,15 +322,15 @@
"id": "z-ai/glm-5.1",
"label": "GLM-5.1 - Better but Slower",
"recommended": true,
"max_tokens": 32768,
"max_tokens": 40960,
"max_context_tokens": 192000
},
{
"id": "x-ai/grok-4.20",
"label": "Grok 4.20 - xAI flagship",
"id": "minimax/minimax-m2.7",
"label": "Minimax M2.7 - Minimax flagship",
"recommended": false,
"max_tokens": 32768,
"max_context_tokens": 131072
"max_tokens": 40960,
"max_context_tokens": 180000
},
{
"id": "xiaomi/mimo-v2-pro",
@@ -375,8 +368,8 @@
"provider": "minimax",
"api_key_env_var": "MINIMAX_API_KEY",
"model": "MiniMax-M2.7",
"max_tokens": 32768,
"max_context_tokens": 204800,
"max_tokens": 40960,
"max_context_tokens": 180000,
"api_base": "https://api.minimax.io/v1"
},
"kimi_code": {