fix(config): correct MiniMax M2.7 highspeed model name and add thinking support (#1596)

* fix(config): correct MiniMax M2.7 highspeed model name and add thinking support

- Rename minimax-m2.5-highspeed to minimax-m2.7-highspeed for CN region
- Add supports_thinking: true for both M2.7 and M2.7-highspeed models

* Add supports_thinking option to config examples

Added supports_thinking configuration option in examples.

---------

Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
This commit is contained in:
finallylly 2026-03-30 11:13:47 +08:00 committed by GitHub
parent c5034c03c7
commit ef58bb8d3c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -188,6 +188,7 @@ models:
# max_tokens: 4096 # max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0] # temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true # supports_vision: true
# supports_thinking: true
# - name: minimax-m2.5-highspeed # - name: minimax-m2.5-highspeed
# display_name: MiniMax M2.5 Highspeed # display_name: MiniMax M2.5 Highspeed
@@ -200,6 +201,7 @@ models:
# max_tokens: 4096 # max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0] # temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true # supports_vision: true
# supports_thinking: true
# Example: MiniMax (OpenAI-compatible) - CN 中国区用户 # Example: MiniMax (OpenAI-compatible) - CN 中国区用户
# MiniMax provides high-performance models with 204K context window # MiniMax provides high-performance models with 204K context window
@@ -215,11 +217,12 @@ models:
# max_tokens: 4096 # max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0] # temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true # supports_vision: true
# supports_thinking: true
# - name: minimax-m2.5-highspeed # - name: minimax-m2.7-highspeed
# display_name: MiniMax M2.5 Highspeed # display_name: MiniMax M2.7 Highspeed
# use: langchain_openai:ChatOpenAI # use: langchain_openai:ChatOpenAI
# model: MiniMax-M2.5-highspeed # model: MiniMax-M2.7-highspeed
# api_key: $MINIMAX_API_KEY # api_key: $MINIMAX_API_KEY
# base_url: https://api.minimaxi.com/v1 # base_url: https://api.minimaxi.com/v1
# request_timeout: 600.0 # request_timeout: 600.0
@@ -227,6 +230,7 @@ models:
# max_tokens: 4096 # max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0] # temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true # supports_vision: true
# supports_thinking: true
# Example: OpenRouter (OpenAI-compatible) # Example: OpenRouter (OpenAI-compatible)