docs(config): add timeout and max_retries examples for model providers (#1549)

Added explicit timeout and retry configurations to `config.example.yaml` to help users properly configure their model connections.

Since different LangChain provider classes use different parameter names, this update maps the correct arguments for each:
- ChatOpenAI (OpenAI, MiniMax, Novita, OpenRouter): added `request_timeout` and `max_retries`
- ChatAnthropic (Claude): added `default_request_timeout` and `max_retries`
- ChatGoogleGenerativeAI (Gemini): added `timeout` and `max_retries`
- PatchedChatDeepSeek (Doubao, DeepSeek, Kimi): added `timeout` and `max_retries`

Default example values are set to 600.0 seconds for timeouts and 2 for max retries.
Author: knukn, 2026-03-29 19:29:55 +08:00 (committed by GitHub)
Parent commit: 70e9f2dd2c
This commit: 6091ba83c4
Signature: no known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -41,6 +41,8 @@ models:
# model: doubao-seed-1-8-251228
# api_base: https://ark.cn-beijing.volces.com/api/v3
# api_key: $VOLCENGINE_API_KEY
# timeout: 600.0
# max_retries: 2
# supports_thinking: true
# supports_vision: true
# supports_reasoning_effort: true
@ -55,6 +57,8 @@ models:
# use: langchain_openai:ChatOpenAI
# model: gpt-4
# api_key: $OPENAI_API_KEY # Use environment variable
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 0.7
# supports_vision: true # Enable vision support for view_image tool
@ -65,6 +69,8 @@ models:
# use: langchain_openai:ChatOpenAI
# model: gpt-5
# api_key: $OPENAI_API_KEY
# request_timeout: 600.0
# max_retries: 2
# use_responses_api: true
# output_version: responses/v1
# supports_vision: true
@ -75,6 +81,8 @@ models:
# use: langchain_anthropic:ChatAnthropic
# model: claude-3-5-sonnet-20241022
# api_key: $ANTHROPIC_API_KEY
# default_request_timeout: 600.0
# max_retries: 2
# max_tokens: 8192
# supports_vision: true # Enable vision support for view_image tool
# when_thinking_enabled:
@ -87,6 +95,8 @@ models:
# use: langchain_google_genai:ChatGoogleGenerativeAI
# model: gemini-2.5-pro
# gemini_api_key: $GEMINI_API_KEY
# timeout: 600.0
# max_retries: 2
# max_tokens: 8192
# supports_vision: true
@ -101,6 +111,8 @@ models:
# model: google/gemini-2.5-pro-preview # model name as expected by your gateway
# api_key: $GEMINI_API_KEY
# base_url: https://<your-openai-compat-gateway>/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 16384
# supports_thinking: true
# supports_vision: true
@ -115,6 +127,8 @@ models:
# use: deerflow.models.patched_deepseek:PatchedChatDeepSeek
# model: deepseek-reasoner
# api_key: $DEEPSEEK_API_KEY
# timeout: 600.0
# max_retries: 2
# max_tokens: 8192
# supports_thinking: true
# supports_vision: false # DeepSeek V3 does not support vision
@ -130,6 +144,8 @@ models:
# model: kimi-k2.5
# api_base: https://api.moonshot.cn/v1
# api_key: $MOONSHOT_API_KEY
# timeout: 600.0
# max_retries: 2
# max_tokens: 32768
# supports_thinking: true
# supports_vision: true # Check your specific model's capabilities
@ -147,6 +163,8 @@ models:
# model: deepseek/deepseek-v3.2
# api_key: $NOVITA_API_KEY
# base_url: https://api.novita.ai/openai
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 0.7
# supports_thinking: true
@ -165,6 +183,8 @@ models:
# model: MiniMax-M2.5
# api_key: $MINIMAX_API_KEY
# base_url: https://api.minimax.io/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true
@ -175,6 +195,8 @@ models:
# model: MiniMax-M2.5-highspeed
# api_key: $MINIMAX_API_KEY
# base_url: https://api.minimax.io/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true
@ -188,6 +210,8 @@ models:
# model: MiniMax-M2.7
# api_key: $MINIMAX_API_KEY
# base_url: https://api.minimaxi.com/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true
@ -198,6 +222,8 @@ models:
# model: MiniMax-M2.5-highspeed
# api_key: $MINIMAX_API_KEY
# base_url: https://api.minimaxi.com/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 4096
# temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
# supports_vision: true
@ -211,6 +237,8 @@ models:
# model: google/gemini-2.5-flash-preview
# api_key: $OPENAI_API_KEY
# base_url: https://openrouter.ai/api/v1
# request_timeout: 600.0
# max_retries: 2
# max_tokens: 8192
# temperature: 0.7