NmanQAQ dd30e609f7
feat(models): add vLLM provider support (#1860)
Adds support for vLLM 0.19.0 OpenAI-compatible chat endpoints and fixes the Qwen reasoning toggle so that flash mode can actually disable thinking.

Co-authored-by: NmanQAQ <normangyao@qq.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
2026-04-06 15:18:34 +08:00

120 lines
5.5 KiB
Python

import logging

from langchain_core.language_models.chat_models import BaseChatModel

from deerflow.config import get_app_config
from deerflow.reflection import resolve_class
from deerflow.tracing import build_tracing_callbacks

logger = logging.getLogger(__name__)
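
# Illustrative model entry consumed below (an assumed sketch; the real schema
# lives in deerflow.config and may differ):
#
#   models:
#     - name: qwen3                       # hypothetical entry
#       use: langchain_openai.ChatOpenAI  # provider class resolved via `use`
#       supports_thinking: true
#       when_thinking_enabled:
#         extra_body:
#           chat_template_kwargs:
#             enable_thinking: true       # vLLM/Qwen thinking switch
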
def _deep_merge_dicts(base: dict | None, override: dict) -> dict:
    """Recursively merge two dictionaries without mutating the inputs."""
    merged = dict(base or {})
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = _deep_merge_dicts(merged[key], value)
        else:
            merged[key] = value
    return merged


def _vllm_disable_chat_template_kwargs(chat_template_kwargs: dict) -> dict:
    """Build the disable payload for vLLM/Qwen chat template kwargs."""
    disable_kwargs: dict[str, bool] = {}
    if "thinking" in chat_template_kwargs:
        disable_kwargs["thinking"] = False
    if "enable_thinking" in chat_template_kwargs:
        disable_kwargs["enable_thinking"] = False
    return disable_kwargs


def create_chat_model(
    name: str | None = None, thinking_enabled: bool = False, **kwargs
) -> BaseChatModel:
    """Create a chat model instance from the config.

    Args:
        name: The name of the model to create. If None, the first model in the config is used.
        thinking_enabled: Whether the model's thinking/reasoning mode should be active.
        **kwargs: Extra keyword arguments forwarded to the model constructor.

    Returns:
        A chat model instance.
    """
    config = get_app_config()
    if name is None:
        name = config.models[0].name
    model_config = config.get_model_config(name)
    if model_config is None:
        raise ValueError(f"Model {name} not found in config") from None
    model_class = resolve_class(model_config.use, BaseChatModel)
    model_settings_from_config = model_config.model_dump(
        exclude_none=True,
        exclude={
            "use",
            "name",
            "display_name",
            "description",
            "supports_thinking",
            "supports_reasoning_effort",
            "when_thinking_enabled",
            "thinking",
            "supports_vision",
        },
    )
    # Compute the effective when_thinking_enabled by merging in the `thinking`
    # shortcut field. The `thinking` shortcut is equivalent to setting
    # when_thinking_enabled["thinking"].
    has_thinking_settings = (model_config.when_thinking_enabled is not None) or (
        model_config.thinking is not None
    )
    effective_wte: dict = (
        dict(model_config.when_thinking_enabled) if model_config.when_thinking_enabled else {}
    )
    if model_config.thinking is not None:
        merged_thinking = {**(effective_wte.get("thinking") or {}), **model_config.thinking}
        effective_wte = {**effective_wte, "thinking": merged_thinking}
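    # e.g. (hypothetical values) when_thinking_enabled={"extra_body": {...}}
    # plus thinking={"type": "enabled"} yields
    # effective_wte={"extra_body": {...}, "thinking": {"type": "enabled"}}.
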
    if thinking_enabled and has_thinking_settings:
        if not model_config.supports_thinking:
            raise ValueError(
                f"Model {name} does not support thinking. "
                "Set `supports_thinking` to true in `config.yaml` to enable thinking."
            ) from None
        if effective_wte:
            model_settings_from_config.update(effective_wte)
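
    # When thinking is disabled, pick the disable mechanism matching how the
    # provider was configured: extra_body["thinking"] for OpenAI-compatible
    # gateways, chat_template_kwargs for vLLM, or a top-level `thinking`
    # parameter for langchain_anthropic.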
    if not thinking_enabled and has_thinking_settings:
        if effective_wte.get("extra_body", {}).get("thinking", {}).get("type"):
            # OpenAI-compatible gateway: thinking is nested under extra_body.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"thinking": {"type": "disabled"}},
            )
            model_settings_from_config["reasoning_effort"] = "minimal"
        elif disable_chat_template_kwargs := _vllm_disable_chat_template_kwargs(
            effective_wte.get("extra_body", {}).get("chat_template_kwargs") or {}
        ):
            # vLLM uses chat template kwargs to switch thinking on/off.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"chat_template_kwargs": disable_chat_template_kwargs},
            )
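            # The request then carries, e.g.,
            # extra_body={"chat_template_kwargs": {"enable_thinking": False}},
            # which vLLM's OpenAI-compatible server forwards to the chat template.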
        elif effective_wte.get("thinking", {}).get("type"):
            # Native langchain_anthropic: thinking is a direct constructor parameter.
            model_settings_from_config["thinking"] = {"type": "disabled"}
    if not model_config.supports_reasoning_effort:
        kwargs.pop("reasoning_effort", None)
        model_settings_from_config.pop("reasoning_effort", None)
    # For Codex Responses API models, map the thinking mode to reasoning_effort.
    from deerflow.models.openai_codex_provider import CodexChatModel

    if issubclass(model_class, CodexChatModel):
        # The ChatGPT Codex endpoint currently rejects max_tokens/max_output_tokens.
        model_settings_from_config.pop("max_tokens", None)
        # Use the explicit reasoning_effort from the frontend if provided
        # (low/medium/high/xhigh).
        explicit_effort = kwargs.pop("reasoning_effort", None)
        if not thinking_enabled:
            model_settings_from_config["reasoning_effort"] = "none"
        elif explicit_effort in ("low", "medium", "high", "xhigh"):
            model_settings_from_config["reasoning_effort"] = explicit_effort
        elif "reasoning_effort" not in model_settings_from_config:
            model_settings_from_config["reasoning_effort"] = "medium"
    model_instance = model_class(**kwargs, **model_settings_from_config)

    callbacks = build_tracing_callbacks()
    if callbacks:
        existing_callbacks = model_instance.callbacks or []
        model_instance.callbacks = [*existing_callbacks, *callbacks]
        logger.debug(f"Tracing attached to model '{name}' with providers={len(callbacks)}")
    return model_instance
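

# Typical usage (the model name is hypothetical and must exist in config.yaml):
#   flash_model = create_chat_model("qwen3", thinking_enabled=False)
#   response = flash_model.invoke("Hello")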