Mirror of https://github.com/bytedance/deer-flow.git, synced 2026-04-25 11:18:22 +00:00
Review feedback: the previous commit added an AppConfig.current() fallback inside every new explicit-config helper, which perpetuated the implicit lookup we are trying to eliminate. This commit:

- requires app_config as a non-None parameter in internal functions that have clean callers (build_lead_runtime_middlewares, _build_runtime_middlewares)
- isolates the AppConfig.current() fallback to the single boundary where LangGraph Server's registration API genuinely cannot pass config (make_lead_agent), and to the two factories still reachable from not-yet-migrated community tool paths (create_chat_model, get_available_tools), each marked with a TODO(P2-10) grep anchor
- types RunContext.app_config as AppConfig | None instead of Any
- drops the narrative "# Phase 2:" comments from production source and test bodies; they belong in commit messages, not the code
- drops the AppConfig.resolve() helper introduced in the last commit, which was just another name for the implicit-lookup pattern
- makes _build_middlewares's kw-only separator explicit so the app_config / config distinction is clear at call sites

196 targeted tests pass.
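A minimal sketch of the calling convention this settles on (names are taken from the commit message; bodies and return types are illustrative, not the actual implementations):

    from deerflow.config.app_config import AppConfig

    def _build_runtime_middlewares(*, app_config: AppConfig) -> list:
        # Internal helper with clean callers: app_config is required, never None.
        ...

    def make_lead_agent():
        # Boundary case: LangGraph Server's registration API cannot pass config,
        # so the AppConfig.current() fallback is isolated to this entry point.
        app_config = AppConfig.current()
        return _build_runtime_middlewares(app_config=app_config)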
146 lines
6.8 KiB
Python
import logging

from langchain.chat_models import BaseChatModel

from deerflow.config.app_config import AppConfig
from deerflow.reflection import resolve_class
from deerflow.tracing import build_tracing_callbacks

logger = logging.getLogger(__name__)


def _deep_merge_dicts(base: dict | None, override: dict) -> dict:
    """Recursively merge two dictionaries without mutating the inputs."""
    merged = dict(base or {})
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = _deep_merge_dicts(merged[key], value)
        else:
            merged[key] = value
    return merged
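
# Examples for _deep_merge_dicts (illustrative):
#   _deep_merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}}) -> {"a": {"x": 1, "y": 2}}
#   _deep_merge_dicts({"a": 1}, {"a": {"y": 2}})        -> {"a": {"y": 2}}  (non-dict values are replaced)
# Neither input dictionary is mutated.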


def _vllm_disable_chat_template_kwargs(chat_template_kwargs: dict) -> dict:
    """Build the disable payload for vLLM/Qwen chat template kwargs."""
    disable_kwargs: dict[str, bool] = {}
    if "thinking" in chat_template_kwargs:
        disable_kwargs["thinking"] = False
    if "enable_thinking" in chat_template_kwargs:
        disable_kwargs["enable_thinking"] = False
    return disable_kwargs
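
# Example for _vllm_disable_chat_template_kwargs (illustrative):
#   {"enable_thinking": True} -> {"enable_thinking": False}
# Only keys present in the input are disabled, so kwargs a deployment never
# set are not sent back to the server.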


def create_chat_model(
    name: str | None = None,
    thinking_enabled: bool = False,
    *,
    app_config: AppConfig | None = None,
    **kwargs,
) -> BaseChatModel:
    """Create a chat model instance from the config.

    Args:
        name: The name of the model to create. If None, the first model in
            the config is used.
        thinking_enabled: Whether the model's thinking mode should be enabled.
        app_config: Application config. Falls back to AppConfig.current() when
            omitted; new callers should pass this explicitly.

    Returns:
        A chat model instance.
    """
    if app_config is None:
        # TODO(P2-10): fold into a required parameter once all callers
        # (memory updater, summarization middleware's implicit model) thread
        # config explicitly.
        app_config = AppConfig.current()
    config = app_config
    if name is None:
        name = config.models[0].name
    model_config = config.get_model_config(name)
    if model_config is None:
        raise ValueError(f"Model {name} not found in config") from None
    model_class = resolve_class(model_config.use, BaseChatModel)
    model_settings_from_config = model_config.model_dump(
        exclude_none=True,
        exclude={
            "use",
            "name",
            "display_name",
            "description",
            "supports_thinking",
            "supports_reasoning_effort",
            "when_thinking_enabled",
            "when_thinking_disabled",
            "thinking",
            "supports_vision",
        },
    )
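    # The excluded fields are deer-flow metadata (display name, description,
    # capability flags) or thinking-mode switches handled below; only
    # transport-level settings reach the model constructor.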

    # Compute effective when_thinking_enabled by merging in the `thinking` shortcut field.
    # The `thinking` shortcut is equivalent to setting when_thinking_enabled["thinking"].
    has_thinking_settings = (model_config.when_thinking_enabled is not None) or (model_config.thinking is not None)
    effective_wte: dict = dict(model_config.when_thinking_enabled) if model_config.when_thinking_enabled else {}
    if model_config.thinking is not None:
        merged_thinking = {**(effective_wte.get("thinking") or {}), **model_config.thinking}
        effective_wte = {**effective_wte, "thinking": merged_thinking}
    if thinking_enabled and has_thinking_settings:
        if not model_config.supports_thinking:
            raise ValueError(
                f"Model {name} does not support thinking. Set `supports_thinking` "
                "to true in the `config.yaml` to enable thinking."
            ) from None
        if effective_wte:
            model_settings_from_config.update(effective_wte)
    if not thinking_enabled:
        if model_config.when_thinking_disabled is not None:
            # User-provided disable settings take full precedence.
            model_settings_from_config.update(model_config.when_thinking_disabled)
        elif has_thinking_settings and effective_wte.get("extra_body", {}).get("thinking", {}).get("type"):
            # OpenAI-compatible gateway: thinking is nested under extra_body.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"thinking": {"type": "disabled"}},
            )
            model_settings_from_config["reasoning_effort"] = "minimal"
        elif has_thinking_settings and (
            disable_chat_template_kwargs := _vllm_disable_chat_template_kwargs(
                effective_wte.get("extra_body", {}).get("chat_template_kwargs") or {}
            )
        ):
            # vLLM uses chat template kwargs to switch thinking on/off.
            model_settings_from_config["extra_body"] = _deep_merge_dicts(
                model_settings_from_config.get("extra_body"),
                {"chat_template_kwargs": disable_chat_template_kwargs},
            )
        elif has_thinking_settings and effective_wte.get("thinking", {}).get("type"):
            # Native langchain_anthropic: thinking is a direct constructor parameter.
            model_settings_from_config["thinking"] = {"type": "disabled"}
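
    # Illustrative config shapes for the three disable paths above (assumed
    # forms, inferred from the keys each branch inspects):
    #   gateway:   when_thinking_enabled: {extra_body: {thinking: {type: ...}}}
    #   vLLM/Qwen: when_thinking_enabled: {extra_body: {chat_template_kwargs: {enable_thinking: true}}}
    #   anthropic: when_thinking_enabled: {thinking: {type: ...}}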
    if not model_config.supports_reasoning_effort:
        kwargs.pop("reasoning_effort", None)
        model_settings_from_config.pop("reasoning_effort", None)

    # For Codex Responses API models: map thinking mode to reasoning_effort.
    from deerflow.models.openai_codex_provider import CodexChatModel

    if issubclass(model_class, CodexChatModel):
        # The ChatGPT Codex endpoint currently rejects max_tokens/max_output_tokens.
        model_settings_from_config.pop("max_tokens", None)

        # Use explicit reasoning_effort from the frontend if provided (low/medium/high).
        explicit_effort = kwargs.pop("reasoning_effort", None)
        if not thinking_enabled:
            model_settings_from_config["reasoning_effort"] = "none"
        elif explicit_effort and explicit_effort in ("low", "medium", "high", "xhigh"):
            model_settings_from_config["reasoning_effort"] = explicit_effort
        elif "reasoning_effort" not in model_settings_from_config:
            model_settings_from_config["reasoning_effort"] = "medium"

    # Ensure stream_usage is enabled so that token usage metadata is available
    # in streaming responses. LangChain's BaseChatOpenAI only defaults
    # stream_usage=True when no custom base_url/api_base is set, so models
    # hitting third-party endpoints (e.g. doubao, deepseek) silently lose
    # usage data. We default it to True unless explicitly configured.
    if "stream_usage" not in model_settings_from_config and "stream_usage" not in kwargs:
        if "stream_usage" in getattr(model_class, "model_fields", {}):
            model_settings_from_config["stream_usage"] = True

    model_instance = model_class(**kwargs, **model_settings_from_config)

    callbacks = build_tracing_callbacks()
    if callbacks:
        existing_callbacks = model_instance.callbacks or []
        model_instance.callbacks = [*existing_callbacks, *callbacks]
        logger.debug(f"Tracing attached to model '{name}' with providers={len(callbacks)}")
    return model_instance
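
A minimal usage sketch (illustrative; "qwen3" is a made-up model name, and the explicit app_config threading follows the commit message above):

    from deerflow.config.app_config import AppConfig

    # Resolve the config once at a boundary, then pass it down explicitly.
    app_config = AppConfig.current()
    default_model = create_chat_model(app_config=app_config)  # first model in the config
    thinking_model = create_chat_model("qwen3", thinking_enabled=True, app_config=app_config)
    reply = default_model.invoke("hello")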