fix: read lead agent options from context (#2515)

* fix: read lead agent options from context

* fix: validate runtime context config
This commit is contained in:
DanielWalnut 2026-04-24 22:46:51 +08:00 committed by GitHub
parent ec8a8cae38
commit b970993425
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 139 additions and 19 deletions

View File

@ -12,6 +12,7 @@ import json
import logging
import re
import time
from collections.abc import Mapping
from typing import Any
from fastapi import HTTPException, Request
@ -101,9 +102,10 @@ def resolve_agent_factory(assistant_id: str | None):
"""Resolve the agent factory callable from config.
Custom agents are implemented as ``lead_agent`` + an ``agent_name``
injected into ``configurable`` see :func:`build_run_config`. All
``assistant_id`` values therefore map to the same factory; the routing
happens inside ``make_lead_agent`` when it reads ``cfg["agent_name"]``.
injected into ``configurable`` or ``context`` — see
:func:`build_run_config`. All ``assistant_id`` values therefore map to the
same factory; the routing happens inside ``make_lead_agent`` when it reads
``cfg["agent_name"]``.
"""
from deerflow.agents.lead_agent.agent import make_lead_agent
@ -120,10 +122,12 @@ def build_run_config(
"""Build a RunnableConfig dict for the agent.
When *assistant_id* refers to a custom agent (anything other than
``"lead_agent"`` / ``None``), the name is forwarded as
``configurable["agent_name"]``. ``make_lead_agent`` reads this key to
load the matching ``agents/<name>/SOUL.md`` and per-agent config
without it the agent silently runs as the default lead agent.
``"lead_agent"`` / ``None``), the name is forwarded as ``agent_name`` in
whichever runtime options container is active: ``context`` for
LangGraph >= 0.6.0 requests, otherwise ``configurable``.
``make_lead_agent`` reads this key to load the matching
``agents/<name>/SOUL.md`` and per-agent config — without it the agent
silently runs as the default lead agent.
This mirrors the channel manager's ``_resolve_run_params`` logic so that
the LangGraph Platform-compatible HTTP API and the IM channel path behave
@ -142,7 +146,14 @@ def build_run_config(
thread_id,
list(request_config.get("configurable", {}).keys()),
)
config["context"] = request_config["context"]
context_value = request_config["context"]
if context_value is None:
context = {}
elif isinstance(context_value, Mapping):
context = dict(context_value)
else:
raise ValueError("request config 'context' must be a mapping or null.")
config["context"] = context
else:
configurable = {"thread_id": thread_id}
configurable.update(request_config.get("configurable", {}))
@ -154,13 +165,19 @@ def build_run_config(
config["configurable"] = {"thread_id": thread_id}
# Inject custom agent name when the caller specified a non-default assistant.
# Honour an explicit configurable["agent_name"] in the request if already set.
if assistant_id and assistant_id != _DEFAULT_ASSISTANT_ID and "configurable" in config:
if "agent_name" not in config["configurable"]:
normalized = assistant_id.strip().lower().replace("_", "-")
if not normalized or not re.fullmatch(r"[a-z0-9-]+", normalized):
raise ValueError(f"Invalid assistant_id {assistant_id!r}: must contain only letters, digits, and hyphens after normalization.")
config["configurable"]["agent_name"] = normalized
# Honour an explicit agent_name in the active runtime options container.
if assistant_id and assistant_id != _DEFAULT_ASSISTANT_ID:
normalized = assistant_id.strip().lower().replace("_", "-")
if not normalized or not re.fullmatch(r"[a-z0-9-]+", normalized):
raise ValueError(f"Invalid assistant_id {assistant_id!r}: must contain only letters, digits, and hyphens after normalization.")
if "configurable" in config:
target = config["configurable"]
elif "context" in config:
target = config["context"]
else:
target = config.setdefault("configurable", {})
if target is not None and "agent_name" not in target:
target["agent_name"] = normalized
if metadata:
config.setdefault("metadata", {}).update(metadata)
return config

View File

@ -26,6 +26,15 @@ from deerflow.models import create_chat_model
logger = logging.getLogger(__name__)
def _get_runtime_config(config: RunnableConfig) -> dict:
"""Merge legacy configurable options with LangGraph runtime context."""
cfg = dict(config.get("configurable", {}) or {})
context = config.get("context", {}) or {}
if isinstance(context, dict):
cfg.update(context)
return cfg
def _resolve_model_name(requested_model_name: str | None = None) -> str:
"""Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
app_config = get_app_config()
@ -248,7 +257,8 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
middlewares.append(summarization_middleware)
# Add TodoList middleware if plan mode is enabled
is_plan_mode = config.get("configurable", {}).get("is_plan_mode", False)
cfg = _get_runtime_config(config)
is_plan_mode = cfg.get("is_plan_mode", False)
todo_list_middleware = _create_todo_list_middleware(is_plan_mode)
if todo_list_middleware is not None:
middlewares.append(todo_list_middleware)
@ -277,9 +287,9 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
middlewares.append(DeferredToolFilterMiddleware())
# Add SubagentLimitMiddleware to truncate excess parallel task calls
subagent_enabled = config.get("configurable", {}).get("subagent_enabled", False)
subagent_enabled = cfg.get("subagent_enabled", False)
if subagent_enabled:
max_concurrent_subagents = config.get("configurable", {}).get("max_concurrent_subagents", 3)
max_concurrent_subagents = cfg.get("max_concurrent_subagents", 3)
middlewares.append(SubagentLimitMiddleware(max_concurrent=max_concurrent_subagents))
# LoopDetectionMiddleware — detect and break repetitive tool call loops
@ -299,7 +309,7 @@ def make_lead_agent(config: RunnableConfig):
from deerflow.tools import get_available_tools
from deerflow.tools.builtins import setup_agent
cfg = config.get("configurable", {})
cfg = _get_runtime_config(config)
thinking_enabled = cfg.get("thinking_enabled", True)
reasoning_effort = cfg.get("reasoning_effort", None)

View File

@ -145,6 +145,21 @@ def test_build_run_config_explicit_agent_name_not_overwritten():
assert config["configurable"]["agent_name"] == "explicit-agent"
def test_build_run_config_context_custom_agent_injects_agent_name():
    """A custom assistant_id is routed into context['agent_name'] when context mode is active."""
    from app.gateway.services import build_run_config

    # The presence of a 'context' key in the request config selects context
    # mode; the custom assistant name must land there, not in 'configurable'.
    request_config = {"context": {"model_name": "deepseek-v3"}}
    result = build_run_config("thread-1", request_config, None, assistant_id="finalis")

    assert result["context"]["agent_name"] == "finalis"
    assert "configurable" not in result
def test_resolve_agent_factory_returns_make_lead_agent():
"""resolve_agent_factory always returns make_lead_agent regardless of assistant_id."""
from app.gateway.services import resolve_agent_factory
@ -298,6 +313,36 @@ def test_build_run_config_with_context():
assert config["recursion_limit"] == 100
def test_build_run_config_null_context_becomes_empty_context():
    """A caller-supplied context=null must be normalized to an empty context dict."""
    from app.gateway.services import build_run_config

    result = build_run_config("thread-1", {"context": None}, None)

    # Null still selects context mode, so 'configurable' must stay absent.
    assert "configurable" not in result
    assert result["context"] == {}
def test_build_run_config_rejects_non_mapping_context():
    """A non-object context must raise a descriptive ValueError, not a bare TypeError."""
    import pytest

    from app.gateway.services import build_run_config

    with pytest.raises(ValueError) as excinfo:
        build_run_config("thread-1", {"context": "bad-context"}, None)
    # The error message should point the caller at the offending 'context' key.
    assert "context" in str(excinfo.value)
def test_build_run_config_null_context_custom_agent_injects_agent_name():
    """context=null still enters context mode, so a custom agent_name is injected there."""
    from app.gateway.services import build_run_config

    result = build_run_config(
        "thread-1",
        {"context": None},
        None,
        assistant_id="finalis",
    )

    assert "configurable" not in result
    assert result["context"] == {"agent_name": "finalis"}
def test_build_run_config_context_plus_configurable_warns(caplog):
"""When caller sends both 'context' and 'configurable', prefer 'context' and log a warning."""
import logging

View File

@ -113,6 +113,54 @@ def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkey
assert result["model"] is not None
def test_make_lead_agent_reads_runtime_options_from_context(monkeypatch):
    """make_lead_agent must honour runtime options supplied via config['context']."""
    import deerflow.tools as tools_module

    models = [
        _make_model("default-model", supports_thinking=False),
        _make_model("context-model", supports_thinking=True),
    ]
    app_config = _make_app_config(models)

    tools_mock = MagicMock(return_value=[])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", tools_mock)
    monkeypatch.setattr(
        lead_agent_module,
        "_build_middlewares",
        lambda config, model_name, agent_name=None: [],
    )

    # Record the keyword arguments create_chat_model receives so the
    # assertions below can verify they came from the context container.
    recorded: dict[str, object] = {}

    def _record_chat_model(*, name, thinking_enabled, reasoning_effort=None):
        recorded["name"] = name
        recorded["thinking_enabled"] = thinking_enabled
        recorded["reasoning_effort"] = reasoning_effort
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _record_chat_model)
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    runtime_context = {
        "model_name": "context-model",
        "thinking_enabled": False,
        "reasoning_effort": "high",
        "is_plan_mode": True,
        "subagent_enabled": True,
        "max_concurrent_subagents": 7,
    }
    result = lead_agent_module.make_lead_agent({"context": runtime_context})

    assert recorded == {
        "name": "context-model",
        "thinking_enabled": False,
        "reasoning_effort": "high",
    }
    tools_mock.assert_called_once_with(
        model_name="context-model", groups=None, subagent_enabled=True
    )
    assert result["model"] is not None
def test_make_lead_agent_rejects_invalid_bootstrap_agent_name(monkeypatch):
app_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])