refactor: thread app_config through lead and subagent task path (#2666)

* refactor: thread app config through lead prompt

* fix: honor explicit app config across runtime paths

* style: format subagent executor tests

* fix: thread resolved app config and guard subagents-only fallback

Address two PR review findings:

1. _create_summarization_middleware passed the original (possibly None)
   app_config into create_chat_model, forcing the model factory back to
   ambient get_app_config() and risking config drift between the
   middleware's resolved view and the model's view. Pass the resolved
   AppConfig instance through end-to-end.

2. get_available_subagent_names accepted Any-typed config and forwarded
   it to is_host_bash_allowed, which reads ``.sandbox``. A
   SubagentsAppConfig (also accepted upstream as a sum-type input) has
   no ``.sandbox`` attribute and would be silently treated as "no
   sandbox configured", incorrectly disabling the bash subagent. Guard
   on hasattr and fall back to ambient lookup otherwise.

Adds regression tests for both paths.

* chore: simplify hasattr guard and tighten regression tests

- Collapse if/else into ternary in get_available_subagent_names; hasattr(None, ...) is False so the explicit None check was redundant.
- Drop comments that narrate the change rather than explain non-obvious WHY (test names already convey intent).
- Replace stringly-typed sentinel "no-arg" in regression test with direct args tuple comparison.

---------

Co-authored-by: greatmengqi <chenmengqi.0376@bytedance.com>
This commit is contained in:
greatmengqi 2026-05-02 06:37:49 +08:00 committed by GitHub
parent 189b82405c
commit 8ba01dfd83
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 769 additions and 153 deletions

View File

@ -19,8 +19,6 @@ from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddlewar
from deerflow.agents.thread_state import ThreadState
from deerflow.config.agents_config import load_agent_config, validate_agent_name
from deerflow.config.app_config import AppConfig, get_app_config
from deerflow.config.memory_config import get_memory_config
from deerflow.config.summarization_config import get_summarization_config
from deerflow.models import create_chat_model
logger = logging.getLogger(__name__)
@ -52,7 +50,8 @@ def _resolve_model_name(requested_model_name: str | None = None, *, app_config:
def _create_summarization_middleware(*, app_config: AppConfig | None = None) -> DeerFlowSummarizationMiddleware | None:
"""Create and configure the summarization middleware from config."""
config = get_summarization_config()
resolved_app_config = app_config or get_app_config()
config = resolved_app_config.summarization
if not config.enabled:
return None
@ -73,9 +72,9 @@ def _create_summarization_middleware(*, app_config: AppConfig | None = None) ->
# as middleware rather than lead_agent (SummarizationMiddleware is a
# LangChain built-in, so we tag the model at creation time).
if config.model_name:
model = create_chat_model(name=config.model_name, thinking_enabled=False, app_config=app_config)
model = create_chat_model(name=config.model_name, thinking_enabled=False, app_config=resolved_app_config)
else:
model = create_chat_model(thinking_enabled=False, app_config=app_config)
model = create_chat_model(thinking_enabled=False, app_config=resolved_app_config)
model = model.with_config(tags=["middleware:summarize"])
# Prepare kwargs
@ -92,18 +91,13 @@ def _create_summarization_middleware(*, app_config: AppConfig | None = None) ->
kwargs["summary_prompt"] = config.summary_prompt
hooks: list[BeforeSummarizationHook] = []
if get_memory_config().enabled:
if resolved_app_config.memory.enabled:
hooks.append(memory_flush_hook)
# The logic below relies on two assumptions holding true: this factory is
# the sole entry point for DeerFlowSummarizationMiddleware, and the runtime
# config is not expected to change after startup.
try:
resolved_app_config = app_config or get_app_config()
skills_container_path = resolved_app_config.skills.container_path or "/mnt/skills"
except Exception:
logger.exception("Failed to resolve skills container path; falling back to default")
skills_container_path = "/mnt/skills"
skills_container_path = resolved_app_config.skills.container_path or "/mnt/skills"
return DeerFlowSummarizationMiddleware(
**kwargs,
@ -279,10 +273,10 @@ def _build_middlewares(
middlewares.append(TokenUsageMiddleware())
# Add TitleMiddleware
middlewares.append(TitleMiddleware())
middlewares.append(TitleMiddleware(app_config=resolved_app_config))
# Add MemoryMiddleware (after TitleMiddleware)
middlewares.append(MemoryMiddleware(agent_name=agent_name))
middlewares.append(MemoryMiddleware(agent_name=agent_name, memory_config=resolved_app_config.memory))
# Add ViewImageMiddleware only if the current model supports vision.
# Use the resolved runtime model_name from make_lead_agent to avoid stale config values.
@ -316,7 +310,9 @@ def _build_middlewares(
def make_lead_agent(config: RunnableConfig):
"""LangGraph graph factory; keep the signature compatible with LangGraph Server."""
return _make_lead_agent(config, app_config=get_app_config())
runtime_config = _get_runtime_config(config)
runtime_app_config = runtime_config.get("app_config")
return _make_lead_agent(config, app_config=runtime_app_config or get_app_config())
def _make_lead_agent(config: RunnableConfig, *, app_config: AppConfig):

View File

@ -158,7 +158,7 @@ Skip simple one-off tasks.
"""
def _build_available_subagents_description(available_names: list[str], bash_available: bool) -> str:
def _build_available_subagents_description(available_names: list[str], bash_available: bool, *, app_config: AppConfig | None = None) -> str:
"""Dynamically build subagent type descriptions from registry.
Mirrors Codex's pattern where agent_type_description is dynamically generated
@ -180,7 +180,7 @@ def _build_available_subagents_description(available_names: list[str], bash_avai
if name in builtin_descriptions:
lines.append(f"- **{name}**: {builtin_descriptions[name]}")
else:
config = get_subagent_config(name)
config = get_subagent_config(name, app_config=app_config)
if config is not None:
desc = config.description.split("\n")[0].strip() # First line only for brevity
lines.append(f"- **{name}**: {desc}")
@ -188,7 +188,7 @@ def _build_available_subagents_description(available_names: list[str], bash_avai
return "\n".join(lines)
def _build_subagent_section(max_concurrent: int) -> str:
def _build_subagent_section(max_concurrent: int, *, app_config: AppConfig | None = None) -> str:
"""Build the subagent system prompt section with dynamic concurrency limit.
Args:
@ -198,12 +198,12 @@ def _build_subagent_section(max_concurrent: int) -> str:
Formatted subagent section string.
"""
n = max_concurrent
available_names = get_available_subagent_names()
available_names = get_available_subagent_names(app_config=app_config) if app_config is not None else get_available_subagent_names()
bash_available = "bash" in available_names
# Dynamically build subagent type descriptions from registry (aligned with Codex's
# agent_type_description pattern where all registered roles are listed in the tool spec).
available_subagents = _build_available_subagents_description(available_names, bash_available)
available_subagents = _build_available_subagents_description(available_names, bash_available, app_config=app_config)
direct_tool_examples = "bash, ls, read_file, web_search, etc." if bash_available else "ls, read_file, web_search, etc."
direct_execution_example = (
'# User asks: "Run the tests"\n# Thinking: Cannot decompose into parallel sub-tasks\n# → Execute directly\n\nbash("npm test") # Direct execution, not task()'
@ -530,21 +530,28 @@ combined with a FastAPI gateway for REST API access [citation:FastAPI](https://f
"""
def _get_memory_context(agent_name: str | None = None) -> str:
def _get_memory_context(agent_name: str | None = None, *, app_config: AppConfig | None = None) -> str:
"""Get memory context for injection into system prompt.
Args:
agent_name: If provided, loads per-agent memory. If None, loads global memory.
app_config: Explicit application config. When provided, memory options
are read from this value instead of the global config singleton.
Returns:
Formatted memory context string wrapped in XML tags, or empty string if disabled.
"""
try:
from deerflow.agents.memory import format_memory_for_injection, get_memory_data
from deerflow.config.memory_config import get_memory_config
from deerflow.runtime.user_context import get_effective_user_id
config = get_memory_config()
if app_config is None:
from deerflow.config.memory_config import get_memory_config
config = get_memory_config()
else:
config = app_config.memory
if not config.enabled or not config.injection_enabled:
return ""
@ -558,8 +565,8 @@ def _get_memory_context(agent_name: str | None = None) -> str:
{memory_content}
</memory>
"""
except Exception as e:
logger.error("Failed to load memory context: %s", e)
except Exception:
logger.exception("Failed to load memory context")
return ""
@ -599,15 +606,20 @@ def get_skills_prompt_section(available_skills: set[str] | None = None, *, app_c
"""Generate the skills prompt section with available skills list."""
skills = _get_enabled_skills_for_config(app_config)
try:
from deerflow.config import get_app_config
if app_config is None:
try:
from deerflow.config import get_app_config
config = app_config or get_app_config()
config = get_app_config()
container_base_path = config.skills.container_path
skill_evolution_enabled = config.skill_evolution.enabled
except Exception:
container_base_path = "/mnt/skills"
skill_evolution_enabled = False
else:
config = app_config
container_base_path = config.skills.container_path
skill_evolution_enabled = config.skill_evolution.enabled
except Exception:
container_base_path = "/mnt/skills"
skill_evolution_enabled = False
if not skills and not skill_evolution_enabled:
return ""
@ -640,13 +652,17 @@ def get_deferred_tools_prompt_section(*, app_config: AppConfig | None = None) ->
"""
from deerflow.tools.builtins.tool_search import get_deferred_registry
try:
from deerflow.config import get_app_config
if app_config is None:
try:
from deerflow.config import get_app_config
config = app_config or get_app_config()
if not config.tool_search.enabled:
config = get_app_config()
except Exception:
return ""
except Exception:
else:
config = app_config
if not config.tool_search.enabled:
return ""
registry = get_deferred_registry()
@ -657,15 +673,19 @@ def get_deferred_tools_prompt_section(*, app_config: AppConfig | None = None) ->
return f"<available-deferred-tools>\n{names}\n</available-deferred-tools>"
def _build_acp_section() -> str:
def _build_acp_section(*, app_config: AppConfig | None = None) -> str:
"""Build the ACP agent prompt section, only if ACP agents are configured."""
try:
from deerflow.config.acp_config import get_acp_agents
if app_config is None:
try:
from deerflow.config.acp_config import get_acp_agents
agents = get_acp_agents()
if not agents:
agents = get_acp_agents()
except Exception:
return ""
except Exception:
else:
agents = getattr(app_config, "acp_agents", {}) or {}
if not agents:
return ""
return (
@ -679,14 +699,18 @@ def _build_acp_section() -> str:
def _build_custom_mounts_section(*, app_config: AppConfig | None = None) -> str:
"""Build a prompt section for explicitly configured sandbox mounts."""
try:
from deerflow.config import get_app_config
if app_config is None:
try:
from deerflow.config import get_app_config
config = app_config or get_app_config()
mounts = config.sandbox.mounts or []
except Exception:
logger.exception("Failed to load configured sandbox mounts for the lead-agent prompt")
return ""
config = get_app_config()
except Exception:
logger.exception("Failed to load configured sandbox mounts for the lead-agent prompt")
return ""
else:
config = app_config
mounts = config.sandbox.mounts or []
if not mounts:
return ""
@ -709,11 +733,11 @@ def apply_prompt_template(
app_config: AppConfig | None = None,
) -> str:
# Get memory context
memory_context = _get_memory_context(agent_name)
memory_context = _get_memory_context(agent_name, app_config=app_config)
# Include subagent section only if enabled (from runtime parameter)
n = max_concurrent_subagents
subagent_section = _build_subagent_section(n) if subagent_enabled else ""
subagent_section = _build_subagent_section(n, app_config=app_config) if subagent_enabled else ""
# Add subagent reminder to critical_reminders if enabled
subagent_reminder = (
@ -740,7 +764,7 @@ def apply_prompt_template(
deferred_tools_section = get_deferred_tools_prompt_section(app_config=app_config)
# Build ACP agent section only if ACP agents are configured
acp_section = _build_acp_section()
acp_section = _build_acp_section(app_config=app_config)
custom_mounts_section = _build_custom_mounts_section(app_config=app_config)
acp_and_mounts_section = "\n".join(section for section in (acp_section, custom_mounts_section) if section)

View File

@ -1,7 +1,7 @@
"""Middleware for memory mechanism."""
import logging
from typing import override
from typing import TYPE_CHECKING, override
from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware
@ -13,6 +13,9 @@ from deerflow.agents.memory.queue import get_memory_queue
from deerflow.config.memory_config import get_memory_config
from deerflow.runtime.user_context import get_effective_user_id
if TYPE_CHECKING:
from deerflow.config.memory_config import MemoryConfig
logger = logging.getLogger(__name__)
@ -34,14 +37,17 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
state_schema = MemoryMiddlewareState
def __init__(self, agent_name: str | None = None):
def __init__(self, agent_name: str | None = None, *, memory_config: "MemoryConfig | None" = None):
    """Initialize the MemoryMiddleware.

    Args:
        agent_name: If provided, memory is stored per-agent. If None, uses global memory.
        memory_config: Explicit memory config. When omitted, the legacy
            global config fallback (``get_memory_config()``) is used at
            call time instead of at construction time.
    """
    super().__init__()
    self._agent_name = agent_name
    self._memory_config = memory_config
@override
def after_agent(self, state: MemoryMiddlewareState, runtime: Runtime) -> dict | None:
@ -54,7 +60,7 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
Returns:
None (no state changes needed from this middleware).
"""
config = get_memory_config()
config = self._memory_config or get_memory_config()
if not config.enabled:
return None

View File

@ -2,7 +2,7 @@
import logging
import re
from typing import Any, NotRequired, override
from typing import TYPE_CHECKING, Any, NotRequired, override
from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware
@ -12,6 +12,10 @@ from langgraph.runtime import Runtime
from deerflow.config.title_config import get_title_config
from deerflow.models import create_chat_model
if TYPE_CHECKING:
from deerflow.config.app_config import AppConfig
from deerflow.config.title_config import TitleConfig
logger = logging.getLogger(__name__)
@ -26,6 +30,18 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
state_schema = TitleMiddlewareState
def __init__(self, *, app_config: "AppConfig | None" = None, title_config: "TitleConfig | None" = None):
    """Initialize the TitleMiddleware.

    Args:
        app_config: Explicit application config; when set, title options are
            read from it instead of the ambient global singleton.
        title_config: Explicit title config; when both are provided it takes
            precedence over ``app_config``.
    """
    super().__init__()
    self._app_config = app_config
    self._title_config = title_config
def _get_title_config(self):
    """Resolve the effective title config.

    Precedence: explicit ``title_config`` > ``app_config.title`` > ambient
    global lookup via ``get_title_config()`` (legacy fallback).
    """
    if self._title_config is not None:
        return self._title_config
    if self._app_config is not None:
        return self._app_config.title
    # Legacy path: neither explicit config was injected at construction.
    return get_title_config()
def _normalize_content(self, content: object) -> str:
if isinstance(content, str):
return content
@ -47,7 +63,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
def _should_generate_title(self, state: TitleMiddlewareState) -> bool:
"""Check if we should generate a title for this thread."""
config = get_title_config()
config = self._get_title_config()
if not config.enabled:
return False
@ -72,7 +88,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
Returns (prompt_string, user_msg) so callers can use user_msg as fallback.
"""
config = get_title_config()
config = self._get_title_config()
messages = state.get("messages", [])
user_msg_content = next((m.content for m in messages if m.type == "human"), "")
@ -94,14 +110,14 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
def _parse_title(self, content: object) -> str:
"""Normalize model output into a clean title string."""
config = get_title_config()
config = self._get_title_config()
title_content = self._normalize_content(content)
title_content = self._strip_think_tags(title_content)
title = title_content.strip().strip('"').strip("'")
return title[: config.max_chars] if len(title) > config.max_chars else title
def _fallback_title(self, user_msg: str) -> str:
config = get_title_config()
config = self._get_title_config()
fallback_chars = min(config.max_chars, 50)
if len(user_msg) > fallback_chars:
return user_msg[:fallback_chars].rstrip() + "..."
@ -135,14 +151,17 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
if not self._should_generate_title(state):
return None
config = get_title_config()
config = self._get_title_config()
prompt, user_msg = self._build_title_prompt(state)
try:
model_kwargs = {"thinking_enabled": False}
if self._app_config is not None:
model_kwargs["app_config"] = self._app_config
if config.model_name:
model = create_chat_model(name=config.model_name, thinking_enabled=False)
model = create_chat_model(name=config.model_name, **model_kwargs)
else:
model = create_chat_model(thinking_enabled=False)
model = create_chat_model(**model_kwargs)
response = await model.ainvoke(prompt, config=self._get_runnable_config())
title = self._parse_title(response.content)
if title:

View File

@ -8,7 +8,7 @@ import yaml
from dotenv import load_dotenv
from pydantic import BaseModel, ConfigDict, Field
from deerflow.config.acp_config import load_acp_config_from_dict
from deerflow.config.acp_config import ACPAgentConfig, load_acp_config_from_dict
from deerflow.config.agents_api_config import AgentsApiConfig, load_agents_api_config_from_dict
from deerflow.config.checkpointer_config import CheckpointerConfig, load_checkpointer_config_from_dict
from deerflow.config.database_config import DatabaseConfig
@ -95,6 +95,7 @@ class AppConfig(BaseModel):
summarization: SummarizationConfig = Field(default_factory=SummarizationConfig, description="Conversation summarization configuration")
memory: MemoryConfig = Field(default_factory=MemoryConfig, description="Memory subsystem configuration")
agents_api: AgentsApiConfig = Field(default_factory=AgentsApiConfig, description="Custom-agent management API configuration")
acp_agents: dict[str, ACPAgentConfig] = Field(default_factory=dict, description="ACP-compatible agent configuration")
subagents: SubagentsAppConfig = Field(default_factory=SubagentsAppConfig, description="Subagent runtime configuration")
guardrails: GuardrailsConfig = Field(default_factory=GuardrailsConfig, description="Guardrail middleware configuration")
circuit_breaker: CircuitBreakerConfig = Field(default_factory=CircuitBreakerConfig, description="LLM circuit breaker configuration")

View File

@ -21,7 +21,7 @@ import inspect
import logging
from dataclasses import dataclass, field
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Literal
from typing import TYPE_CHECKING, Any, Literal, cast
if TYPE_CHECKING:
from langchain_core.messages import HumanMessage
@ -39,12 +39,19 @@ logger = logging.getLogger(__name__)
_VALID_LG_MODES = {"values", "updates", "checkpoints", "tasks", "debug", "messages", "custom"}
def _build_runtime_context(thread_id: str, run_id: str, caller_context: Any | None) -> dict[str, Any]:
def _build_runtime_context(
thread_id: str,
run_id: str,
caller_context: Any | None,
app_config: AppConfig | None = None,
) -> dict[str, Any]:
"""Build the dict that becomes ``ToolRuntime.context`` for the run.
Always includes ``thread_id`` and ``run_id``. Additional keys from the caller's
``config['context']`` (e.g. ``agent_name`` for the bootstrap flow issue #2677)
are merged in but never override ``thread_id``/``run_id``.
are merged in but never override ``thread_id``/``run_id``. The resolved
``AppConfig`` is added by the worker so tools can consume it without ambient
global lookups.
langgraph 1.1+ surfaces this as ``runtime.context`` via the parent runtime stored
under ``config['configurable']['__pregel_runtime']`` see
@ -54,6 +61,8 @@ def _build_runtime_context(thread_id: str, run_id: str, caller_context: Any | No
if isinstance(caller_context, dict):
for key, value in caller_context.items():
runtime_ctx.setdefault(key, value)
if app_config is not None:
runtime_ctx["app_config"] = app_config
return runtime_ctx
@ -74,6 +83,18 @@ class RunContext:
app_config: AppConfig | None = field(default=None)
def _install_runtime_context(config: dict, runtime_context: dict[str, Any]) -> None:
existing_context = config.get("context")
if isinstance(existing_context, dict):
existing_context.setdefault("thread_id", runtime_context["thread_id"])
existing_context.setdefault("run_id", runtime_context["run_id"])
if "app_config" in runtime_context:
existing_context["app_config"] = runtime_context["app_config"]
return
config["context"] = dict(runtime_context)
def _compute_agent_factory_supports_app_config(agent_factory: Any) -> bool:
try:
return "app_config" in inspect.signature(agent_factory).parameters
@ -191,11 +212,9 @@ async def run_agent(
# access thread-level data. langgraph-cli does this automatically; we must do it
# manually here because we drive the graph through ``agent.astream(config=...)``
# without passing the official ``context=`` parameter.
runtime_ctx = _build_runtime_context(thread_id, run_id, config.get("context"))
if "context" in config and isinstance(config["context"], dict):
config["context"].setdefault("thread_id", thread_id)
config["context"].setdefault("run_id", run_id)
runtime = Runtime(context=runtime_ctx, store=store)
runtime_ctx = _build_runtime_context(thread_id, run_id, config.get("context"), ctx.app_config)
_install_runtime_context(config, runtime_ctx)
runtime = Runtime(context=cast(Any, runtime_ctx), store=store)
config.setdefault("configurable", {})["__pregel_runtime"] = runtime
# Inject RunJournal as a LangChain callback handler.

View File

@ -168,6 +168,8 @@ def _get_isolated_subagent_loop() -> asyncio.AbstractEventLoop:
_isolated_subagent_loop_thread = thread
_isolated_subagent_loop_started = started_event
if _isolated_subagent_loop is None:
raise RuntimeError("Isolated subagent event loop is not initialized")
return _isolated_subagent_loop
@ -308,8 +310,10 @@ class SubagentExecutor:
try:
from deerflow.skills.storage import get_or_new_skill_storage
storage_kwargs = {"app_config": self.app_config} if self.app_config is not None else {}
storage = await asyncio.to_thread(get_or_new_skill_storage, **storage_kwargs)
# Use asyncio.to_thread to avoid blocking the event loop (LangGraph ASGI requirement)
all_skills = await asyncio.to_thread(get_or_new_skill_storage().load_skills, enabled_only=True)
all_skills = await asyncio.to_thread(storage.load_skills, enabled_only=True)
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} loaded {len(all_skills)} enabled skills from disk")
except Exception:
logger.warning(f"[trace={self.trace_id}] Failed to load skills for subagent {self.config.name}", exc_info=True)
@ -395,6 +399,10 @@ class SubagentExecutor:
status=SubagentStatus.RUNNING,
started_at=datetime.now(),
)
ai_messages = result.ai_messages
if ai_messages is None:
ai_messages = []
result.ai_messages = ai_messages
try:
agent = self._create_agent()
@ -404,10 +412,12 @@ class SubagentExecutor:
run_config: RunnableConfig = {
"recursion_limit": self.config.max_turns,
}
context = {}
context: dict[str, Any] = {}
if self.thread_id:
run_config["configurable"] = {"thread_id": self.thread_id}
context["thread_id"] = self.thread_id
if self.app_config is not None:
context["app_config"] = self.app_config
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} starting async execution with max_turns={self.config.max_turns}")
@ -454,13 +464,13 @@ class SubagentExecutor:
message_id = message_dict.get("id")
is_duplicate = False
if message_id:
is_duplicate = any(msg.get("id") == message_id for msg in result.ai_messages)
is_duplicate = any(msg.get("id") == message_id for msg in ai_messages)
else:
is_duplicate = message_dict in result.ai_messages
is_duplicate = message_dict in ai_messages
if not is_duplicate:
result.ai_messages.append(message_dict)
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} captured AI message #{len(result.ai_messages)}")
ai_messages.append(message_dict)
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} captured AI message #{len(ai_messages)}")
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} completed async execution")

View File

@ -2,6 +2,7 @@
import logging
from dataclasses import replace
from typing import Any
from deerflow.sandbox.security import is_host_bash_allowed
from deerflow.subagents.builtins import BUILTIN_SUBAGENTS
@ -10,19 +11,26 @@ from deerflow.subagents.config import SubagentConfig
logger = logging.getLogger(__name__)
def _build_custom_subagent_config(name: str) -> SubagentConfig | None:
def _resolve_subagents_app_config(app_config: Any | None = None):
if app_config is None:
from deerflow.config.subagents_config import get_subagents_app_config
return get_subagents_app_config()
return getattr(app_config, "subagents", app_config)
def _build_custom_subagent_config(name: str, *, app_config: Any | None = None) -> SubagentConfig | None:
"""Build a SubagentConfig from config.yaml custom_agents section.
Args:
name: The name of the custom subagent.
app_config: Optional AppConfig or SubagentsAppConfig to resolve from.
Returns:
SubagentConfig if found in custom_agents, None otherwise.
"""
from deerflow.config.subagents_config import get_subagents_app_config
app_config = get_subagents_app_config()
custom = app_config.custom_agents.get(name)
subagents_config = _resolve_subagents_app_config(app_config)
custom = subagents_config.custom_agents.get(name)
if custom is None:
return None
@ -39,7 +47,7 @@ def _build_custom_subagent_config(name: str) -> SubagentConfig | None:
)
def get_subagent_config(name: str) -> SubagentConfig | None:
def get_subagent_config(name: str, *, app_config: Any | None = None) -> SubagentConfig | None:
"""Get a subagent configuration by name, with config.yaml overrides applied.
Resolution order (mirrors Codex's config layering):
@ -49,6 +57,7 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
Args:
name: The name of the subagent.
app_config: Optional AppConfig or SubagentsAppConfig to resolve overrides from.
Returns:
SubagentConfig if found (with any config.yaml overrides applied), None otherwise.
@ -56,7 +65,7 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
# Step 1: Look up built-in, then fall back to custom_agents
config = BUILTIN_SUBAGENTS.get(name)
if config is None:
config = _build_custom_subagent_config(name)
config = _build_custom_subagent_config(name, app_config=app_config)
if config is None:
return None
@ -65,12 +74,9 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
# (timeout_seconds, max_turns at the top level) apply to built-in agents
# but must NOT override custom agents' own values — custom agents define
# their own defaults in the custom_agents section.
# Lazy import to avoid circular deps.
from deerflow.config.subagents_config import get_subagents_app_config
app_config = get_subagents_app_config()
subagents_config = _resolve_subagents_app_config(app_config)
is_builtin = name in BUILTIN_SUBAGENTS
agent_override = app_config.agents.get(name)
agent_override = subagents_config.agents.get(name)
overrides = {}
@ -79,27 +85,27 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
if agent_override.timeout_seconds != config.timeout_seconds:
logger.debug("Subagent '%s': timeout overridden (%ss -> %ss)", name, config.timeout_seconds, agent_override.timeout_seconds)
overrides["timeout_seconds"] = agent_override.timeout_seconds
elif is_builtin and app_config.timeout_seconds != config.timeout_seconds:
logger.debug("Subagent '%s': timeout from global default (%ss -> %ss)", name, config.timeout_seconds, app_config.timeout_seconds)
overrides["timeout_seconds"] = app_config.timeout_seconds
elif is_builtin and subagents_config.timeout_seconds != config.timeout_seconds:
logger.debug("Subagent '%s': timeout from global default (%ss -> %ss)", name, config.timeout_seconds, subagents_config.timeout_seconds)
overrides["timeout_seconds"] = subagents_config.timeout_seconds
# Max turns: per-agent override > global default (builtins only) > config's own value
if agent_override is not None and agent_override.max_turns is not None:
if agent_override.max_turns != config.max_turns:
logger.debug("Subagent '%s': max_turns overridden (%s -> %s)", name, config.max_turns, agent_override.max_turns)
overrides["max_turns"] = agent_override.max_turns
elif is_builtin and app_config.max_turns is not None and app_config.max_turns != config.max_turns:
logger.debug("Subagent '%s': max_turns from global default (%s -> %s)", name, config.max_turns, app_config.max_turns)
overrides["max_turns"] = app_config.max_turns
elif is_builtin and subagents_config.max_turns is not None and subagents_config.max_turns != config.max_turns:
logger.debug("Subagent '%s': max_turns from global default (%s -> %s)", name, config.max_turns, subagents_config.max_turns)
overrides["max_turns"] = subagents_config.max_turns
# Model: per-agent override only (no global default for model)
effective_model = app_config.get_model_for(name)
effective_model = subagents_config.get_model_for(name)
if effective_model is not None and effective_model != config.model:
logger.debug("Subagent '%s': model overridden (%s -> %s)", name, config.model, effective_model)
overrides["model"] = effective_model
# Skills: per-agent override only (no global default for skills)
effective_skills = app_config.get_skills_for(name)
effective_skills = subagents_config.get_skills_for(name)
if effective_skills is not None and effective_skills != config.skills:
logger.debug("Subagent '%s': skills overridden (%s -> %s)", name, config.skills, effective_skills)
overrides["skills"] = effective_skills
@ -110,21 +116,21 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
return config
def list_subagents() -> list[SubagentConfig]:
def list_subagents(*, app_config: Any | None = None) -> list[SubagentConfig]:
    """List all available subagent configurations (with config.yaml overrides applied).

    Args:
        app_config: Optional AppConfig or SubagentsAppConfig to resolve names
            and overrides from; None uses the ambient global config.

    Returns:
        List of all registered SubagentConfig instances (built-in + custom).
    """
    resolved = (get_subagent_config(name, app_config=app_config) for name in get_subagent_names(app_config=app_config))
    return [config for config in resolved if config is not None]
def get_subagent_names() -> list[str]:
def get_subagent_names(*, app_config: Any | None = None) -> list[str]:
"""Get all available subagent names (built-in + custom).
Returns:
@ -133,25 +139,23 @@ def get_subagent_names() -> list[str]:
names = list(BUILTIN_SUBAGENTS.keys())
# Merge custom_agents from config.yaml
from deerflow.config.subagents_config import get_subagents_app_config
app_config = get_subagents_app_config()
for custom_name in app_config.custom_agents:
subagents_config = _resolve_subagents_app_config(app_config)
for custom_name in subagents_config.custom_agents:
if custom_name not in names:
names.append(custom_name)
return names
def get_available_subagent_names() -> list[str]:
def get_available_subagent_names(*, app_config: Any | None = None) -> list[str]:
"""Get subagent names that should be exposed to the active runtime.
Returns:
List of subagent names visible to the current sandbox configuration.
"""
names = get_subagent_names()
names = get_subagent_names(app_config=app_config)
try:
host_bash_allowed = is_host_bash_allowed()
host_bash_allowed = is_host_bash_allowed(app_config) if hasattr(app_config, "sandbox") else is_host_bash_allowed()
except Exception:
logger.debug("Could not determine host bash availability; exposing all subagents")
return names

View File

@ -4,7 +4,7 @@ import asyncio
import logging
import uuid
from dataclasses import replace
from typing import Annotated
from typing import TYPE_CHECKING, Annotated, Any, cast
from langchain.tools import InjectedToolCallId, ToolRuntime, tool
from langgraph.config import get_stream_writer
@ -22,9 +22,21 @@ from deerflow.subagents.executor import (
request_cancel_background_task,
)
if TYPE_CHECKING:
from deerflow.config.app_config import AppConfig
logger = logging.getLogger(__name__)
def _get_runtime_app_config(runtime: Any) -> "AppConfig | None":
context = getattr(runtime, "context", None)
if isinstance(context, dict):
app_config = context.get("app_config")
if app_config is not None:
return cast("AppConfig", app_config)
return None
def _merge_skill_allowlists(parent: list[str] | None, child: list[str] | None) -> list[str] | None:
"""Return the effective subagent skill allowlist under the parent policy."""
if parent is None:
@ -81,15 +93,18 @@ async def task_tool(
subagent_type: The type of subagent to use. ALWAYS PROVIDE THIS PARAMETER THIRD.
max_turns: Optional maximum number of agent turns. Defaults to subagent's configured max.
"""
available_subagent_names = get_available_subagent_names()
runtime_app_config = _get_runtime_app_config(runtime)
available_subagent_names = get_available_subagent_names(app_config=runtime_app_config) if runtime_app_config is not None else get_available_subagent_names()
# Get subagent configuration
config = get_subagent_config(subagent_type)
config = get_subagent_config(subagent_type, app_config=runtime_app_config) if runtime_app_config is not None else get_subagent_config(subagent_type)
if config is None:
available = ", ".join(available_subagent_names)
return f"Error: Unknown subagent type '{subagent_type}'. Available: {available}"
if subagent_type == "bash" and not is_host_bash_allowed():
return f"Error: {LOCAL_BASH_SUBAGENT_DISABLED_MESSAGE}"
if subagent_type == "bash":
host_bash_allowed = is_host_bash_allowed(runtime_app_config) if runtime_app_config is not None else is_host_bash_allowed()
if not host_bash_allowed:
return f"Error: {LOCAL_BASH_SUBAGENT_DISABLED_MESSAGE}"
# Build config overrides
overrides: dict = {}
@ -136,25 +151,34 @@ async def task_tool(
# Inherit parent agent's tool_groups so subagents respect the same restrictions
parent_tool_groups = metadata.get("tool_groups")
app_config = None
if config.model == "inherit" and parent_model is None:
app_config = get_app_config()
effective_model = resolve_subagent_model_name(config, parent_model, app_config=app_config)
resolved_app_config = runtime_app_config
if config.model == "inherit" and parent_model is None and resolved_app_config is None:
resolved_app_config = get_app_config()
effective_model = resolve_subagent_model_name(config, parent_model, app_config=resolved_app_config)
# Subagents should not have subagent tools enabled (prevent recursive nesting)
tools = get_available_tools(model_name=effective_model, groups=parent_tool_groups, subagent_enabled=False)
available_tools_kwargs = {
"model_name": effective_model,
"groups": parent_tool_groups,
"subagent_enabled": False,
}
if resolved_app_config is not None:
available_tools_kwargs["app_config"] = resolved_app_config
tools = get_available_tools(**available_tools_kwargs)
# Create executor
executor = SubagentExecutor(
config=config,
tools=tools,
app_config=app_config,
parent_model=parent_model,
sandbox_state=sandbox_state,
thread_data=thread_data,
thread_id=thread_id,
trace_id=trace_id,
)
executor_kwargs = {
"config": config,
"tools": tools,
"parent_model": parent_model,
"sandbox_state": sandbox_state,
"thread_data": thread_data,
"thread_id": thread_id,
"trace_id": trace_id,
}
if resolved_app_config is not None:
executor_kwargs["app_config"] = resolved_app_config
executor = SubagentExecutor(**executor_kwargs)
# Start background execution (always async to prevent blocking)
# Use tool_call_id as task_id for better traceability
@ -189,11 +213,12 @@ async def task_tool(
last_status = result.status
# Check for new AI messages and send task_running events
current_message_count = len(result.ai_messages)
ai_messages = result.ai_messages or []
current_message_count = len(ai_messages)
if current_message_count > last_message_count:
# Send task_running event for each new message
for i in range(last_message_count, current_message_count):
message = result.ai_messages[i]
message = ai_messages[i]
writer(
{
"type": "task_running",

View File

@ -141,10 +141,14 @@ def get_available_tools(
# Add invoke_acp_agent tool if any ACP agents are configured
acp_tools: list[BaseTool] = []
try:
from deerflow.config.acp_config import get_acp_agents
from deerflow.tools.builtins.invoke_acp_agent_tool import build_invoke_acp_agent_tool
acp_agents = get_acp_agents()
if app_config is None:
from deerflow.config.acp_config import get_acp_agents
acp_agents = get_acp_agents()
else:
acp_agents = getattr(config, "acp_agents", {}) or {}
if acp_agents:
acp_tools.append(build_invoke_acp_agent_tool(acp_agents))
logger.info(f"Including invoke_acp_agent tool ({len(acp_agents)} agent(s): {list(acp_agents.keys())})")

View File

@ -697,3 +697,33 @@ def test_get_available_tools_includes_invoke_acp_agent_when_agents_configured(mo
assert "invoke_acp_agent" in [tool.name for tool in tools]
load_acp_config_from_dict({})
def test_get_available_tools_uses_explicit_app_config_for_acp_agents(monkeypatch):
    """An explicit app_config must supply the ACP agents; the ambient get_acp_agents() must not run."""
    explicit_agents = {"codex": ACPAgentConfig(command="codex-acp", description="Codex CLI")}
    explicit_config = SimpleNamespace(
        tools=[],
        models=[],
        tool_search=SimpleNamespace(enabled=False),
        skill_evolution=SimpleNamespace(enabled=False),
        get_model_config=lambda name: None,
        acp_agents=explicit_agents,
    )
    sentinel_tool = SimpleNamespace(name="invoke_acp_agent")
    captured: dict[str, object] = {}

    # Ambient lookup is booby-trapped: any fallback read fails the test.
    def fail_get_acp_agents():
        raise AssertionError("ambient get_acp_agents() must not be used when app_config is explicit")

    def fake_build_invoke_acp_agent_tool(agents):
        captured["agents"] = agents
        return sentinel_tool

    monkeypatch.setattr("deerflow.tools.tools.is_host_bash_allowed", lambda config=None: True)
    monkeypatch.setattr("deerflow.config.acp_config.get_acp_agents", fail_get_acp_agents)
    monkeypatch.setattr("deerflow.tools.builtins.invoke_acp_agent_tool.build_invoke_acp_agent_tool", fake_build_invoke_acp_agent_tool)

    tools = get_available_tools(include_mcp=False, subagent_enabled=False, app_config=explicit_config)

    # Identity check: the exact dict from the explicit config was forwarded.
    assert captured["agents"] is explicit_agents
    assert "invoke_acp_agent" in [tool.name for tool in tools]

View File

@ -72,6 +72,44 @@ def test_internal_make_lead_agent_uses_explicit_app_config(monkeypatch):
assert result["model"] is not None
def test_make_lead_agent_uses_runtime_app_config_from_context_without_global_read(monkeypatch):
    """app_config carried in the runtime context must reach create_chat_model with no ambient read."""
    app_config = _make_app_config([_make_model("context-model", supports_thinking=False)])
    import deerflow.tools as tools_module

    # Any fallback to the global config fails the test.
    def _raise_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when runtime context already carries app_config")

    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])
    captured: dict[str, object] = {}

    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
        captured["name"] = name
        captured["app_config"] = app_config
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    result = lead_agent_module.make_lead_agent(
        {
            "context": {
                "model_name": "context-model",
                "app_config": app_config,
            }
        }
    )

    assert captured == {
        "name": "context-model",
        "app_config": app_config,
    }
    assert result["model"] is not None
def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
app_config = _make_app_config(
[
@ -276,6 +314,16 @@ def test_build_middlewares_passes_explicit_app_config_to_shared_factory(monkeypa
)
monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda **kwargs: None)
monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)
monkeypatch.setattr(
lead_agent_module,
"TitleMiddleware",
lambda *, app_config: captured.setdefault("title_app_config", app_config) or "title-middleware",
)
monkeypatch.setattr(
lead_agent_module,
"MemoryMiddleware",
lambda agent_name=None, *, memory_config: captured.setdefault("memory_config", memory_config) or "memory-middleware",
)
middlewares = lead_agent_module._build_middlewares(
{"configurable": {"is_plan_mode": False, "subagent_enabled": False}},
@ -286,17 +334,16 @@ def test_build_middlewares_passes_explicit_app_config_to_shared_factory(monkeypa
assert captured == {
"app_config": app_config,
"lazy_init": True,
"title_app_config": app_config,
"memory_config": app_config.memory,
}
assert middlewares[0] == "base-middleware"
def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch):
monkeypatch.setattr(
lead_agent_module,
"get_summarization_config",
lambda: SummarizationConfig(enabled=True, model_name="model-masswork"),
)
monkeypatch.setattr(lead_agent_module, "get_memory_config", lambda: MemoryConfig(enabled=False))
app_config = _make_app_config([_make_model("model-masswork", supports_thinking=False)])
app_config.summarization = SummarizationConfig(enabled=True, model_name="model-masswork")
app_config.memory = MemoryConfig(enabled=False)
from unittest.mock import MagicMock
@ -311,13 +358,55 @@ def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch
captured["app_config"] = app_config
return fake_model
def _raise_get_app_config():
raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")
monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
monkeypatch.setattr(lead_agent_module, "DeerFlowSummarizationMiddleware", lambda **kwargs: kwargs)
middleware = lead_agent_module._create_summarization_middleware(app_config=_make_app_config([_make_model("model-masswork", supports_thinking=False)]))
middleware = lead_agent_module._create_summarization_middleware(app_config=app_config)
assert captured["name"] == "model-masswork"
assert captured["thinking_enabled"] is False
assert captured["app_config"] is not None
assert captured["app_config"] is app_config
assert middleware["model"] is fake_model
fake_model.with_config.assert_called_once_with(tags=["middleware:summarize"])
def test_create_summarization_middleware_threads_resolved_app_config_to_model(monkeypatch):
    """With no explicit app_config, the same resolved fallback must be handed to create_chat_model."""
    fallback_app_config = _make_app_config([_make_model("fallback-model", supports_thinking=False)])
    fallback_app_config.summarization = SummarizationConfig(enabled=True, model_name="fallback-model")
    fallback_app_config.memory = MemoryConfig(enabled=False)
    from unittest.mock import MagicMock

    captured: dict[str, object] = {}
    fake_model = MagicMock()
    # with_config() returns the same mock so the middleware's chained call works.
    fake_model.with_config.return_value = fake_model

    def _fake_create_chat_model(*, name=None, thinking_enabled, reasoning_effort=None, app_config=None):
        captured["app_config"] = app_config
        return fake_model

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: fallback_app_config)
    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    monkeypatch.setattr(lead_agent_module, "DeerFlowSummarizationMiddleware", lambda **kwargs: kwargs)

    lead_agent_module._create_summarization_middleware()

    # Identity check: no second ambient resolution happened inside the model factory path.
    assert captured["app_config"] is fallback_app_config
def test_memory_middleware_uses_explicit_memory_config_without_global_read(monkeypatch):
    """An explicit memory_config must suppress the ambient get_memory_config() lookup."""
    from deerflow.agents.middlewares import memory_middleware as memory_middleware_module
    from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware

    def _raise_get_memory_config():
        raise AssertionError("ambient get_memory_config() must not be used when memory_config is explicit")

    monkeypatch.setattr(memory_middleware_module, "get_memory_config", _raise_get_memory_config)

    middleware = MemoryMiddleware(memory_config=MemoryConfig(enabled=False))

    # Memory disabled -> after_agent is a no-op; reaching the ambient lookup would raise above.
    assert middleware.after_agent({"messages": []}, runtime=MagicMock(context={"thread_id": "thread-1"})) is None

View File

@ -4,6 +4,7 @@ from types import SimpleNamespace
import anyio
from deerflow.agents.lead_agent import prompt as prompt_module
from deerflow.config.subagents_config import CustomSubagentConfig, SubagentsAppConfig
from deerflow.skills.types import Skill
@ -40,6 +41,21 @@ def test_build_custom_mounts_section_lists_configured_mounts(monkeypatch):
assert "read-only" in section
def test_build_custom_mounts_section_uses_explicit_app_config_without_global_read(monkeypatch):
    """Explicit app_config must drive the mounts section; ambient get_app_config() must not run."""
    mounts = [SimpleNamespace(container_path="/home/user/shared", read_only=False)]
    config = SimpleNamespace(sandbox=SimpleNamespace(mounts=mounts))

    def fail_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    monkeypatch.setattr("deerflow.config.get_app_config", fail_get_app_config)

    section = prompt_module._build_custom_mounts_section(app_config=config)

    # read_only=False mounts are described as read-write.
    assert "`/home/user/shared`" in section
    assert "read-write" in section
def test_apply_prompt_template_includes_custom_mounts(monkeypatch):
mounts = [SimpleNamespace(container_path="/home/user/shared", read_only=False)]
config = SimpleNamespace(
@ -49,8 +65,8 @@ def test_apply_prompt_template_includes_custom_mounts(monkeypatch):
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config)
monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: [])
monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda **kwargs: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "")
monkeypatch.setattr(prompt_module, "_get_memory_context", lambda agent_name=None: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda **kwargs: "")
monkeypatch.setattr(prompt_module, "_get_memory_context", lambda agent_name=None, **kwargs: "")
monkeypatch.setattr(prompt_module, "get_agent_soul", lambda agent_name=None: "")
prompt = prompt_module.apply_prompt_template()
@ -67,8 +83,8 @@ def test_apply_prompt_template_includes_relative_path_guidance(monkeypatch):
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config)
monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: [])
monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda **kwargs: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "")
monkeypatch.setattr(prompt_module, "_get_memory_context", lambda agent_name=None: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda **kwargs: "")
monkeypatch.setattr(prompt_module, "_get_memory_context", lambda agent_name=None, **kwargs: "")
monkeypatch.setattr(prompt_module, "get_agent_soul", lambda agent_name=None: "")
prompt = prompt_module.apply_prompt_template()
@ -77,6 +93,123 @@ def test_apply_prompt_template_includes_relative_path_guidance(monkeypatch):
assert "`hello.txt`, `../uploads/data.csv`, and `../outputs/report.md`" in prompt
def test_apply_prompt_template_threads_explicit_app_config_without_global_config(monkeypatch):
    """apply_prompt_template(app_config=...) must never fall back to ambient config lookups."""
    mounts = [SimpleNamespace(container_path="/home/user/shared", read_only=False)]
    explicit_config = SimpleNamespace(
        sandbox=SimpleNamespace(mounts=mounts),
        skills=SimpleNamespace(container_path="/mnt/explicit-skills"),
        skill_evolution=SimpleNamespace(enabled=False),
        tool_search=SimpleNamespace(enabled=False),
        memory=SimpleNamespace(enabled=False, injection_enabled=True, max_injection_tokens=2000),
        acp_agents={},
    )

    # Both ambient config entry points are booby-trapped.
    def fail_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    def fail_get_memory_config():
        raise AssertionError("ambient get_memory_config() must not be used when app_config is explicit")

    monkeypatch.setattr("deerflow.config.get_app_config", fail_get_app_config)
    monkeypatch.setattr("deerflow.config.memory_config.get_memory_config", fail_get_memory_config)
    monkeypatch.setattr(prompt_module, "get_or_new_skill_storage", lambda app_config=None: SimpleNamespace(load_skills=lambda enabled_only=True: []))
    monkeypatch.setattr(prompt_module, "get_agent_soul", lambda agent_name=None: "")

    prompt = prompt_module.apply_prompt_template(app_config=explicit_config)

    assert "`/home/user/shared`" in prompt
    assert "Custom Mounted Directories" in prompt
def test_apply_prompt_template_threads_explicit_app_config_to_subagents_without_global_config(monkeypatch):
    """Explicit app_config must also drive the subagents prompt section without ambient lookups."""
    explicit_config = SimpleNamespace(
        sandbox=SimpleNamespace(
            use="deerflow.sandbox.local:LocalSandboxProvider",
            allow_host_bash=False,
            mounts=[],
        ),
        subagents=SubagentsAppConfig(
            custom_agents={
                "researcher": CustomSubagentConfig(
                    description="Research agent\nwith details",
                    system_prompt="You research.",
                )
            }
        ),
        skills=SimpleNamespace(container_path="/mnt/skills"),
        skill_evolution=SimpleNamespace(enabled=False),
        tool_search=SimpleNamespace(enabled=False),
        memory=SimpleNamespace(enabled=False, injection_enabled=True, max_injection_tokens=2000),
        acp_agents={},
    )

    def fail_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    def fail_get_subagents_app_config():
        raise AssertionError("ambient get_subagents_app_config() must not be used when app_config is explicit")

    monkeypatch.setattr("deerflow.config.get_app_config", fail_get_app_config)
    monkeypatch.setattr("deerflow.config.subagents_config.get_subagents_app_config", fail_get_subagents_app_config)
    monkeypatch.setattr(prompt_module, "get_or_new_skill_storage", lambda app_config=None: SimpleNamespace(load_skills=lambda enabled_only=True: []))
    monkeypatch.setattr(prompt_module, "get_agent_soul", lambda agent_name=None: "")

    prompt = prompt_module.apply_prompt_template(subagent_enabled=True, app_config=explicit_config)

    # Only the first description line renders; allow_host_bash=False hides the bash subagent.
    assert "**researcher**: Research agent" in prompt
    assert "**bash**" not in prompt
def test_build_acp_section_uses_explicit_app_config_without_global_config(monkeypatch):
    """Explicit app_config with ACP agents must render the section without ambient get_acp_agents()."""
    explicit_config = SimpleNamespace(acp_agents={"codex": object()})

    def fail_get_acp_agents():
        raise AssertionError("ambient get_acp_agents() must not be used when app_config is explicit")

    monkeypatch.setattr("deerflow.config.acp_config.get_acp_agents", fail_get_acp_agents)

    section = prompt_module._build_acp_section(app_config=explicit_config)

    assert "ACP Agent Tasks" in section
    assert "/mnt/acp-workspace/" in section
def test_get_memory_context_uses_explicit_app_config_without_global_config(monkeypatch):
    """Explicit app_config.memory must drive injection limits with no ambient get_memory_config()."""
    explicit_config = SimpleNamespace(
        memory=SimpleNamespace(enabled=True, injection_enabled=True, max_injection_tokens=1234),
    )
    captured: dict[str, object] = {}

    def fail_get_memory_config():
        raise AssertionError("ambient get_memory_config() must not be used when app_config is explicit")

    def fake_get_memory_data(agent_name=None, *, user_id=None):
        captured["agent_name"] = agent_name
        captured["user_id"] = user_id
        return {"facts": []}

    def fake_format_memory_for_injection(memory_data, *, max_tokens):
        captured["memory_data"] = memory_data
        captured["max_tokens"] = max_tokens
        return "remember this"

    monkeypatch.setattr("deerflow.config.memory_config.get_memory_config", fail_get_memory_config)
    monkeypatch.setattr("deerflow.runtime.user_context.get_effective_user_id", lambda: "user-1")
    monkeypatch.setattr("deerflow.agents.memory.get_memory_data", fake_get_memory_data)
    monkeypatch.setattr("deerflow.agents.memory.format_memory_for_injection", fake_format_memory_for_injection)

    context = prompt_module._get_memory_context("agent-a", app_config=explicit_config)

    assert "<memory>" in context
    assert "remember this" in context
    # max_tokens must come from the explicit config's max_injection_tokens.
    assert captured == {
        "agent_name": "agent-a",
        "user_id": "user-1",
        "memory_data": {"facts": []},
        "max_tokens": 1234,
    }
def test_refresh_skills_system_prompt_cache_async_reloads_immediately(monkeypatch, tmp_path):
def make_skill(name: str) -> Skill:
skill_dir = tmp_path / name

View File

@ -106,7 +106,11 @@ def test_get_skills_prompt_section_uses_explicit_config_for_enabled_skills(monke
skill_evolution=SimpleNamespace(enabled=False),
)
def fail_get_app_config():
raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")
monkeypatch.setattr("deerflow.agents.lead_agent.prompt._get_enabled_skills", lambda: [_make_skill("global-skill")])
monkeypatch.setattr("deerflow.config.get_app_config", fail_get_app_config)
monkeypatch.setattr(
"deerflow.agents.lead_agent.prompt.get_or_new_skill_storage",
lambda app_config=None, **kwargs: __import__("types").SimpleNamespace(load_skills=lambda *, enabled_only: [_make_skill("explicit-skill")] if app_config is explicit_config else []),

View File

@ -1,8 +1,12 @@
import asyncio
from types import SimpleNamespace
from unittest.mock import AsyncMock, call
import pytest
from deerflow.runtime.runs.worker import _agent_factory_supports_app_config, _build_runtime_context, _rollback_to_pre_run_checkpoint
from deerflow.runtime.runs.manager import RunManager
from deerflow.runtime.runs.schemas import RunStatus
from deerflow.runtime.runs.worker import RunContext, _agent_factory_supports_app_config, _build_runtime_context, _install_runtime_context, _rollback_to_pre_run_checkpoint, run_agent
class FakeCheckpointer:
@ -12,6 +16,73 @@ class FakeCheckpointer:
self.aput_writes = AsyncMock()
def test_build_runtime_context_includes_app_config_when_present():
    """_build_runtime_context must carry the app_config object through untouched."""
    app_config = object()
    context = _build_runtime_context("thread-1", "run-1", None, app_config)
    assert context["thread_id"] == "thread-1"
    assert context["run_id"] == "run-1"
    # Identity (not equality): the same object instance is threaded.
    assert context["app_config"] is app_config
def test_install_runtime_context_preserves_existing_thread_id_and_threads_app_config():
    """A caller-supplied thread_id wins over the record's; run_id and app_config are still installed."""
    app_config = object()
    config = {"context": {"thread_id": "caller-thread"}}
    _install_runtime_context(
        config,
        {
            "thread_id": "record-thread",
            "run_id": "run-1",
            "app_config": app_config,
        },
    )
    assert config["context"]["thread_id"] == "caller-thread"
    assert config["context"]["run_id"] == "run-1"
    assert config["context"]["app_config"] is app_config
@pytest.mark.anyio
async def test_run_agent_threads_explicit_app_config_into_config_only_factory():
run_manager = RunManager()
record = await run_manager.create("thread-1")
bridge = SimpleNamespace(
publish=AsyncMock(),
publish_end=AsyncMock(),
cleanup=AsyncMock(),
)
app_config = object()
captured: dict[str, object] = {}
class DummyAgent:
async def astream(self, graph_input, config=None, stream_mode=None, subgraphs=False):
captured["astream_context"] = config["context"]
yield {"messages": []}
def factory(*, config):
captured["factory_context"] = config["context"]
return DummyAgent()
await run_agent(
bridge,
run_manager,
record,
ctx=RunContext(checkpointer=None, app_config=app_config),
agent_factory=factory,
graph_input={},
config={},
)
await asyncio.sleep(0)
assert captured["factory_context"]["app_config"] is app_config
assert captured["astream_context"]["app_config"] is app_config
assert run_manager.get(record.run_id).status == RunStatus.success
bridge.publish_end.assert_awaited_once_with(record.run_id)
bridge.cleanup.assert_awaited_once_with(record.run_id, delay=60)
@pytest.mark.anyio
async def test_rollback_restores_snapshot_without_deleting_thread():
checkpointer = FakeCheckpointer(put_result={"configurable": {"thread_id": "thread-1", "checkpoint_ns": "", "checkpoint_id": "restored-1"}})

View File

@ -204,7 +204,7 @@ class TestAgentConstruction:
SubagentExecutor = classes["SubagentExecutor"]
app_config = object()
app_config = SimpleNamespace(models=[SimpleNamespace(name="default-model")])
model = object()
middlewares = [object()]
agent = object()
@ -266,6 +266,43 @@ class TestAgentConstruction:
assert captured["agent"]["tools"] == []
assert captured["agent"]["system_prompt"] == base_config.system_prompt
    @pytest.mark.anyio
    async def test_load_skill_messages_uses_explicit_app_config_for_skill_storage(
        self,
        classes,
        base_config,
        monkeypatch: pytest.MonkeyPatch,
        tmp_path,
    ):
        """Explicit app_config must be threaded into subagent skill storage lookup."""
        SubagentExecutor = classes["SubagentExecutor"]
        app_config = SimpleNamespace(models=[SimpleNamespace(name="default-model")])
        skill_dir = tmp_path / "demo-skill"
        skill_dir.mkdir()
        skill_file = skill_dir / "SKILL.md"
        skill_file.write_text("Use demo skill", encoding="utf-8")
        captured: dict[str, object] = {}

        def fake_get_or_new_skill_storage(*, app_config=None):
            captured["app_config"] = app_config
            return SimpleNamespace(load_skills=lambda *, enabled_only: [SimpleNamespace(name="demo-skill", skill_file=skill_file)])

        monkeypatch.setattr("deerflow.skills.storage.get_or_new_skill_storage", fake_get_or_new_skill_storage)

        executor = SubagentExecutor(
            config=base_config,
            tools=[],
            app_config=app_config,
            thread_id="test-thread",
        )
        messages = await executor._load_skill_messages()

        # Skill storage must have been created with the exact explicit config object.
        assert captured["app_config"] is app_config
        assert len(messages) == 1
        assert "Use demo skill" in messages[0].content
# -----------------------------------------------------------------------------
# Async Execution Path Tests

View File

@ -9,6 +9,8 @@ Covers:
- Skills filter passthrough in task_tool config assembly
"""
from types import SimpleNamespace
import pytest
from deerflow.config.subagents_config import (
@ -343,12 +345,54 @@ class TestRegistryCustomAgentLookup:
assert config.timeout_seconds == 600
assert config.model == "inherit"
    def test_custom_agent_found_from_explicit_app_config_without_global_config(self, monkeypatch):
        """A custom agent on the explicit app_config must resolve without the ambient lookup."""
        from deerflow.subagents.registry import get_subagent_config

        def fail_get_subagents_app_config():
            raise AssertionError("ambient get_subagents_app_config() must not be used when app_config is explicit")

        monkeypatch.setattr("deerflow.config.subagents_config.get_subagents_app_config", fail_get_subagents_app_config)
        app_config = SimpleNamespace(
            subagents=SubagentsAppConfig(
                custom_agents={
                    "analysis": CustomSubagentConfig(
                        description="Data analysis specialist",
                        system_prompt="You are a data analysis subagent.",
                        skills=["data-analysis"],
                    )
                }
            )
        )

        config = get_subagent_config("analysis", app_config=app_config)

        assert config is not None
        assert config.name == "analysis"
        assert config.skills == ["data-analysis"]
    def test_custom_agent_not_found(self):
        """Unknown subagent names must resolve to None."""
        from deerflow.subagents.registry import get_subagent_config

        _reset_subagents_config()
        assert get_subagent_config("nonexistent") is None
    def test_get_available_subagent_names_falls_back_when_subagents_app_config_lacks_sandbox(self, monkeypatch):
        """A SubagentsAppConfig has no .sandbox attribute, so the bash gate must use the no-arg ambient lookup."""
        from deerflow.subagents import registry as registry_module
        from deerflow.subagents.registry import get_available_subagent_names

        captured: dict[str, tuple] = {}

        def fake_is_host_bash_allowed(*args, **kwargs):
            captured["args"] = args
            return True

        monkeypatch.setattr(registry_module, "is_host_bash_allowed", fake_is_host_bash_allowed)

        get_available_subagent_names(app_config=SubagentsAppConfig())

        # Empty args tuple == is_host_bash_allowed() was called without the bogus config.
        assert captured["args"] == ()
def test_builtin_takes_priority_over_custom(self):
"""If a custom agent has the same name as a builtin, builtin wins."""
from deerflow.subagents.builtins import BUILTIN_SUBAGENTS

View File

@ -24,8 +24,11 @@ class FakeSubagentStatus(Enum):
TIMED_OUT = "timed_out"
def _make_runtime() -> SimpleNamespace:
def _make_runtime(*, app_config=None) -> SimpleNamespace:
# Minimal ToolRuntime-like object; task_tool only reads these three attributes.
context = {"thread_id": "thread-1"}
if app_config is not None:
context["app_config"] = app_config
return SimpleNamespace(
state={
"sandbox": {"sandbox_id": "local"},
@ -35,14 +38,14 @@ def _make_runtime() -> SimpleNamespace:
"outputs_path": "/tmp/outputs",
},
},
context={"thread_id": "thread-1"},
context=context,
config={"metadata": {"model_name": "ark-model", "trace_id": "trace-1"}},
)
def _make_subagent_config() -> SubagentConfig:
def _make_subagent_config(name: str = "general-purpose") -> SubagentConfig:
return SubagentConfig(
name="general-purpose",
name=name,
description="General helper",
system_prompt="Base system prompt",
max_turns=50,
@ -112,6 +115,68 @@ def test_task_tool_rejects_bash_subagent_when_host_bash_disabled(monkeypatch):
assert result.startswith("Error: Bash subagent is disabled")
def test_task_tool_threads_runtime_app_config_to_subagent_dependencies(monkeypatch):
    """app_config from the tool runtime context must reach every subagent dependency (names, config, bash gate, tools, executor)."""
    app_config = object()
    config = _make_subagent_config(name="bash")
    runtime = _make_runtime(app_config=app_config)
    events = []
    captured = {}

    class DummyExecutor:
        # Stand-in for SubagentExecutor: records kwargs, skips real execution.
        def __init__(self, **kwargs):
            captured["executor_kwargs"] = kwargs

        def execute_async(self, prompt, task_id=None):
            captured["prompt"] = prompt
            return task_id or "generated-task-id"

    def fake_get_available_subagent_names(*, app_config):
        captured["names_app_config"] = app_config
        return ["bash"]

    def fake_get_subagent_config(name, *, app_config):
        captured["config_lookup"] = (name, app_config)
        return config

    def fake_is_host_bash_allowed(config):
        captured["bash_gate_app_config"] = config
        return True

    def fake_get_available_tools(**kwargs):
        captured["tools_kwargs"] = kwargs
        return ["tool-a"]

    monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
    monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
    monkeypatch.setattr(task_tool_module, "get_available_subagent_names", fake_get_available_subagent_names)
    monkeypatch.setattr(task_tool_module, "get_subagent_config", fake_get_subagent_config)
    monkeypatch.setattr(task_tool_module, "is_host_bash_allowed", fake_is_host_bash_allowed)
    monkeypatch.setattr(
        task_tool_module,
        "get_background_task_result",
        lambda _: _make_result(FakeSubagentStatus.COMPLETED, result="done"),
    )
    monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
    monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
    monkeypatch.setattr("deerflow.tools.get_available_tools", fake_get_available_tools)

    output = _run_task_tool(
        runtime=runtime,
        description="运行命令",
        prompt="inspect files",
        subagent_type="bash",
        tool_call_id="tc-explicit-config",
    )

    assert output == "Task Succeeded. Result: done"
    # Every dependency saw the exact object from the runtime context.
    assert captured["names_app_config"] is app_config
    assert captured["config_lookup"] == ("bash", app_config)
    assert captured["bash_gate_app_config"] is app_config
    assert captured["tools_kwargs"]["app_config"] is app_config
    assert captured["executor_kwargs"]["app_config"] is app_config
    assert captured["executor_kwargs"]["tools"] == ["tool-a"]
def test_task_tool_emits_running_and_completed_events(monkeypatch):
config = _make_subagent_config()
runtime = _make_runtime()
@ -421,7 +486,8 @@ def test_task_tool_runtime_none_passes_groups_none(monkeypatch):
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
monkeypatch.setattr("deerflow.tools.get_available_tools", get_available_tools)
monkeypatch.setattr(task_tool_module, "get_app_config", lambda: SimpleNamespace(models=[SimpleNamespace(name="default-model")]))
fallback_app_config = SimpleNamespace(models=[SimpleNamespace(name="default-model")])
monkeypatch.setattr(task_tool_module, "get_app_config", lambda: fallback_app_config)
output = _run_task_tool(
runtime=None,
@ -433,7 +499,12 @@ def test_task_tool_runtime_none_passes_groups_none(monkeypatch):
assert output == "Task Succeeded. Result: ok"
# runtime is None -> metadata is empty dict -> groups=None, model falls back to app default.
get_available_tools.assert_called_once_with(model_name="default-model", groups=None, subagent_enabled=False)
get_available_tools.assert_called_once_with(
model_name="default-model",
groups=None,
subagent_enabled=False,
app_config=fallback_app_config,
)
config = _make_subagent_config()
events = []

View File

@ -1,6 +1,7 @@
"""Core behavior tests for TitleMiddleware."""
import asyncio
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock
from langchain_core.messages import AIMessage, HumanMessage
@ -98,6 +99,34 @@ class TestTitleMiddlewareCoreLogic:
"tags": ["middleware:title"],
}
    def test_generate_title_uses_explicit_app_config_without_global_config(self, monkeypatch):
        """Explicit app_config.title must configure title generation; ambient get_title_config() must not run."""
        title_config = TitleConfig(enabled=True, model_name="title-model", max_chars=20)
        app_config = SimpleNamespace(title=title_config)
        middleware = TitleMiddleware(app_config=app_config)
        model = MagicMock()
        model.ainvoke = AsyncMock(return_value=AIMessage(content="显式标题"))

        def fail_get_title_config():
            raise AssertionError("ambient get_title_config() must not be used when app_config is explicit")

        monkeypatch.setattr(title_middleware_module, "get_title_config", fail_get_title_config)
        monkeypatch.setattr(title_middleware_module, "create_chat_model", MagicMock(return_value=model))

        state = {
            "messages": [
                HumanMessage(content="请帮我写一个标题"),
                AIMessage(content="好的"),
            ]
        }
        result = asyncio.run(middleware._agenerate_title_result(state))

        assert result == {"title": "显式标题"}
        # The model factory must have received the explicit app_config, not an ambient one.
        title_middleware_module.create_chat_model.assert_called_once_with(
            name="title-model",
            thinking_enabled=False,
            app_config=app_config,
        )
def test_generate_title_normalizes_structured_message_content(self, monkeypatch):
_set_test_title_config(max_chars=20)
middleware = TitleMiddleware()