mirror of
https://github.com/bytedance/deer-flow.git
synced 2026-05-10 18:58:21 +00:00
* Make loop detection configurable Expose LoopDetectionMiddleware thresholds through config.yaml while preserving existing defaults and allowing the middleware to be disabled. Refs bytedance/deer-flow#2517 * feat(loop-detection): add per-tool tool_freq_overrides to Phase 1 Adds ToolFreqOverride model and tool_freq_overrides field to LoopDetectionConfig, wires it through LoopDetectionMiddleware, and documents the option in config.example.yaml. Resolves the gap flagged in the #2586 review: without per-tool overrides, users hit by #2510/#2511 (RNA-seq workflows exceeding the bash hard limit) had no way to raise thresholds for one tool without loosening the global limit for every tool. Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com> * Potential fix for pull request finding Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> * docs(loop-detection): document tool_freq_overrides in LoopDetectionMiddleware docstring Add the missing Args entry for tool_freq_overrides, explaining the (warn, hard_limit) tuple structure and how per-tool thresholds supersede the global tool_freq_warn / tool_freq_hard_limit for named tools. Also run ruff format on the three files flagged by the lint check. 
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * fix(loop-detection): validate LoopDetectionMiddleware __init__ params eagerly Raise clear ValueError at construction time instead of crashing at unpack-time inside _track_and_check when bad values are passed: - tool_freq_overrides: must be 2-tuples of positive ints with hard_limit >= warn - scalar thresholds: warn_threshold, hard_limit, tool_freq_warn, tool_freq_hard_limit must be >= 1 and hard limits must >= their warn pairs - window_size, max_tracked_threads must be >= 1 Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * fix(test): isolate credential loader directory-path test from real ~/.claude The test didn't monkeypatch HOME, so on any machine with real Claude Code credentials at ~/.claude/.credentials.json the function fell through to those credentials and the assertion failed. Adding HOME redirect ensures the default credential path doesn't exist during the test. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * style(test): add blank lines after import pytest in TestInitValidation Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * refactor(loop-detection): collapse dual validation to LoopDetectionConfig Modifications - LoopDetectionMiddleware.__init__: stripped of all ValueError raises; becomes a plain field-assignment constructor. - LoopDetectionMiddleware.from_config: classmethod that builds the middleware from a Pydantic-validated LoopDetectionConfig and handles the ToolFreqOverride -> tuple[int, int] conversion. - agents/factory.py: SDK construction routed through LoopDetectionMiddleware.from_config(LoopDetectionConfig()) so the defaults path is Pydantic-validated too. - agents/lead_agent/agent.py: uses from_config instead of unpacking config fields by hand. 
- tests/test_loop_detection_middleware.py: deleted TestInitValidation (16 methods exercising the removed __init__ checks); added TestFromConfig (4 tests: scalar field mapping, override tuple conversion, empty overrides, behavioral smoke test). Result: one validation layer (Pydantic), zero duplication, no __new__ hacks. Both production construction sites flow through LoopDetectionConfig. Test results make test -> 2977 passed, 18 skipped, 0 failed (137s) make format -> All checks passed; 411 files left unchanged * feat(agents): make loop_detection configurable in create_deerflow_agent Adds a `loop_detection: bool | AgentMiddleware = True` field to RuntimeFeatures, mirroring the existing pattern used by `sandbox`, `memory`, and `vision`. SDK users can now disable LoopDetectionMiddleware or replace it with a custom instance built from their own LoopDetectionConfig — e.g. `LoopDetectionMiddleware.from_config(my_cfg)` — instead of being stuck with the hardcoded defaults previously installed by the SDK factory. The lead-agent path (which already reads AppConfig.loop_detection) is unchanged, and the default `True` preserves prior always-on behavior for all existing callers. Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com> --------- Co-authored-by: knight0940 <631532668@qq.com> Co-authored-by: Claude Opus 4.7 <noreply@anthropic.com> Co-authored-by: Amorend <142649913+knight0940@users.noreply.github.com> Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
469 lines
18 KiB
Python
469 lines
18 KiB
Python
"""Tests for lead agent runtime model resolution behavior."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import inspect
|
|
from unittest.mock import MagicMock
|
|
|
|
import pytest
|
|
|
|
from deerflow.agents.lead_agent import agent as lead_agent_module
|
|
from deerflow.agents.middlewares.loop_detection_middleware import LoopDetectionMiddleware
|
|
from deerflow.config.app_config import AppConfig
|
|
from deerflow.config.loop_detection_config import LoopDetectionConfig
|
|
from deerflow.config.memory_config import MemoryConfig
|
|
from deerflow.config.model_config import ModelConfig
|
|
from deerflow.config.sandbox_config import SandboxConfig
|
|
from deerflow.config.summarization_config import SummarizationConfig
|
|
|
|
|
|
def _make_app_config(models: list[ModelConfig], loop_detection: LoopDetectionConfig | None = None) -> AppConfig:
    """Build a minimal AppConfig for these tests.

    Uses the local sandbox provider and falls back to a default
    LoopDetectionConfig when none is supplied.
    """
    effective_loop_detection = LoopDetectionConfig() if loop_detection is None else loop_detection
    return AppConfig(
        models=models,
        sandbox=SandboxConfig(use="deerflow.sandbox.local:LocalSandboxProvider"),
        loop_detection=effective_loop_detection,
    )
|
|
|
|
|
|
def _make_model(name: str, *, supports_thinking: bool, supports_vision: bool = False) -> ModelConfig:
    """Create a ModelConfig stub for tests.

    Args:
        name: Used for the config's ``name``, ``display_name``, and ``model`` fields.
        supports_thinking: Whether the stub advertises thinking support.
        supports_vision: Whether the stub advertises vision support. Defaults to
            False (the previously hard-coded value), so existing callers are
            unaffected; tests that need a vision-capable model can now use this
            helper instead of inlining a full ModelConfig.
    """
    return ModelConfig(
        name=name,
        display_name=name,
        description=None,
        use="langchain_openai:ChatOpenAI",
        model=name,
        supports_thinking=supports_thinking,
        supports_vision=supports_vision,
    )
|
|
|
|
|
|
def test_make_lead_agent_signature_matches_langgraph_server_factory_abi():
    """The factory must take exactly one parameter named ``config`` (langgraph server ABI)."""
    params = inspect.signature(lead_agent_module.make_lead_agent).parameters
    assert [*params] == ["config"]
|
|
|
|
|
|
def test_internal_make_lead_agent_uses_explicit_app_config(monkeypatch):
    """_make_lead_agent must use the explicitly passed app_config, never the ambient global."""
    app_config = _make_app_config([_make_model("explicit-model", supports_thinking=False)])

    import deerflow.tools as tools_module

    def _raise_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    # Any fallback to the ambient config would trip the AssertionError above.
    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])

    captured: dict[str, object] = {}

    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
        # Record what the factory forwarded so the assertions below can inspect it.
        captured["name"] = name
        captured["app_config"] = app_config
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    # create_agent is stubbed to echo its kwargs so `result` exposes them directly.
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    result = lead_agent_module._make_lead_agent(
        {"configurable": {"model_name": "explicit-model"}},
        app_config=app_config,
    )

    # Dict-equality also proves nothing unexpected was captured.
    assert captured == {
        "name": "explicit-model",
        "app_config": app_config,
    }
    assert result["model"] is not None
|
|
|
|
|
|
def test_make_lead_agent_uses_runtime_app_config_from_context_without_global_read(monkeypatch):
    """An app_config carried in the runtime "context" must be used instead of the global one."""
    app_config = _make_app_config([_make_model("context-model", supports_thinking=False)])

    import deerflow.tools as tools_module

    def _raise_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when runtime context already carries app_config")

    # Any fallback to the ambient config would trip the AssertionError above.
    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])

    captured: dict[str, object] = {}

    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
        # Record the forwarded model name and app_config for the assertions below.
        captured["name"] = name
        captured["app_config"] = app_config
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    # create_agent is stubbed to echo its kwargs so `result` exposes them directly.
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    result = lead_agent_module.make_lead_agent(
        {
            "context": {
                "model_name": "context-model",
                "app_config": app_config,
            }
        }
    )

    assert captured == {
        "name": "context-model",
        "app_config": app_config,
    }
    assert result["model"] is not None
|
|
|
|
|
|
def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
    """An unknown model name resolves to the first configured model and logs a warning."""
    app_config = _make_app_config(
        [
            _make_model("default-model", supports_thinking=False),
            _make_model("other-model", supports_thinking=True),
        ]
    )
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)

    with caplog.at_level("WARNING"):
        resolved_name = lead_agent_module._resolve_model_name("missing-model")

    assert resolved_name == "default-model"
    assert "fallback to default model 'default-model'" in caplog.text
|
|
|
|
|
|
def test_resolve_model_name_uses_default_when_none(monkeypatch):
    """Passing None resolves to the first configured model."""
    app_config = _make_app_config(
        [
            _make_model("default-model", supports_thinking=False),
            _make_model("other-model", supports_thinking=True),
        ]
    )
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)

    resolved_name = lead_agent_module._resolve_model_name(None)

    assert resolved_name == "default-model"
|
|
|
|
|
|
def test_resolve_model_name_raises_when_no_models_configured(monkeypatch):
    """With an empty model list, resolution must fail loudly instead of guessing."""
    empty_config = _make_app_config([])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: empty_config)

    with pytest.raises(ValueError, match="No chat models are configured"):
        lead_agent_module._resolve_model_name("missing-model")
|
|
|
|
|
|
def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkeypatch):
    """thinking_enabled=True must be forced off for a model with supports_thinking=False."""
    app_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])

    import deerflow.tools as tools_module

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])

    captured: dict[str, object] = {}

    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
        # Record every forwarded argument so the assertions below can inspect them.
        captured["name"] = name
        captured["thinking_enabled"] = thinking_enabled
        captured["reasoning_effort"] = reasoning_effort
        captured["app_config"] = app_config
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    # create_agent is stubbed to echo its kwargs so `result` exposes them directly.
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    result = lead_agent_module.make_lead_agent(
        {
            "configurable": {
                "model_name": "safe-model",
                # Requested on, but the model does not support thinking.
                "thinking_enabled": True,
                "is_plan_mode": False,
                "subagent_enabled": False,
            }
        }
    )

    assert captured["name"] == "safe-model"
    # The request asked for thinking, but the model capability wins.
    assert captured["thinking_enabled"] is False
    assert captured["app_config"] is app_config
    assert result["model"] is not None
|
|
|
|
|
|
def test_make_lead_agent_reads_runtime_options_from_context(monkeypatch):
    """All runtime options under "context" (model, thinking, effort, subagents) must be honored."""
    app_config = _make_app_config(
        [
            _make_model("default-model", supports_thinking=False),
            _make_model("context-model", supports_thinking=True),
        ]
    )

    import deerflow.tools as tools_module

    # MagicMock lets us assert the exact call signature afterwards.
    get_available_tools = MagicMock(return_value=[])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(tools_module, "get_available_tools", get_available_tools)
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])

    captured: dict[str, object] = {}

    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
        # Record every forwarded argument so the assertions below can inspect them.
        captured["name"] = name
        captured["thinking_enabled"] = thinking_enabled
        captured["reasoning_effort"] = reasoning_effort
        captured["app_config"] = app_config
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    # create_agent is stubbed to echo its kwargs so `result` exposes them directly.
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    result = lead_agent_module.make_lead_agent(
        {
            "context": {
                "model_name": "context-model",
                "thinking_enabled": False,
                "reasoning_effort": "high",
                "is_plan_mode": True,
                "subagent_enabled": True,
                "max_concurrent_subagents": 7,
            }
        }
    )

    # Dict-equality also proves nothing unexpected was captured.
    assert captured == {
        "name": "context-model",
        "thinking_enabled": False,
        "reasoning_effort": "high",
        "app_config": app_config,
    }
    # Tool discovery must see the context model name, subagent flag, and app_config.
    get_available_tools.assert_called_once_with(model_name="context-model", groups=None, subagent_enabled=True, app_config=app_config)
    assert result["model"] is not None
|
|
|
|
|
|
def test_make_lead_agent_rejects_invalid_bootstrap_agent_name(monkeypatch):
    """A path-traversal style agent_name must be rejected during bootstrap."""
    app_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)

    bootstrap_request = {
        "configurable": {
            "model_name": "safe-model",
            "thinking_enabled": False,
            "is_plan_mode": False,
            "subagent_enabled": False,
            "is_bootstrap": True,
            "agent_name": "../../../tmp/evil",
        }
    }

    with pytest.raises(ValueError, match="Invalid agent name"):
        lead_agent_module.make_lead_agent(bootstrap_request)
|
|
|
|
|
|
def test_build_middlewares_uses_resolved_model_name_for_vision(monkeypatch):
    """Vision middleware selection must use the resolved model_name argument,
    not the (stale) name still present in the configurable dict."""
    app_config = _make_app_config(
        [
            _make_model("stale-model", supports_thinking=False),
            # Inline ModelConfig because _make_model hard-codes supports_vision=False.
            ModelConfig(
                name="vision-model",
                display_name="vision-model",
                description=None,
                use="langchain_openai:ChatOpenAI",
                model="vision-model",
                supports_thinking=False,
                supports_vision=True,
            ),
        ]
    )

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda **kwargs: None)
    monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)

    middlewares = lead_agent_module._build_middlewares(
        # configurable still says "stale-model"; the explicit model_name must win.
        {"configurable": {"model_name": "stale-model", "is_plan_mode": False, "subagent_enabled": False}},
        model_name="vision-model",
        custom_middlewares=[MagicMock()],
        app_config=app_config,
    )

    assert any(isinstance(m, lead_agent_module.ViewImageMiddleware) for m in middlewares)
    # verify the custom middleware is injected correctly
    # NOTE(review): position [-2] encodes the expected injection slot — second-to-last.
    assert len(middlewares) > 0 and isinstance(middlewares[-2], MagicMock)
|
|
|
|
|
|
def test_build_middlewares_passes_explicit_app_config_to_shared_factory(monkeypatch):
    """The explicit app_config must flow into the shared middleware factory,
    TitleMiddleware, and MemoryMiddleware, with no ambient global read."""
    app_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])
    captured: dict[str, object] = {}

    def _raise_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    def _fake_build_lead_runtime_middlewares(*, app_config, lazy_init):
        # Record the factory's keyword arguments for the assertions below.
        captured["app_config"] = app_config
        captured["lazy_init"] = lazy_init
        return ["base-middleware"]

    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
    monkeypatch.setattr(
        lead_agent_module,
        "build_lead_runtime_middlewares",
        _fake_build_lead_runtime_middlewares,
    )
    monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda **kwargs: None)
    monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)
    # `setdefault(...) or "..."` records the argument (setdefault returns the stored
    # value, here a truthy-or-falsy object) and then yields the sentinel string.
    monkeypatch.setattr(
        lead_agent_module,
        "TitleMiddleware",
        lambda *, app_config: captured.setdefault("title_app_config", app_config) or "title-middleware",
    )
    monkeypatch.setattr(
        lead_agent_module,
        "MemoryMiddleware",
        lambda agent_name=None, *, memory_config: captured.setdefault("memory_config", memory_config) or "memory-middleware",
    )

    middlewares = lead_agent_module._build_middlewares(
        {"configurable": {"is_plan_mode": False, "subagent_enabled": False}},
        model_name="safe-model",
        app_config=app_config,
    )

    # Dict-equality also proves nothing unexpected was captured.
    assert captured == {
        "app_config": app_config,
        "lazy_init": True,
        "title_app_config": app_config,
        "memory_config": app_config.memory,
    }
    # The shared factory's middlewares must come first in the stack.
    assert middlewares[0] == "base-middleware"
|
|
|
|
|
|
def test_build_middlewares_uses_loop_detection_config(monkeypatch):
    """Every LoopDetectionConfig threshold must be forwarded onto the built middleware."""
    app_config = _make_app_config(
        [_make_model("safe-model", supports_thinking=False)],
        # Non-default values everywhere, so a dropped field would be detected.
        loop_detection=LoopDetectionConfig(
            warn_threshold=7,
            hard_limit=9,
            window_size=30,
            max_tracked_threads=40,
            tool_freq_warn=50,
            tool_freq_hard_limit=60,
        ),
    )

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(lead_agent_module, "build_lead_runtime_middlewares", lambda *, app_config, lazy_init=True: [])
    monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda *, app_config=None: None)
    monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)

    middlewares = lead_agent_module._build_middlewares(
        {"configurable": {"is_plan_mode": False, "subagent_enabled": False}},
        model_name="safe-model",
        app_config=app_config,
    )

    # next(...) raises StopIteration (failing the test) if the middleware is absent.
    loop_detection = next(m for m in middlewares if isinstance(m, LoopDetectionMiddleware))
    assert loop_detection.warn_threshold == 7
    assert loop_detection.hard_limit == 9
    assert loop_detection.window_size == 30
    assert loop_detection.max_tracked_threads == 40
    assert loop_detection.tool_freq_warn == 50
    assert loop_detection.tool_freq_hard_limit == 60
|
|
|
|
|
|
def test_build_middlewares_omits_loop_detection_when_disabled(monkeypatch):
    """LoopDetectionMiddleware must be absent when loop_detection.enabled is False."""
    app_config = _make_app_config(
        [_make_model("safe-model", supports_thinking=False)],
        loop_detection=LoopDetectionConfig(enabled=False),
    )

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
    monkeypatch.setattr(lead_agent_module, "build_lead_runtime_middlewares", lambda *, app_config, lazy_init=True: [])
    monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda *, app_config=None: None)
    monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)

    built = lead_agent_module._build_middlewares(
        {"configurable": {"is_plan_mode": False, "subagent_enabled": False}},
        model_name="safe-model",
        app_config=app_config,
    )

    assert all(not isinstance(middleware, LoopDetectionMiddleware) for middleware in built)
|
|
|
|
|
|
def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch):
    """The summarization middleware must build its model from the configured
    summarization.model_name (with thinking off) and tag it for tracing.

    Fix: dropped the redundant function-local ``from unittest.mock import
    MagicMock`` — MagicMock is already imported at module level.
    """
    app_config = _make_app_config([_make_model("model-masswork", supports_thinking=False)])
    app_config.summarization = SummarizationConfig(enabled=True, model_name="model-masswork")
    app_config.memory = MemoryConfig(enabled=False)

    captured: dict[str, object] = {}
    fake_model = MagicMock()
    # with_config returns the same mock so the middleware receives `fake_model`.
    fake_model.with_config.return_value = fake_model

    def _fake_create_chat_model(*, name=None, thinking_enabled, reasoning_effort=None, app_config=None):
        # Record every forwarded argument so the assertions below can inspect them.
        captured["name"] = name
        captured["thinking_enabled"] = thinking_enabled
        captured["reasoning_effort"] = reasoning_effort
        captured["app_config"] = app_config
        return fake_model

    def _raise_get_app_config():
        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")

    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    # Stub the middleware class to echo its kwargs so they can be inspected.
    monkeypatch.setattr(lead_agent_module, "DeerFlowSummarizationMiddleware", lambda **kwargs: kwargs)

    middleware = lead_agent_module._create_summarization_middleware(app_config=app_config)

    assert captured["name"] == "model-masswork"
    # Summarization never needs thinking mode.
    assert captured["thinking_enabled"] is False
    assert captured["app_config"] is app_config
    assert middleware["model"] is fake_model
    fake_model.with_config.assert_called_once_with(tags=["middleware:summarize"])
|
|
|
|
|
|
def test_create_summarization_middleware_threads_resolved_app_config_to_model(monkeypatch):
    """When called without an explicit app_config, the middleware factory must
    resolve the ambient config once and thread it into create_chat_model.

    Fix: dropped the redundant function-local ``from unittest.mock import
    MagicMock`` — MagicMock is already imported at module level.
    """
    fallback_app_config = _make_app_config([_make_model("fallback-model", supports_thinking=False)])
    fallback_app_config.summarization = SummarizationConfig(enabled=True, model_name="fallback-model")
    fallback_app_config.memory = MemoryConfig(enabled=False)

    captured: dict[str, object] = {}
    fake_model = MagicMock()
    # with_config returns the same mock so the middleware receives `fake_model`.
    fake_model.with_config.return_value = fake_model

    def _fake_create_chat_model(*, name=None, thinking_enabled, reasoning_effort=None, app_config=None):
        # Only the threaded app_config matters for this test.
        captured["app_config"] = app_config
        return fake_model

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: fallback_app_config)
    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
    monkeypatch.setattr(lead_agent_module, "DeerFlowSummarizationMiddleware", lambda **kwargs: kwargs)

    lead_agent_module._create_summarization_middleware()

    # The ambient config resolved inside the factory must reach the model builder.
    assert captured["app_config"] is fallback_app_config
|
|
|
|
|
|
def test_memory_middleware_uses_explicit_memory_config_without_global_read(monkeypatch):
    """An explicit memory_config must prevent any ambient get_memory_config() read."""
    from deerflow.agents.middlewares import memory_middleware as memory_middleware_module
    from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware

    def _forbid_global_read():
        raise AssertionError("ambient get_memory_config() must not be used when memory_config is explicit")

    monkeypatch.setattr(memory_middleware_module, "get_memory_config", _forbid_global_read)

    middleware = MemoryMiddleware(memory_config=MemoryConfig(enabled=False))

    fake_runtime = MagicMock(context={"thread_id": "thread-1"})
    assert middleware.after_agent({"messages": []}, runtime=fake_runtime) is None
|