feat(trace): Add run_name to the trace info for system agents. (#2492)

* feat(trace): Add `run_name` to the trace info for suggestions and memory.

before(in langsmith):
CodexChatModel
CodexChatModel
lead_agent
after:
suggest_agent
memory_agent
lead_agent

feat(trace): Add `run_name` to the trace info for suggestions and memory.

before(in langsmith):
CodexChatModel
CodexChatModel
lead_agent
after:
suggest_agent
memory_agent
lead_agent

* feat(trace): Add `run_name` to the trace info for system agents.

before(in langsmith):
CodexChatModel
CodexChatModel
CodexChatModel
CodexChatModel
lead_agent
after:
suggest_agent
title_agent
security_agent
memory_agent
lead_agent

* chore(format): apply code formatting

---------

Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
This commit is contained in:
Airene Fang 2026-04-24 17:06:55 +08:00 committed by GitHub
parent e8572b9d0c
commit 11f557a2c6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 34 additions and 4 deletions

View File

@ -121,7 +121,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S
try: try:
model = create_chat_model(name=request.model_name, thinking_enabled=False) model = create_chat_model(name=request.model_name, thinking_enabled=False)
response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)]) response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)], config={"run_name": "suggest_agent"})
raw = _extract_response_text(response.content) raw = _extract_response_text(response.content)
suggestions = _parse_json_string_list(raw) or [] suggestions = _parse_json_string_list(raw) or []
cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()] cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()]

View File

@ -409,7 +409,7 @@ class MemoryUpdater:
current_memory, prompt = prepared current_memory, prompt = prepared
model = self._get_model() model = self._get_model()
response = await model.ainvoke(prompt) response = await model.ainvoke(prompt, config={"run_name": "memory_agent"})
return await asyncio.to_thread( return await asyncio.to_thread(
self._finalize_update, self._finalize_update,
current_memory=current_memory, current_memory=current_memory,

View File

@ -127,7 +127,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
model = create_chat_model(name=config.model_name, thinking_enabled=False) model = create_chat_model(name=config.model_name, thinking_enabled=False)
else: else:
model = create_chat_model(thinking_enabled=False) model = create_chat_model(thinking_enabled=False)
response = await model.ainvoke(prompt) response = await model.ainvoke(prompt, config={"run_name": "title_agent"})
title = self._parse_title(response.content) title = self._parse_title(response.content)
if title: if title:
return {"title": title} return {"title": title}

View File

@ -54,7 +54,8 @@ async def scan_skill_content(content: str, *, executable: bool = False, location
[ [
{"role": "system", "content": rubric}, {"role": "system", "content": rubric},
{"role": "user", "content": prompt}, {"role": "user", "content": prompt},
] ],
config={"run_name": "security_agent"},
) )
parsed = _extract_json_object(str(getattr(response, "content", "") or "")) parsed = _extract_json_object(str(getattr(response, "content", "") or ""))
if parsed and parsed.get("decision") in {"allow", "warn", "block"}: if parsed and parsed.get("decision") in {"allow", "warn", "block"}:

View File

@ -598,6 +598,7 @@ class TestUpdateMemoryStructuredResponse:
assert result is True assert result is True
model.ainvoke.assert_awaited_once() model.ainvoke.assert_awaited_once()
assert model.ainvoke.await_args.kwargs["config"] == {"run_name": "memory_agent"}
def test_correction_hint_injected_when_detected(self): def test_correction_hint_injected_when_detected(self):
updater = MemoryUpdater() updater = MemoryUpdater()

View File

@ -5,6 +5,27 @@ import pytest
from deerflow.skills.security_scanner import scan_skill_content from deerflow.skills.security_scanner import scan_skill_content
@pytest.mark.anyio
async def test_scan_skill_content_passes_run_name_to_model(monkeypatch):
config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None))
fake_response = SimpleNamespace(content='{"decision":"allow","reason":"ok"}')
class FakeModel:
async def ainvoke(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
return fake_response
model = FakeModel()
monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config)
monkeypatch.setattr("deerflow.skills.security_scanner.create_chat_model", lambda **kwargs: model)
result = await scan_skill_content("---\nname: demo-skill\ndescription: demo\n---\n", executable=False)
assert result.decision == "allow"
assert model.kwargs["config"] == {"run_name": "security_agent"}
@pytest.mark.anyio @pytest.mark.anyio
async def test_scan_skill_content_blocks_when_model_unavailable(monkeypatch): async def test_scan_skill_content_blocks_when_model_unavailable(monkeypatch):
config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None)) config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None))

View File

@ -49,6 +49,8 @@ def test_generate_suggestions_parses_and_limits(monkeypatch):
result = asyncio.run(suggestions.generate_suggestions("t1", req)) result = asyncio.run(suggestions.generate_suggestions("t1", req))
assert result.suggestions == ["Q1", "Q2", "Q3"] assert result.suggestions == ["Q1", "Q2", "Q3"]
fake_model.ainvoke.assert_awaited_once()
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
def test_generate_suggestions_parses_list_block_content(monkeypatch): def test_generate_suggestions_parses_list_block_content(monkeypatch):
@ -67,6 +69,8 @@ def test_generate_suggestions_parses_list_block_content(monkeypatch):
result = asyncio.run(suggestions.generate_suggestions("t1", req)) result = asyncio.run(suggestions.generate_suggestions("t1", req))
assert result.suggestions == ["Q1", "Q2"] assert result.suggestions == ["Q1", "Q2"]
fake_model.ainvoke.assert_awaited_once()
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
def test_generate_suggestions_parses_output_text_block_content(monkeypatch): def test_generate_suggestions_parses_output_text_block_content(monkeypatch):
@ -85,6 +89,8 @@ def test_generate_suggestions_parses_output_text_block_content(monkeypatch):
result = asyncio.run(suggestions.generate_suggestions("t1", req)) result = asyncio.run(suggestions.generate_suggestions("t1", req))
assert result.suggestions == ["Q1", "Q2"] assert result.suggestions == ["Q1", "Q2"]
fake_model.ainvoke.assert_awaited_once()
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
def test_generate_suggestions_returns_empty_on_model_error(monkeypatch): def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):

View File

@ -93,6 +93,7 @@ class TestTitleMiddlewareCoreLogic:
assert title == "短标题" assert title == "短标题"
title_middleware_module.create_chat_model.assert_called_once_with(thinking_enabled=False) title_middleware_module.create_chat_model.assert_called_once_with(thinking_enabled=False)
model.ainvoke.assert_awaited_once() model.ainvoke.assert_awaited_once()
assert model.ainvoke.await_args.kwargs["config"] == {"run_name": "title_agent"}
def test_generate_title_normalizes_structured_message_content(self, monkeypatch): def test_generate_title_normalizes_structured_message_content(self, monkeypatch):
_set_test_title_config(max_chars=20) _set_test_title_config(max_chars=20)