mirror of
https://github.com/bytedance/deer-flow.git
synced 2026-04-25 11:18:22 +00:00
* fix(podcast): add fallback for models without json_object support (#747) Models like Kimi K2 don't support response_format.type: json_object. Add try-except to fall back to regular prompting with JSON parsing when BadRequestError mentions json_object not supported. - Add fallback to prompting + repair_json_output parsing - Re-raise other BadRequestError types - Add unit tests for script_writer_node with 100% coverage * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fixes: the unit test error of test_script_writer_node.py --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
59 lines
2.0 KiB
Python
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
|
# SPDX-License-Identifier: MIT
|
|
|
|
import json
|
|
import logging
|
|
|
|
import openai
|
|
from langchain_core.messages import HumanMessage, SystemMessage
|
|
|
|
from src.config.agents import AGENT_LLM_MAP
|
|
from src.llms.llm import get_llm_by_type
|
|
from src.prompts.template import get_prompt_template
|
|
from src.utils.json_utils import repair_json_output
|
|
|
|
from ..types import Script
|
|
from .state import PodcastState
|
|
|
|
# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def script_writer_node(state: PodcastState):
    """Generate a podcast ``Script`` from the raw input text in *state*.

    First attempts structured output via ``json_mode``. Models that reject
    ``response_format.type: json_object`` (e.g. Kimi K2) raise
    ``openai.BadRequestError``; for those we fall back to plain prompting,
    repair the raw text with ``repair_json_output``, and validate it into a
    ``Script``. Any other ``BadRequestError`` is re-raised unchanged, as is
    a ``JSONDecodeError`` from an unrepairable fallback response.

    Returns:
        Partial state update: ``{"script": Script, "audio_chunks": []}``,
        the empty list to be filled by downstream audio nodes.
    """
    logger.info("Generating script for podcast...")
    base_model = get_llm_by_type(AGENT_LLM_MAP["podcast_script_writer"])

    messages = [
        SystemMessage(content=get_prompt_template("podcast/podcast_script_writer")),
        HumanMessage(content=state["input"]),
    ]

    try:
        # Try structured output with json_mode first
        model = base_model.with_structured_output(Script, method="json_mode")
        script = model.invoke(messages)
    except openai.BadRequestError as e:
        # Guard clause: only the "json_object unsupported" error gets the
        # fallback; every other BadRequestError propagates to the caller.
        if "json_object" not in str(e).lower():
            raise
        # Fall back for models that don't support json_object (e.g., Kimi K2).
        # Lazy %-args so formatting happens only if the record is emitted.
        logger.warning(
            "Model doesn't support json_mode, falling back to prompting: %s", e
        )
        response = base_model.invoke(messages)
        content = response.content if hasattr(response, "content") else str(response)
        try:
            repaired = repair_json_output(content)
            script_dict = json.loads(repaired)
        except json.JSONDecodeError as json_err:
            logger.error(
                "Failed to parse JSON from podcast script writer fallback "
                "response: %s; content: %r",
                json_err,
                content,
            )
            raise
        script = Script.model_validate(script_dict)

    logger.debug("Generated podcast script: %s", script)
    return {"script": script, "audio_chunks": []}