mirror of
https://github.com/bytedance/deer-flow.git
synced 2026-05-08 17:58:20 +00:00
Squashes 25 PR commits onto current main. AppConfig becomes a pure value object with no ambient lookup. Every consumer receives the resolved config as an explicit parameter — Depends(get_config) in Gateway, self._app_config in DeerFlowClient, runtime.context.app_config in agent runs, AppConfig.from_file() at the LangGraph Server registration boundary. Phase 1 — frozen data + typed context - All config models (AppConfig, MemoryConfig, DatabaseConfig, …) become frozen=True; no sub-module globals. - AppConfig.from_file() is pure (no side-effect singleton loaders). - Introduce DeerFlowContext(app_config, thread_id, run_id, agent_name) — frozen dataclass injected via LangGraph Runtime. - Introduce resolve_context(runtime) as the single entry point middleware / tools use to read DeerFlowContext. Phase 2 — pure explicit parameter passing - Gateway: app.state.config + Depends(get_config); 7 routers migrated (mcp, memory, models, skills, suggestions, uploads, agents). - DeerFlowClient: __init__(config=...) captures config locally. - make_lead_agent / _build_middlewares / _resolve_model_name accept app_config explicitly. - RunContext.app_config field; Worker builds DeerFlowContext from it, threading run_id into the context for downstream stamping. - Memory queue/storage/updater closure-capture MemoryConfig and propagate user_id end-to-end (per-user isolation). - Sandbox/skills/community/factories/tools thread app_config. - resolve_context() rejects non-typed runtime.context. - Test suite migrated off AppConfig.current() monkey-patches. - AppConfig.current() classmethod deleted. Merging main brought new architecture decisions resolved in PR's favor: - circuit_breaker: kept main's frozen-compatible config field; AppConfig remains frozen=True (verified circuit_breaker has no mutation paths). - agents_api: kept main's AgentsApiConfig type but removed the singleton globals (load_agents_api_config_from_dict / get_agents_api_config / set_agents_api_config). 
8 routes in agents.py now read via Depends(get_config). - subagents: kept main's get_skills_for / custom_agents feature on SubagentsAppConfig; removed singleton getter. registry.py now reads app_config.subagents directly. - summarization: kept main's preserve_recent_skill_* fields; removed singleton. - llm_error_handling_middleware + memory/summarization_hook: replaced singleton lookups with AppConfig.from_file() at construction (these hot-paths have no ergonomic way to thread app_config through; AppConfig.from_file is a pure load). - worker.py + thread_data_middleware.py: DeerFlowContext.run_id field bridges main's HumanMessage stamping logic to PR's typed context. Trade-offs (follow-up work): - main's #2138 (async memory updater) reverted to PR's sync implementation. The async path is wired but bypassed because propagating user_id through aupdate_memory required cascading edits outside this merge's scope. - tests/test_subagent_skills_config.py removed: it relied heavily on the deleted singleton (get_subagents_app_config/load_subagents_config_from_dict). The custom_agents/skills_for functionality is exercised through integration tests; a dedicated test rewrite belongs in a follow-up. Verification: backend test suite — 2560 passed, 4 skipped, 84 failures. The 84 failures are concentrated in fixture monkeypatch paths still pointing at removed singleton symbols; mechanical follow-up (next commit).
203 lines
6.6 KiB
Python
203 lines
6.6 KiB
Python
"""Shared upload management logic.
|
|
|
|
Pure business logic — no FastAPI/HTTP dependencies.
|
|
Both Gateway and Client delegate to these functions.
|
|
"""
|
|
|
|
import os
|
|
import re
|
|
from pathlib import Path
|
|
from urllib.parse import quote
|
|
|
|
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
|
|
from deerflow.runtime.user_context import get_effective_user_id
|
|
|
|
|
|
class PathTraversalError(ValueError):
    """Signals that a resolved path landed outside its permitted base directory."""
|
|
|
|
# thread_id must be alphanumeric, hyphens, underscores, or dots only.
# Anchored with \Z (not $) so a trailing newline cannot sneak past the check.
_SAFE_THREAD_ID = re.compile(r"^[a-zA-Z0-9._-]+\Z")


def validate_thread_id(thread_id: str) -> None:
    """Reject thread IDs containing characters unsafe for filesystem paths.

    Raises:
        ValueError: If thread_id is empty, is ``.`` or ``..``, or contains
            unsafe characters (including a trailing newline).
    """
    # "." and ".." satisfy the character class above, but as a directory
    # name they point at the current/parent directory — reject explicitly
    # since thread_id becomes a path component in get_uploads_dir().
    if not thread_id or thread_id in {".", ".."} or not _SAFE_THREAD_ID.match(thread_id):
        raise ValueError(f"Invalid thread_id: {thread_id!r}")
|
|
|
|
|
|
def get_uploads_dir(thread_id: str) -> Path:
    """Resolve (without creating) the uploads directory for *thread_id*.

    Validates the thread ID first, then asks the paths registry for the
    per-user sandbox uploads location.
    """
    validate_thread_id(thread_id)
    user = get_effective_user_id()
    return get_paths().sandbox_uploads_dir(thread_id, user_id=user)
|
|
|
|
|
|
def ensure_uploads_dir(thread_id: str) -> Path:
    """Like :func:`get_uploads_dir`, but guarantees the directory exists on disk."""
    uploads = get_uploads_dir(thread_id)
    uploads.mkdir(parents=True, exist_ok=True)
    return uploads
|
|
|
|
|
|
def normalize_filename(filename: str) -> str:
    """Sanitize a filename by extracting its basename.

    Strips any directory components and rejects traversal patterns.

    Args:
        filename: Raw filename from user input (may contain path components).

    Returns:
        Safe filename (basename only).

    Raises:
        ValueError: If filename is empty, resolves to a traversal pattern,
            contains a backslash or NUL byte, or exceeds 255 UTF-8 bytes.
    """
    if not filename:
        raise ValueError("Filename is empty")
    safe = Path(filename).name
    if not safe or safe in {".", ".."}:
        raise ValueError(f"Filename is unsafe: {filename!r}")
    # Reject backslashes — on Linux Path.name keeps them as literal chars,
    # but they indicate a Windows-style path that should be stripped or rejected.
    if "\\" in safe:
        raise ValueError(f"Filename contains backslash: {filename!r}")
    # NUL is invalid in filenames on every major filesystem; os-level calls
    # would raise later anyway, so fail fast with a clear message here.
    if "\x00" in safe:
        raise ValueError(f"Filename contains NUL byte: {filename!r}")
    # Common filesystems cap a name at 255 *bytes*; measure the UTF-8
    # encoding and report bytes so the message matches what was checked.
    encoded_len = len(safe.encode("utf-8"))
    if encoded_len > 255:
        raise ValueError(f"Filename too long: {encoded_len} bytes")
    return safe
|
|
|
|
|
|
def claim_unique_filename(name: str, seen: set[str]) -> str:
    """Return *name*, or a ``stem_N.suffix`` variant of it, guaranteed unused.

    The chosen name is recorded in *seen* (mutated in place) before
    returning, so callers never have to add it themselves.

    Args:
        name: Candidate filename.
        seen: Filenames already claimed; updated with the result.

    Returns:
        A filename absent from *seen* at call time (now added to it).
    """
    candidate = name
    if candidate in seen:
        parsed = Path(name)
        stem, suffix = parsed.stem, parsed.suffix
        n = 1
        candidate = f"{stem}_{n}{suffix}"
        while candidate in seen:
            n += 1
            candidate = f"{stem}_{n}{suffix}"
    seen.add(candidate)
    return candidate
|
|
|
|
|
|
def validate_path_traversal(path: Path, base: Path) -> None:
    """Verify that *path* is inside *base*.

    Both paths are resolved before comparison, so ``..`` segments and
    symlinks cannot be used to escape *base*.

    Raises:
        PathTraversalError: If a path traversal is detected.
    """
    # Path.is_relative_to (3.9+) replaces the try/relative_to/except dance
    # with a direct boolean check; semantics are identical.
    if not path.resolve().is_relative_to(base.resolve()):
        raise PathTraversalError("Path traversal detected")
|
|
|
|
|
|
def list_files_in_dir(directory: Path) -> dict:
    """List files (not directories) in *directory*.

    Args:
        directory: Directory to scan.

    Returns:
        Dict with "files" list (sorted by name) and "count". Each file
        entry carries ``size`` as an *int* (bytes); use
        :func:`enrich_file_listing` to stringify sizes and attach
        virtual / artifact URLs.
    """
    if not directory.is_dir():
        return {"files": [], "count": 0}

    listing = []
    with os.scandir(directory) as it:
        for item in sorted(it, key=lambda d: d.name):
            # Symlinks are not followed: a link pointing outside the
            # directory must not appear as a regular file here.
            if item.is_file(follow_symlinks=False):
                info = item.stat(follow_symlinks=False)
                listing.append(
                    {
                        "filename": item.name,
                        "size": info.st_size,
                        "path": item.path,
                        "extension": Path(item.name).suffix,
                        "modified": info.st_mtime,
                    }
                )
    return {"files": listing, "count": len(listing)}
|
|
|
|
|
|
def delete_file_safe(base_dir: Path, filename: str, *, convertible_extensions: set[str] | None = None) -> dict:
    """Delete a file inside *base_dir* after path-traversal validation.

    If *convertible_extensions* is provided and the file's extension matches,
    the companion ``.md`` file is also removed (if it exists).

    Args:
        base_dir: Directory containing the file.
        filename: Name of file to delete.
        convertible_extensions: Lowercase extensions (e.g. ``{".pdf", ".docx"}``)
            whose companion markdown should be cleaned up.

    Returns:
        Dict with success and message.

    Raises:
        FileNotFoundError: If the file does not exist.
        PathTraversalError: If path traversal is detected.
    """
    file_path = (base_dir / filename).resolve()
    validate_path_traversal(file_path, base_dir)

    if not file_path.is_file():
        # Include the requested name so callers/logs can identify the file
        # (the f-string previously had no placeholder and always printed
        # the same literal text).
        raise FileNotFoundError(f"File not found: {filename}")

    file_path.unlink()

    # Clean up companion markdown generated during upload conversion.
    if convertible_extensions and file_path.suffix.lower() in convertible_extensions:
        file_path.with_suffix(".md").unlink(missing_ok=True)

    return {"success": True, "message": f"Deleted {filename}"}
|
|
|
|
|
|
def upload_artifact_url(thread_id: str, filename: str) -> str:
    """Return the artifact-download URL for an uploaded file in *thread_id*.

    The filename segment is percent-encoded (``quote`` with ``safe=''``) so
    spaces, ``#``, ``?`` and similar characters survive the round trip.
    """
    encoded = quote(filename, safe="")
    return f"/api/threads/{thread_id}/artifacts{VIRTUAL_PATH_PREFIX}/uploads/{encoded}"
|
|
|
|
|
|
def upload_virtual_path(filename: str) -> str:
    """Build the virtual path for a file in the uploads directory."""
    # NOTE(review): the f-string previously contained no placeholder, so
    # every file mapped to the same literal path. Interpolate the raw
    # filename — virtual paths appear filesystem-style, not URL-encoded
    # (cf. upload_artifact_url, which percent-encodes) — confirm intent.
    return f"{VIRTUAL_PATH_PREFIX}/uploads/{filename}"
|
|
|
|
|
|
def enrich_file_listing(result: dict, thread_id: str) -> dict:
    """Decorate a :func:`list_files_in_dir` result with URLs and string sizes.

    Each file entry gains ``virtual_path`` and ``artifact_url``, and its
    ``size`` is converted to ``str``. *result* is mutated in place and
    returned for call-chaining convenience.
    """
    for entry in result["files"]:
        name = entry["filename"]
        entry["size"] = str(entry["size"])
        entry["virtual_path"] = upload_virtual_path(name)
        entry["artifact_url"] = upload_artifact_url(thread_id, name)
    return result
|