mirror of
https://github.com/bytedance/deer-flow.git
synced 2026-05-01 14:28:28 +00:00
Merge pull request #2566 from bytedance/release/2.0-rc
Introduces a complete authentication/authorization system, SQL persistence layer, run event history, user data isolation, and extensive documentation in both English and Chinese. The core additions are:

- **Auth system**: JWT-based auth with a local email/password provider, CSRF protection, and rate limiting
- **Persistence layer**: SQLAlchemy 2.0 async ORM with SQLite backend (users, threads, runs, events, feedback)
- **User isolation**: Per-user data scoping via a contextvars sentinel pattern
- **Frontend**: Login/setup pages, AuthProvider, CSRF-aware fetcher
- **Documentation**: Comprehensive EN/ZH docs for harness, application, and tutorials
This commit is contained in: commit 4a9f1d547b
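The persistence layer summarized above is consumed through an async SQLAlchemy session factory. A minimal sketch, assuming only the `get_session_factory` and `UserRow` names that appear in the diff below; the query itself is illustrative:

```python
# Sketch: count admin users through the shared async persistence layer.
from sqlalchemy import select

from deerflow.persistence.engine import get_session_factory
from deerflow.persistence.user.model import UserRow


async def count_admins() -> int:
    sf = get_session_factory()
    if sf is None:  # persistence not configured in this boot path
        return 0
    async with sf() as session:
        result = await session.execute(
            select(UserRow).where(UserRow.system_role == "admin")
        )
        return len(result.scalars().all())
```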
@@ -34,5 +34,9 @@ INFOQUEST_API_KEY=your-infoquest-api-key

 # GitHub API Token
 # GITHUB_TOKEN=your-github-token
+
+# Database (only needed when config.yaml has database.backend: postgres)
+# DATABASE_URL=postgresql://deerflow:password@localhost:5432/deerflow
+#
 # WECOM_BOT_ID=your-wecom-bot-id
 # WECOM_BOT_SECRET=your-wecom-bot-secret
Makefile (36 changed lines)
@@ -1,6 +1,6 @@
 # DeerFlow - Unified Development Environment

-.PHONY: help config config-upgrade check install setup doctor dev dev-pro dev-daemon dev-daemon-pro start start-pro start-daemon start-daemon-pro stop up up-pro down clean docker-init docker-start docker-start-pro docker-stop docker-logs docker-logs-frontend docker-logs-gateway
+.PHONY: help config config-upgrade check install setup doctor dev dev-daemon start start-daemon stop up down clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway

 BASH ?= bash
 BACKEND_UV_RUN = cd backend && uv run
@@ -26,25 +26,19 @@ help:
 	@echo " make install - Install all dependencies (frontend + backend + pre-commit hooks)"
 	@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
 	@echo " make dev - Start all services in development mode (with hot-reloading)"
-	@echo " make dev-pro - Start in dev + Gateway mode (experimental, no LangGraph server)"
 	@echo " make dev-daemon - Start dev services in background (daemon mode)"
-	@echo " make dev-daemon-pro - Start dev daemon + Gateway mode (experimental)"
 	@echo " make start - Start all services in production mode (optimized, no hot-reloading)"
-	@echo " make start-pro - Start in prod + Gateway mode (experimental)"
 	@echo " make start-daemon - Start prod services in background (daemon mode)"
-	@echo " make start-daemon-pro - Start prod daemon + Gateway mode (experimental)"
 	@echo " make stop - Stop all running services"
 	@echo " make clean - Clean up processes and temporary files"
 	@echo ""
 	@echo "Docker Production Commands:"
 	@echo " make up - Build and start production Docker services (localhost:2026)"
-	@echo " make up-pro - Build and start production Docker in Gateway mode (experimental)"
 	@echo " make down - Stop and remove production Docker containers"
 	@echo ""
 	@echo "Docker Development Commands:"
 	@echo " make docker-init - Pull the sandbox image"
 	@echo " make docker-start - Start Docker services (mode-aware from config.yaml, localhost:2026)"
-	@echo " make docker-start-pro - Start Docker in Gateway mode (experimental, no LangGraph container)"
 	@echo " make docker-stop - Stop Docker development services"
 	@echo " make docker-logs - View Docker development logs"
 	@echo " make docker-logs-frontend - View Docker frontend logs"
@@ -123,41 +117,21 @@ dev:
 	@$(PYTHON) ./scripts/check.py
 	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev

-# Start all services in dev + Gateway mode (experimental: agent runtime embedded in Gateway)
-dev-pro:
-	@$(PYTHON) ./scripts/check.py
-	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway
-
 # Start all services in production mode (with optimizations)
 start:
 	@$(PYTHON) ./scripts/check.py
 	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod

-# Start all services in prod + Gateway mode (experimental)
-start-pro:
-	@$(PYTHON) ./scripts/check.py
-	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway
-
 # Start all services in daemon mode (background)
 dev-daemon:
 	@$(PYTHON) ./scripts/check.py
 	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --daemon

-# Start daemon + Gateway mode (experimental)
-dev-daemon-pro:
-	@$(PYTHON) ./scripts/check.py
-	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway --daemon
-
 # Start prod services in daemon mode (background)
 start-daemon:
 	@$(PYTHON) ./scripts/check.py
 	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --daemon

-# Start prod daemon + Gateway mode (experimental)
-start-daemon-pro:
-	@$(PYTHON) ./scripts/check.py
-	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway --daemon
-
 # Stop all services
 stop:
 	@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --stop
@@ -182,10 +156,6 @@ docker-init:
 docker-start:
 	@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start

-# Start Docker in Gateway mode (experimental)
-docker-start-pro:
-	@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start --gateway
-
 # Stop Docker development environment
 docker-stop:
 	@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh stop
@@ -208,10 +178,6 @@ docker-logs-gateway:
 up:
 	@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh

-# Build and start production services in Gateway mode
-up-pro:
-	@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh --gateway
-
 # Stop and remove production containers
 down:
 	@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh down
README.md (44 changed lines)
@@ -243,9 +243,6 @@ make up    # Build images and start all production services
 make down  # Stop and remove containers
 ```

-> [!NOTE]
-> The LangGraph agent server currently runs via `langgraph dev` (the open-source CLI server).
-
 Access: http://localhost:2026

 See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed Docker development guide.
@@ -289,53 +286,31 @@ On Windows, run the local development flow from Git Bash. Native `cmd.exe` and P

 #### Startup Modes

-DeerFlow supports multiple startup modes across two dimensions:
-
-- **Dev / Prod** — dev enables hot-reload; prod uses pre-built frontend
-- **Standard / Gateway** — standard uses a separate LangGraph server (4 processes); Gateway mode (experimental) embeds the agent runtime in the Gateway API (3 processes)
+DeerFlow runs the agent runtime inside the Gateway API. Development mode enables hot-reload; production mode uses a pre-built frontend.

 | | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
 |---|---|---|---|---|
 | **Dev** | `./scripts/serve.sh --dev`<br/>`make dev` | `./scripts/serve.sh --dev --daemon`<br/>`make dev-daemon` | `./scripts/docker.sh start`<br/>`make docker-start` | — |
-| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`<br/>`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`<br/>`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`<br/>`make docker-start-pro` | — |
 | **Prod** | `./scripts/serve.sh --prod`<br/>`make start` | `./scripts/serve.sh --prod --daemon`<br/>`make start-daemon` | — | `./scripts/deploy.sh`<br/>`make up` |
-| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`<br/>`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`<br/>`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`<br/>`make up-pro` |

 | Action | Local | Docker Dev | Docker Prod |
 |---|---|---|---|
 | **Stop** | `./scripts/serve.sh --stop`<br/>`make stop` | `./scripts/docker.sh stop`<br/>`make docker-stop` | `./scripts/deploy.sh down`<br/>`make down` |
 | **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |

-> **Gateway mode** eliminates the LangGraph server process — the Gateway API handles agent execution directly via async tasks, managing its own concurrency.
-
-#### Why Gateway Mode?
-
-In standard mode, DeerFlow runs a dedicated [LangGraph Platform](https://langchain-ai.github.io/langgraph/) server alongside the Gateway API. This architecture works well but has trade-offs:
-
-| | Standard Mode | Gateway Mode |
-|---|---|---|
-| **Architecture** | Gateway (REST API) + LangGraph (agent runtime) | Gateway embeds agent runtime |
-| **Concurrency** | `--n-jobs-per-worker` per worker (requires license) | `--workers` × async tasks (no per-worker cap) |
-| **Containers / Processes** | 4 (frontend, gateway, langgraph, nginx) | 3 (frontend, gateway, nginx) |
-| **Resource usage** | Higher (two Python runtimes) | Lower (single Python runtime) |
-| **LangGraph Platform license** | Required for production images | Not required |
-| **Cold start** | Slower (two services to initialize) | Faster |
-
-Both modes are functionally equivalent — the same agents, tools, and skills work in either mode.
+Gateway owns `/api/langgraph/*` and translates those public LangGraph-compatible paths to its native `/api/*` routers behind nginx.
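The path translation can be pictured as a pure function from public to native routes. A sketch only; in the shipped setup the rewrite happens in the nginx config, not in Python:

```python
# Sketch: map a public LangGraph-compatible path to Gateway's native router path.
def to_native_path(public_path: str) -> str:
    prefix = "/api/langgraph/"
    if public_path.startswith(prefix):
        return "/api/" + public_path[len(prefix):]
    return public_path  # non-langgraph /api/* paths pass through unchanged


assert to_native_path("/api/langgraph/threads/t1/runs") == "/api/threads/t1/runs"
```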
#### Docker Production Deployment

-`deploy.sh` supports building and starting separately. Images are mode-agnostic — runtime mode is selected at start time:
+`deploy.sh` supports building and starting separately:

 ```bash
 # One-step (build + start)
-deploy.sh # standard mode (default)
-deploy.sh --gateway # gateway mode
+deploy.sh

-# Two-step (build once, start with any mode)
+# Two-step (build once, start later)
 deploy.sh build # build all images
-deploy.sh start # start in standard mode
-deploy.sh start --gateway # start in gateway mode
+deploy.sh start # start pre-built images

 # Stop
 deploy.sh down
@@ -375,8 +350,8 @@ DeerFlow supports receiving tasks from messaging apps. Channels auto-start when

 ```yaml
 channels:
-  # LangGraph Server URL (default: http://localhost:2024)
-  langgraph_url: http://localhost:2024
+  # LangGraph-compatible Gateway API base URL (default: http://localhost:8001/api)
+  langgraph_url: http://localhost:8001/api
   # Gateway API URL (default: http://localhost:8001)
   gateway_url: http://localhost:8001
@@ -444,6 +419,7 @@ channels:
 Notes:
 - `assistant_id: lead_agent` calls the default LangGraph assistant directly.
 - If `assistant_id` is set to a custom agent name, DeerFlow still routes through `lead_agent` and injects that value as `agent_name`, so the custom agent's SOUL/config takes effect for IM channels.
+- IM channel workers call Gateway's LangGraph-compatible API internally and automatically attach process-local internal auth plus the CSRF cookie/header pair required for thread and run creation.

 Set the corresponding API keys in your `.env` file:
@@ -504,7 +480,7 @@ WECOM_BOT_SECRET=your_bot_secret
 4. Make sure backend dependencies include `wecom-aibot-python-sdk`. The channel uses a WebSocket long connection and does not require a public callback URL.
 5. The current integration supports inbound text, image, and file messages. Final images/files generated by the agent are also sent back to the WeCom conversation.

-When DeerFlow runs in Docker Compose, IM channels execute inside the `gateway` container. In that case, do not point `channels.langgraph_url` or `channels.gateway_url` at `localhost`; use container service names such as `http://langgraph:2024` and `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` and `DEER_FLOW_CHANNELS_GATEWAY_URL`.
+When DeerFlow runs in Docker Compose, IM channels execute inside the `gateway` container. In that case, do not point `channels.langgraph_url` or `channels.gateway_url` at `localhost`; use container service names such as `http://gateway:8001/api` and `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` and `DEER_FLOW_CHANNELS_GATEWAY_URL`.

 **Commands**
@@ -7,15 +7,13 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
 DeerFlow is a LangGraph-based AI super agent system with a full-stack architecture. The backend provides a "super agent" with sandbox execution, persistent memory, subagent delegation, and extensible tool integration - all operating in per-thread isolated environments.

 **Architecture**:
-- **LangGraph Server** (port 2024): Agent runtime and workflow execution
-- **Gateway API** (port 8001): REST API for models, MCP, skills, memory, artifacts, uploads, and local thread cleanup
+- **Gateway API** (port 8001): REST API plus embedded LangGraph-compatible agent runtime
 - **Frontend** (port 3000): Next.js web interface
 - **Nginx** (port 2026): Unified reverse proxy entry point
 - **Provisioner** (port 8002, optional in Docker dev): Started only when sandbox is configured for provisioner/Kubernetes mode

-**Runtime Modes**:
-- **Standard mode** (`make dev`): LangGraph Server handles agent execution as a separate process. 4 processes total.
-- **Gateway mode** (`make dev-pro`, experimental): Agent runtime embedded in Gateway via `RunManager` + `run_agent()` + `StreamBridge` (`packages/harness/deerflow/runtime/`). Service manages its own concurrency via async tasks. 3 processes total, no LangGraph Server.
+**Runtime**:
+- `make dev`, Docker dev, and production all run the agent runtime in Gateway via `RunManager` + `run_agent()` + `StreamBridge` (`packages/harness/deerflow/runtime/`). Nginx exposes that runtime at `/api/langgraph/*` and rewrites it to Gateway's native `/api/*` routers.

 **Project Structure**:
 ```
@@ -25,7 +23,7 @@ deer-flow/
 ├── extensions_config.json   # MCP servers and skills configuration
 ├── backend/                 # Backend application (this directory)
 │   ├── Makefile             # Backend-only commands (dev, gateway, lint)
-│   ├── langgraph.json       # LangGraph server configuration
+│   ├── langgraph.json       # LangGraph Studio graph configuration
 │   ├── packages/
 │   │   └── harness/         # deerflow-harness package (import: deerflow.*)
 │   │       ├── pyproject.toml
@@ -83,16 +81,15 @@ When making code changes, you MUST update the relevant documentation:
 ```bash
 make check    # Check system requirements
 make install  # Install all dependencies (frontend + backend)
-make dev      # Start all services (LangGraph + Gateway + Frontend + Nginx), with config.yaml preflight
-make dev-pro  # Gateway mode (experimental): skip LangGraph, agent runtime embedded in Gateway
-make start-pro # Production + Gateway mode (experimental)
+make dev      # Start all services (Gateway + Frontend + Nginx), with config.yaml preflight
 make start    # Start production services locally
 make stop     # Stop all services
 ```
 **Backend directory** (for backend development only):
 ```bash
 make install  # Install backend dependencies
-make dev      # Run LangGraph server only (port 2024)
+make dev      # Run Gateway API with reload (port 8001)
 make gateway  # Run Gateway API only (port 8001)
 make test     # Run all backend tests
 make lint     # Lint with ruff
@@ -158,7 +155,7 @@ from deerflow.config import get_app_config

 Lead-agent middlewares are assembled in strict append order across `packages/harness/deerflow/agents/middlewares/tool_error_handling_middleware.py` (`build_lead_runtime_middlewares`) and `packages/harness/deerflow/agents/lead_agent/agent.py` (`_build_middlewares`):

-1. **ThreadDataMiddleware** - Creates per-thread directories (`backend/.deer-flow/threads/{thread_id}/user-data/{workspace,uploads,outputs}`); Web UI thread deletion now follows LangGraph thread removal with Gateway cleanup of the local `.deer-flow/threads/{thread_id}` directory
+1. **ThreadDataMiddleware** - Creates per-thread directories under the user's isolation scope (`backend/.deer-flow/users/{user_id}/threads/{thread_id}/user-data/{workspace,uploads,outputs}`); resolves `user_id` via `get_effective_user_id()` (falls back to `"default"` in no-auth mode); Web UI thread deletion now follows LangGraph thread removal with Gateway cleanup of the local thread directory
 2. **UploadsMiddleware** - Tracks and injects newly uploaded files into conversation
 3. **SandboxMiddleware** - Acquires sandbox, stores `sandbox_id` in state
 4. **DanglingToolCallMiddleware** - Injects placeholder ToolMessages for AIMessage tool_calls that lack responses (e.g., due to user interruption), including raw provider tool-call payloads preserved only in `additional_kwargs["tool_calls"]`
@@ -222,6 +219,9 @@ FastAPI application on port 8001 with health check at `GET /health`.
 | **Threads** (`/api/threads/{id}`) | `DELETE /` - remove DeerFlow-managed local thread data after LangGraph thread deletion; unexpected failures are logged server-side and return a generic 500 detail |
 | **Artifacts** (`/api/threads/{id}/artifacts`) | `GET /{path}` - serve artifacts; active content types (`text/html`, `application/xhtml+xml`, `image/svg+xml`) are always forced as download attachments to reduce XSS risk; `?download=true` still forces download for other file types |
 | **Suggestions** (`/api/threads/{id}/suggestions`) | `POST /` - generate follow-up questions; rich list/block model content is normalized before JSON parsing |
+| **Thread Runs** (`/api/threads/{id}/runs`) | `POST /` - create background run; `POST /stream` - create + SSE stream; `POST /wait` - create + block; `GET /` - list runs; `GET /{rid}` - run details; `POST /{rid}/cancel` - cancel; `GET /{rid}/join` - join SSE; `GET /{rid}/messages` - paginated messages `{data, has_more}`; `GET /{rid}/events` - full event stream; `GET /../messages` - thread messages with feedback; `GET /../token-usage` - aggregate tokens |
+| **Feedback** (`/api/threads/{id}/runs/{rid}/feedback`) | `PUT /` - upsert feedback; `DELETE /` - delete user feedback; `POST /` - create feedback; `GET /` - list feedback; `GET /stats` - aggregate stats; `DELETE /{fid}` - delete specific |
+| **Runs** (`/api/runs`) | `POST /stream` - stateless run + SSE; `POST /wait` - stateless run + block; `GET /{rid}/messages` - paginated messages by run_id `{data, has_more}` (cursor: `after_seq`/`before_seq`); `GET /{rid}/feedback` - list feedback by run_id |

 Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` → Gateway.
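Because the runs lifecycle above is LangGraph Platform-compatible, a standard `langgraph_sdk` client pointed at the Gateway base URL can drive it. A hedged sketch (the input payload shape is an assumption; the client calls mirror what the IM channel workers do):

```python
# Sketch: create a thread and run it to completion through Gateway's
# LangGraph-compatible API.
import asyncio

from langgraph_sdk import get_client


async def main() -> None:
    client = get_client(url="http://localhost:8001/api")
    thread = await client.threads.create()
    result = await client.runs.wait(
        thread["thread_id"],
        "lead_agent",  # default assistant
        input={"messages": [{"role": "user", "content": "hello"}]},
    )
    print(result)


asyncio.run(main())
```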
@@ -235,7 +235,7 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →

 **Virtual Path System**:
 - Agent sees: `/mnt/user-data/{workspace,uploads,outputs}`, `/mnt/skills`
-- Physical: `backend/.deer-flow/threads/{thread_id}/user-data/...`, `deer-flow/skills/`
+- Physical: `backend/.deer-flow/users/{user_id}/threads/{thread_id}/user-data/...`, `deer-flow/skills/`
 - Translation: `replace_virtual_path()` / `replace_virtual_paths_in_command()`
 - Detection: `is_local_sandbox()` checks `sandbox_id == "local"`
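A minimal sketch of the virtual-to-physical mapping under the per-user layout above; the real helpers are `replace_virtual_path()` / `replace_virtual_paths_in_command()`, and this function is illustrative only:

```python
# Sketch: map an agent-visible /mnt/user-data/... path to its
# per-user, per-thread physical location.
from pathlib import Path


def translate(virtual: str, base_dir: Path, user_id: str, thread_id: str) -> Path:
    prefix = "/mnt/user-data/"
    assert virtual.startswith(prefix)
    rel = virtual[len(prefix):]  # e.g. "outputs/report.md"
    return base_dir / "users" / user_id / "threads" / thread_id / "user-data" / rel
```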
@@ -275,7 +275,7 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
 - `invoke_acp_agent` - Invokes external ACP-compatible agents from `config.yaml`
   - ACP launchers must be real ACP adapters. The standard `codex` CLI is not ACP-compatible by itself; configure a wrapper such as `npx -y @zed-industries/codex-acp` or an installed `codex-acp` binary
   - Missing ACP executables now return an actionable error message instead of a raw `[Errno 2]`
-  - Each ACP agent uses a per-thread workspace at `{base_dir}/threads/{thread_id}/acp-workspace/`. The workspace is accessible to the lead agent via the virtual path `/mnt/acp-workspace/` (read-only). In docker sandbox mode, the directory is volume-mounted into the container at `/mnt/acp-workspace` (read-only); in local sandbox mode, path translation is handled by `tools.py`
+  - Each ACP agent uses a per-thread workspace at `{base_dir}/users/{user_id}/threads/{thread_id}/acp-workspace/`. The workspace is accessible to the lead agent via the virtual path `/mnt/acp-workspace/` (read-only). In docker sandbox mode, the directory is volume-mounted into the container at `/mnt/acp-workspace` (read-only); in local sandbox mode, path translation is handled by `tools.py`
 - `image_search/` - Image search via DuckDuckGo

### MCP System (`packages/harness/deerflow/mcp/`)
@@ -312,9 +312,9 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →

 ### IM Channels System (`app/channels/`)

-Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via the LangGraph Server.
+Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via Gateway's LangGraph-compatible API.

-**Architecture**: Channels communicate with the LangGraph Server through `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side.
+**Architecture**: Channels communicate with Gateway through the `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side. The internal SDK client injects process-local internal auth plus a matching CSRF cookie/header pair so Gateway accepts state-changing thread/run requests from channel workers without relying on browser session cookies.

 **Components**:
 - `message_bus.py` - Async pub/sub hub (`InboundMessage` → queue → dispatcher; `OutboundMessage` → callbacks → channels)
@@ -327,7 +327,7 @@ Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow a
 **Message Flow**:
 1. External platform -> Channel impl -> `MessageBus.publish_inbound()`
 2. `ChannelManager._dispatch_loop()` consumes from queue
-3. For chat: look up/create thread on LangGraph Server
+3. For chat: look up/create thread through Gateway's LangGraph-compatible API
 4. Feishu chat: `runs.stream()` → accumulate AI text → publish multiple outbound updates (`is_final=False`) → publish final outbound (`is_final=True`)
 5. Slack/Telegram chat: `runs.wait()` → extract final response → publish outbound
 6. Feishu channel sends one running reply card up front, then patches the same card for each outbound update (card JSON sets `config.update_multi=true` for Feishu's patch API requirement)
@@ -335,27 +335,36 @@ Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow a
 8. Outbound → channel callbacks → platform reply

 **Configuration** (`config.yaml` -> `channels`):
-- `langgraph_url` - LangGraph Server URL (default: `http://localhost:2024`)
+- `langgraph_url` - LangGraph-compatible Gateway API base URL (default: `http://localhost:8001/api`)
 - `gateway_url` - Gateway API URL for auxiliary commands (default: `http://localhost:8001`)
-- In Docker Compose, IM channels run inside the `gateway` container, so `localhost` points back to that container. Use `http://langgraph:2024` / `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` / `DEER_FLOW_CHANNELS_GATEWAY_URL`.
+- In Docker Compose, IM channels run inside the `gateway` container, so `localhost` points back to that container. Use `http://gateway:8001/api` for `langgraph_url` and `http://gateway:8001` for `gateway_url`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` / `DEER_FLOW_CHANNELS_GATEWAY_URL`.
 - Per-channel configs: `feishu` (app_id, app_secret), `slack` (bot_token, app_token), `telegram` (bot_token)
### Memory System (`packages/harness/deerflow/agents/memory/`)

 **Components**:
 - `updater.py` - LLM-based memory updates with fact extraction, whitespace-normalized fact deduplication (trims leading/trailing whitespace before comparing), and atomic file I/O
-- `queue.py` - Debounced update queue (per-thread deduplication, configurable wait time)
+- `queue.py` - Debounced update queue (per-thread deduplication, configurable wait time); captures `user_id` at enqueue time so it survives the `threading.Timer` boundary
 - `prompt.py` - Prompt templates for memory updates
+- `storage.py` - File-based storage with per-user isolation; cache keyed by `(user_id, agent_name)` tuple

-**Data Structure** (stored in `backend/.deer-flow/memory.json`):
+**Per-User Isolation**:
+- Memory is stored per-user at `{base_dir}/users/{user_id}/memory.json`
+- Per-agent per-user memory at `{base_dir}/users/{user_id}/agents/{agent_name}/memory.json`
+- `user_id` is resolved via `get_effective_user_id()` from `deerflow.runtime.user_context`
+- In no-auth mode, `user_id` defaults to `"default"` (constant `DEFAULT_USER_ID`)
+- Absolute `storage_path` in config opts out of per-user isolation
+- **Migration**: Run `PYTHONPATH=. python scripts/migrate_user_isolation.py` to move legacy `memory.json` and `threads/` into per-user layout; supports `--dry-run`
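A minimal sketch of the contextvars sentinel pattern behind `get_effective_user_id()`; variable names here are illustrative, the real module is `deerflow.runtime.user_context`:

```python
# Sketch: request-scoped user id with a no-auth fallback sentinel.
from contextvars import ContextVar

DEFAULT_USER_ID = "default"
_current_user_id: ContextVar[str | None] = ContextVar("current_user_id", default=None)


def get_effective_user_id() -> str:
    """Return the current request's user_id, or the no-auth fallback."""
    return _current_user_id.get() or DEFAULT_USER_ID
```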
+**Data Structure** (stored in `{base_dir}/users/{user_id}/memory.json`):
 - **User Context**: `workContext`, `personalContext`, `topOfMind` (1-3 sentence summaries)
 - **History**: `recentMonths`, `earlierContext`, `longTermBackground`
 - **Facts**: Discrete facts with `id`, `content`, `category` (preference/knowledge/context/behavior/goal), `confidence` (0-1), `createdAt`, `source`
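A hypothetical `memory.json` instance matching the schema above (top-level key names and all values are invented for illustration):

```python
# Sketch of one user's memory file as a Python literal.
memory = {
    "userContext": {
        "workContext": "Works on the DeerFlow backend.",
        "personalContext": "Prefers concise answers.",
        "topOfMind": "Shipping the 2.0 release candidate.",
    },
    "history": {
        "recentMonths": "...",
        "earlierContext": "...",
        "longTermBackground": "...",
    },
    "facts": [
        {
            "id": "f1",
            "content": "Uses uv for Python dependency management.",
            "category": "behavior",
            "confidence": 0.9,
            "createdAt": "2026-05-01T00:00:00Z",
            "source": "conversation",
        }
    ],
}
```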
 **Workflow**:
-1. `MemoryMiddleware` filters messages (user inputs + final AI responses) and queues conversation
+1. `MemoryMiddleware` filters messages (user inputs + final AI responses), captures `user_id` via `get_effective_user_id()`, and queues conversation with the captured `user_id`
 2. Queue debounces (30s default), batches updates, deduplicates per-thread
-3. Background thread invokes LLM to extract context updates and facts
+3. Background thread invokes LLM to extract context updates and facts, using the stored `user_id` (not the contextvar, which is unavailable on timer threads)
 4. Applies updates atomically (temp file + rename) with cache invalidation, skipping duplicate fact content before append
 5. Next interaction injects top 15 facts + context into `<memory>` tags in system prompt

@@ -363,7 +372,7 @@ Focused regression coverage for the updater lives in `backend/tests/test_memory_

 **Configuration** (`config.yaml` → `memory`):
 - `enabled` / `injection_enabled` - Master switches
-- `storage_path` - Path to memory.json
+- `storage_path` - Path to memory.json (absolute path opts out of per-user isolation)
 - `debounce_seconds` - Wait time before processing (default: 30)
 - `model_name` - LLM for updates (null = default model)
 - `max_facts` / `fact_confidence_threshold` - Fact storage limits (100 / 0.7)
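A sketch of the enqueue-time `user_id` capture described in workflow steps 1 and 3, assuming a hypothetical `process_update()` helper. Contextvars set on the request thread are not visible inside a `threading.Timer` callback, which is why the value must be captured eagerly:

```python
# Sketch: capture the request-scoped user_id before crossing the
# threading.Timer boundary.
import threading


def enqueue_memory_update(thread_id: str, conversation: list, debounce_seconds: float = 30.0) -> None:
    user_id = get_effective_user_id()  # resolved now, on the request thread

    def _run() -> None:
        # The contextvar is NOT available here; use the captured value.
        process_update(thread_id, conversation, user_id=user_id)  # hypothetical helper

    threading.Timer(debounce_seconds, _run).start()
```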
@@ -398,9 +407,9 @@ Both can be modified at runtime via Gateway API endpoints or `DeerFlowClient` me

 `DeerFlowClient` provides direct in-process access to all DeerFlow capabilities without HTTP services. All return types align with the Gateway API response schemas, so consumer code works identically in HTTP and embedded modes.

-**Architecture**: Imports the same `deerflow` modules that LangGraph Server and Gateway API use. Shares the same config files and data directories. No FastAPI dependency.
+**Architecture**: Imports the same `deerflow` modules that Gateway API uses. Shares the same config files and data directories. No FastAPI dependency.

-**Agent Conversation** (replaces LangGraph Server):
+**Agent Conversation**:
 - `chat(message, thread_id)` — synchronous, accumulates streaming deltas per message-id and returns the final AI text
 - `stream(message, thread_id)` — subscribes to LangGraph `stream_mode=["values", "messages", "custom"]` and yields `StreamEvent`:
   - `"values"` — full state snapshot (title, messages, artifacts); AI text already delivered via `messages` mode is **not** re-synthesized here to avoid duplicate deliveries
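A hedged usage sketch for the embedded client; the import path, constructor, and `StreamEvent` field names are assumptions based on the bullets above:

```python
# Sketch: in-process agent conversation via DeerFlowClient.
from deerflow.client import DeerFlowClient  # import path assumed

client = DeerFlowClient()

# Synchronous: accumulate deltas and return the final AI text.
reply = client.chat("Summarize today's news", thread_id="thread-1")
print(reply)

# Streaming: consume StreamEvent objects by mode.
for event in client.stream("Now make it shorter", thread_id="thread-1"):
    if event.mode == "messages":   # incremental AI text deltas (field names assumed)
        print(event.data, end="")
    elif event.mode == "values":   # full state snapshot (title, artifacts, ...)
        pass
```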
@@ -463,20 +472,15 @@ This starts all services and makes the application available at `http://localhos
 | | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
 |---|---|---|---|---|
 | **Dev** | `./scripts/serve.sh --dev`<br/>`make dev` | `./scripts/serve.sh --dev --daemon`<br/>`make dev-daemon` | `./scripts/docker.sh start`<br/>`make docker-start` | — |
-| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`<br/>`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`<br/>`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`<br/>`make docker-start-pro` | — |
 | **Prod** | `./scripts/serve.sh --prod`<br/>`make start` | `./scripts/serve.sh --prod --daemon`<br/>`make start-daemon` | — | `./scripts/deploy.sh`<br/>`make up` |
-| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`<br/>`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`<br/>`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`<br/>`make up-pro` |

 | Action | Local | Docker Dev | Docker Prod |
 |---|---|---|---|
 | **Stop** | `./scripts/serve.sh --stop`<br/>`make stop` | `./scripts/docker.sh stop`<br/>`make docker-stop` | `./scripts/deploy.sh down`<br/>`make down` |
 | **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |

-Gateway mode embeds the agent runtime in Gateway, no LangGraph server.
-
 **Nginx routing**:
-- Standard mode: `/api/langgraph/*` → LangGraph Server (2024)
-- Gateway mode: `/api/langgraph/*` → Gateway embedded runtime (8001) (via envsubst)
+- `/api/langgraph/*` → Gateway embedded runtime (8001), rewritten to `/api/*`
 - `/api/*` (other) → Gateway API (8001)
 - `/` (non-API) → Frontend (3000)
@@ -485,15 +489,11 @@
 From the **backend** directory:

 ```bash
-# Terminal 1: LangGraph server
-make dev
-
-# Terminal 2: Gateway API
+# Gateway API
 make gateway
 ```

 Direct access (without nginx):
-- LangGraph: `http://localhost:2024`
 - Gateway: `http://localhost:8001`

 ### Frontend Configuration
@@ -13,6 +13,9 @@ FROM python:3.12-slim-bookworm AS builder
 ARG NODE_MAJOR=22
 ARG APT_MIRROR
 ARG UV_INDEX_URL
+# Optional extras to install (e.g. "postgres" for PostgreSQL support)
+# Usage: docker build --build-arg UV_EXTRAS=postgres ...
+ARG UV_EXTRAS

 # Optionally override apt mirror for restricted networks (e.g. APT_MIRROR=mirrors.aliyun.com)
 RUN if [ -n "${APT_MIRROR}" ]; then \
@@ -43,8 +46,9 @@ WORKDIR /app
 COPY backend ./backend

 # Install dependencies with cache mount
+# When UV_EXTRAS is set (e.g. "postgres"), installs optional dependencies.
 RUN --mount=type=cache,target=/root/.cache/uv \
-    sh -c "cd backend && UV_INDEX_URL=${UV_INDEX_URL:-https://pypi.org/simple} uv sync"
+    sh -c "cd backend && UV_INDEX_URL=${UV_INDEX_URL:-https://pypi.org/simple} uv sync ${UV_EXTRAS:+--extra $UV_EXTRAS}"

 # ── Stage 2: Dev ──────────────────────────────────────────────────────────────
 # Retains compiler toolchain from builder so startup-time `uv sync` can build
@@ -2,7 +2,7 @@ install:
 	uv sync

 dev:
-	uv run langgraph dev --no-browser --no-reload --n-jobs-per-worker 10
+	PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 --reload

 gateway:
 	PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001
@@ -2,7 +2,7 @@

 Provides a pluggable channel system that connects external messaging platforms
 (Feishu/Lark, Slack, Telegram) to the DeerFlow agent via the ChannelManager,
-which uses ``langgraph-sdk`` to communicate with the underlying LangGraph Server.
+which uses ``langgraph-sdk`` to communicate with Gateway's LangGraph-compatible API.
 """

 from app.channels.base import Channel
@@ -13,6 +13,7 @@ from app.channels.base import Channel
 from app.channels.commands import KNOWN_CHANNEL_COMMANDS
 from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
 from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
+from deerflow.runtime.user_context import get_effective_user_id
 from deerflow.sandbox.sandbox_provider import get_sandbox_provider

 logger = logging.getLogger(__name__)
@@ -344,8 +345,9 @@ class FeishuChannel(Channel):
            return f"Failed to obtain the [{type}]"

        paths = get_paths()
-       paths.ensure_thread_dirs(thread_id)
-       uploads_dir = paths.sandbox_uploads_dir(thread_id).resolve()
+       user_id = get_effective_user_id()
+       paths.ensure_thread_dirs(thread_id, user_id=user_id)
+       uploads_dir = paths.sandbox_uploads_dir(thread_id, user_id=user_id).resolve()

        ext = "png" if type == "image" else "bin"
        raw_filename = getattr(response, "file_name", "") or f"feishu_{file_key[-12:]}.{ext}"
@@ -1,4 +1,4 @@
-"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server."""
+"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via Gateway."""

 from __future__ import annotations

@@ -17,10 +17,13 @@ from langgraph_sdk.errors import ConflictError
 from app.channels.commands import KNOWN_CHANNEL_COMMANDS
 from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
 from app.channels.store import ChannelStore
+from app.gateway.csrf_middleware import CSRF_COOKIE_NAME, CSRF_HEADER_NAME, generate_csrf_token
+from app.gateway.internal_auth import create_internal_auth_headers
+from deerflow.runtime.user_context import get_effective_user_id

 logger = logging.getLogger(__name__)

-DEFAULT_LANGGRAPH_URL = "http://localhost:2024"
+DEFAULT_LANGGRAPH_URL = "http://localhost:8001/api"
 DEFAULT_GATEWAY_URL = "http://localhost:8001"
 DEFAULT_ASSISTANT_ID = "lead_agent"
 CUSTOM_AGENT_NAME_PATTERN = re.compile(r"^[A-Za-z0-9-]+$")
@@ -342,14 +345,15 @@ def _resolve_attachments(thread_id: str, artifacts: list[str]) -> list[ResolvedA
    attachments: list[ResolvedAttachment] = []
    paths = get_paths()
-   outputs_dir = paths.sandbox_outputs_dir(thread_id).resolve()
+   user_id = get_effective_user_id()
+   outputs_dir = paths.sandbox_outputs_dir(thread_id, user_id=user_id).resolve()
    for virtual_path in artifacts:
        # Security: only allow files from the agent outputs directory
        if not virtual_path.startswith(_OUTPUTS_VIRTUAL_PREFIX):
            logger.warning("[Manager] rejected non-outputs artifact path: %s", virtual_path)
            continue
        try:
-           actual = paths.resolve_virtual_path(thread_id, virtual_path)
+           actual = paths.resolve_virtual_path(thread_id, virtual_path, user_id=user_id)
            # Verify the resolved path is actually under the outputs directory
            # (guards against path-traversal even after prefix check)
            try:
@@ -507,7 +511,7 @@ class ChannelManager:
    """Core dispatcher that bridges IM channels to the DeerFlow agent.

    It reads from the MessageBus inbound queue, creates/reuses threads on
-   the LangGraph Server, sends messages via ``runs.wait``, and publishes
+   Gateway's LangGraph-compatible API, sends messages via ``runs.wait``, and publishes
    outbound responses back through the bus.
    """
@@ -532,6 +536,7 @@
        self._default_session = _as_dict(default_session)
        self._channel_sessions = dict(channel_sessions or {})
        self._client = None  # lazy init — langgraph_sdk async client
+       self._csrf_token = generate_csrf_token()
        self._semaphore: asyncio.Semaphore | None = None
        self._running = False
        self._task: asyncio.Task | None = None
@@ -584,7 +589,14 @@
        if self._client is None:
            from langgraph_sdk import get_client

-           self._client = get_client(url=self._langgraph_url)
+           self._client = get_client(
+               url=self._langgraph_url,
+               headers={
+                   **create_internal_auth_headers(),
+                   CSRF_HEADER_NAME: self._csrf_token,
+                   "Cookie": f"{CSRF_COOKIE_NAME}={self._csrf_token}",
+               },
+           )
        return self._client

    # -- lifecycle ---------------------------------------------------------
@@ -667,7 +679,7 @@
    # -- chat handling -----------------------------------------------------

    async def _create_thread(self, client, msg: InboundMessage) -> str:
-       """Create a new thread on the LangGraph Server and store the mapping."""
+       """Create a new thread through Gateway and store the mapping."""
        thread = await client.threads.create()
        thread_id = thread["thread_id"]
        self.store.set_thread_id(
@@ -677,7 +689,7 @@
            topic_id=msg.topic_id,
            user_id=msg.user_id,
        )
-       logger.info("[Manager] new thread created on LangGraph Server: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
+       logger.info("[Manager] new thread created through Gateway: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
        return thread_id

    async def _handle_chat(self, msg: InboundMessage, extra_context: dict[str, Any] | None = None) -> None:
@@ -884,7 +896,7 @@ class ChannelManager:
            return

        if command == "new":
-           # Create a new thread on the LangGraph Server
+           # Create a new thread through Gateway
            client = self._get_client()
            thread = await client.threads.create()
            new_thread_id = thread["thread_id"]
@@ -4,7 +4,7 @@ from __future__ import annotations

 import logging
 import os
-from typing import Any
+from typing import TYPE_CHECKING, Any

 from app.channels.base import Channel
 from app.channels.manager import DEFAULT_GATEWAY_URL, DEFAULT_LANGGRAPH_URL, ChannelManager
@@ -13,6 +13,9 @@ from app.channels.store import ChannelStore

 logger = logging.getLogger(__name__)

+if TYPE_CHECKING:
+    from deerflow.config.app_config import AppConfig
+
 # Channel name → import path for lazy loading
 _CHANNEL_REGISTRY: dict[str, str] = {
     "discord": "app.channels.discord:DiscordChannel",
@@ -75,14 +78,15 @@
        self._running = False

    @classmethod
-   def from_app_config(cls) -> ChannelService:
+   def from_app_config(cls, app_config: AppConfig | None = None) -> ChannelService:
        """Create a ChannelService from the application config."""
-       from deerflow.config.app_config import get_app_config
+       if app_config is None:
+           from deerflow.config.app_config import get_app_config

-       config = get_app_config()
+           app_config = get_app_config()
        channels_config = {}
        # extra fields are allowed by AppConfig (extra="allow")
-       extra = config.model_extra or {}
+       extra = app_config.model_extra or {}
        if "channels" in extra:
            channels_config = extra["channels"]
        return cls(channels_config=channels_config)
@@ -201,12 +205,12 @@ def get_channel_service() -> ChannelService | None:
    return _channel_service


-async def start_channel_service() -> ChannelService:
+async def start_channel_service(app_config: AppConfig | None = None) -> ChannelService:
    """Create and start the global ChannelService from app config."""
    global _channel_service
    if _channel_service is not None:
        return _channel_service
-   _channel_service = ChannelService.from_app_config()
+   _channel_service = ChannelService.from_app_config(app_config)
    await _channel_service.start()
    return _channel_service
@@ -1,17 +1,23 @@
 import asyncio
 import logging
+import os
 from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager

 from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware

+from app.gateway.auth_middleware import AuthMiddleware
 from app.gateway.config import get_gateway_config
+from app.gateway.csrf_middleware import CSRFMiddleware
 from app.gateway.deps import langgraph_runtime
 from app.gateway.routers import (
     agents,
     artifacts,
     assistants_compat,
+    auth,
     channels,
+    feedback,
     mcp,
     memory,
     models,
@@ -22,7 +28,10 @@ from app.gateway.routers import (
     threads,
     uploads,
 )
-from deerflow.config.app_config import get_app_config
+from deerflow.config import app_config as deerflow_app_config
+
+AppConfig = deerflow_app_config.AppConfig
+get_app_config = deerflow_app_config.get_app_config

 # Configure logging
 logging.basicConfig(
@@ -39,13 +48,122 @@ logger = logging.getLogger(__name__)
_SHUTDOWN_HOOK_TIMEOUT_SECONDS = 5.0


async def _ensure_admin_user(app: FastAPI) -> None:
    """Startup hook: handle first boot and migrate orphan threads otherwise.

    After admin creation, migrate orphan threads from the LangGraph
    store (metadata.user_id unset) to the admin account. This is the
    "no-auth → with-auth" upgrade path: users who ran DeerFlow without
    authentication have existing LangGraph thread data that needs an
    owner assigned.

    First boot (no admin exists):
    - Does NOT create any user accounts automatically.
    - The operator must visit ``/setup`` to create the first admin.

    Subsequent boots (admin already exists):
    - Runs the one-time "no-auth → with-auth" orphan thread migration for
      existing LangGraph thread metadata that has no owner_id.

    No SQL persistence migration is needed: the four user_id columns
    (threads_meta, runs, run_events, feedback) only come into existence
    alongside the auth module via create_all, so freshly created tables
    never contain NULL-owner rows.
    """
    from sqlalchemy import select

    from app.gateway.deps import get_local_provider
    from deerflow.persistence.engine import get_session_factory
    from deerflow.persistence.user.model import UserRow

    try:
        provider = get_local_provider()
    except RuntimeError:
        # Auth persistence may not be initialized in some test/boot paths.
        # Skip admin migration work rather than failing gateway startup.
        logger.warning("Auth persistence not ready; skipping admin bootstrap check")
        return

    sf = get_session_factory()
    if sf is None:
        return

    admin_count = await provider.count_admin_users()

    if admin_count == 0:
        logger.info("=" * 60)
        logger.info(" First boot detected — no admin account exists.")
        logger.info(" Visit /setup to complete admin account creation.")
        logger.info("=" * 60)
        return

    # Admin already exists — run orphan thread migration for any
    # LangGraph thread metadata that pre-dates the auth module.
    async with sf() as session:
        stmt = select(UserRow).where(UserRow.system_role == "admin").limit(1)
        row = (await session.execute(stmt)).scalar_one_or_none()

    if row is None:
        return  # Should not happen (admin_count > 0 above), but be safe.

    admin_id = str(row.id)

    # LangGraph store orphan migration — non-fatal.
    # This covers the "no-auth → with-auth" upgrade path for users
    # whose existing LangGraph thread metadata has no user_id set.
    store = getattr(app.state, "store", None)
    if store is not None:
        try:
            migrated = await _migrate_orphaned_threads(store, admin_id)
            if migrated:
                logger.info("Migrated %d orphan LangGraph thread(s) to admin", migrated)
        except Exception:
            logger.exception("LangGraph thread migration failed (non-fatal)")


async def _iter_store_items(store, namespace, *, page_size: int = 500):
    """Paginated async iterator over a LangGraph store namespace.

    Replaces the old hardcoded ``limit=1000`` call with a cursor-style
    loop so that environments with more than one page of orphans do
    not silently lose data. Terminates when a page is empty OR when a
    short page arrives (indicating the last page).
    """
    offset = 0
    while True:
        batch = await store.asearch(namespace, limit=page_size, offset=offset)
        if not batch:
            return
        for item in batch:
            yield item
        if len(batch) < page_size:
            return
        offset += page_size


async def _migrate_orphaned_threads(store, admin_user_id: str) -> int:
    """Migrate LangGraph store threads with no user_id to the given admin.

    Uses cursor pagination so all orphans are migrated regardless of
    count. Returns the number of rows migrated.
    """
    migrated = 0
    async for item in _iter_store_items(store, ("threads",)):
        metadata = item.value.get("metadata", {})
        if not metadata.get("user_id"):
            metadata["user_id"] = admin_user_id
            item.value["metadata"] = metadata
            await store.aput(("threads",), item.key, item.value)
            migrated += 1
    return migrated
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Application lifespan handler."""

    # Load config and check necessary environment variables at startup
    try:
-       get_app_config()
+       app.state.config = get_app_config()
        logger.info("Configuration loaded successfully")
    except Exception as e:
        error_msg = f"Failed to load configuration during gateway startup: {e}"
@@ -58,11 +176,15 @@
    async with langgraph_runtime(app):
        logger.info("LangGraph runtime initialised")

+       # Ensure admin user exists (auto-create on first boot)
+       # Must run AFTER langgraph_runtime so app.state.store is available for thread migration
+       await _ensure_admin_user(app)
+
        # Start IM channel service if any channels are configured
        try:
            from app.channels.service import start_channel_service

-           channel_service = await start_channel_service()
+           channel_service = await start_channel_service(app.state.config)
            logger.info("Channel service started: %s", channel_service.get_status())
        except Exception:
            logger.exception("No IM channels configured or channel service failed to start")
@@ -177,7 +299,31 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
    ],
)

-# CORS is handled by nginx - no need for FastAPI middleware
+# Auth: reject unauthenticated requests to non-public paths (fail-closed safety net)
+app.add_middleware(AuthMiddleware)
+
+# CSRF: Double Submit Cookie pattern for state-changing requests
+app.add_middleware(CSRFMiddleware)
+
+# CORS: when GATEWAY_CORS_ORIGINS is set (dev without nginx), add CORS middleware.
+# In production, nginx handles CORS and no middleware is needed.
+cors_origins_env = os.environ.get("GATEWAY_CORS_ORIGINS", "")
+if cors_origins_env:
+    cors_origins = [o.strip() for o in cors_origins_env.split(",") if o.strip()]
+    # Validate: wildcard origin with credentials is a security misconfiguration
+    for origin in cors_origins:
+        if origin == "*":
+            logger.error("GATEWAY_CORS_ORIGINS contains wildcard '*' with allow_credentials=True. This is a security misconfiguration — browsers will reject the response. Use explicit scheme://host:port origins instead.")
+            cors_origins = [o for o in cors_origins if o != "*"]
+            break
+    if cors_origins:
+        app.add_middleware(
+            CORSMiddleware,
+            allow_origins=cors_origins,
+            allow_credentials=True,
+            allow_methods=["*"],
+            allow_headers=["*"],
+        )

# Include routers
# Models API is mounted at /api/models
@@ -213,6 +359,12 @@
# Assistants compatibility API (LangGraph Platform stub)
app.include_router(assistants_compat.router)

+# Auth API is mounted at /api/v1/auth
+app.include_router(auth.router)
+
+# Feedback API is mounted at /api/threads/{thread_id}/runs/{run_id}/feedback
+app.include_router(feedback.router)
+
# Thread Runs API (LangGraph Platform-compatible runs lifecycle)
app.include_router(thread_runs.router)
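A conceptual sketch of the Double Submit Cookie check that `CSRFMiddleware` applies to state-changing requests; illustrative only, the shipped logic lives in `app/gateway/csrf_middleware.py`:

```python
# Sketch: a request is accepted only when the CSRF cookie and the
# CSRF header carry the same token. An attacker's cross-origin page
# cannot read the cookie, so a matching pair proves same-origin intent.
import hmac


def is_csrf_valid(cookie_token: str | None, header_token: str | None) -> bool:
    if not cookie_token or not header_token:
        return False
    return hmac.compare_digest(cookie_token, header_token)  # constant-time compare
```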
backend/app/gateway/auth/__init__.py (new file, 42 lines)
@@ -0,0 +1,42 @@
"""Authentication module for DeerFlow.

This module provides:
- JWT-based authentication
- Provider Factory pattern for extensible auth methods
- UserRepository interface for storage backends (SQLite)
"""

from app.gateway.auth.config import AuthConfig, get_auth_config, set_auth_config
from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse, TokenError
from app.gateway.auth.jwt import TokenPayload, create_access_token, decode_token
from app.gateway.auth.local_provider import LocalAuthProvider
from app.gateway.auth.models import User, UserResponse
from app.gateway.auth.password import hash_password, verify_password
from app.gateway.auth.providers import AuthProvider
from app.gateway.auth.repositories.base import UserRepository

__all__ = [
    # Config
    "AuthConfig",
    "get_auth_config",
    "set_auth_config",
    # Errors
    "AuthErrorCode",
    "AuthErrorResponse",
    "TokenError",
    # JWT
    "TokenPayload",
    "create_access_token",
    "decode_token",
    # Password
    "hash_password",
    "verify_password",
    # Models
    "User",
    "UserResponse",
    # Providers
    "AuthProvider",
    "LocalAuthProvider",
    # Repository
    "UserRepository",
]
backend/app/gateway/auth/config.py (new file, 57 lines)
@@ -0,0 +1,57 @@
"""Authentication configuration for DeerFlow."""

import logging
import os
import secrets

from pydantic import BaseModel, Field

logger = logging.getLogger(__name__)


class AuthConfig(BaseModel):
    """JWT and auth-related configuration. Parsed once at startup.

    Note: the ``users`` table now lives in the shared persistence
    database managed by ``deerflow.persistence.engine``. The old
    ``users_db_path`` config key has been removed — user storage is
    configured through ``config.database`` like every other table.
    """

    jwt_secret: str = Field(
        ...,
        description="Secret key for JWT signing. MUST be set via AUTH_JWT_SECRET.",
    )
    token_expiry_days: int = Field(default=7, ge=1, le=30)
    oauth_github_client_id: str | None = Field(default=None)
    oauth_github_client_secret: str | None = Field(default=None)


_auth_config: AuthConfig | None = None


def get_auth_config() -> AuthConfig:
    """Get the global AuthConfig instance. Parses from env on first call."""
    global _auth_config
    if _auth_config is None:
        from dotenv import load_dotenv

        load_dotenv()
        jwt_secret = os.environ.get("AUTH_JWT_SECRET")
        if not jwt_secret:
            jwt_secret = secrets.token_urlsafe(32)
            os.environ["AUTH_JWT_SECRET"] = jwt_secret
            logger.warning(
                "⚠ AUTH_JWT_SECRET is not set — using an auto-generated ephemeral secret. "
                "Sessions will be invalidated on restart. "
                "For production, add AUTH_JWT_SECRET to your .env file: "
                'python -c "import secrets; print(secrets.token_urlsafe(32))"'
            )
        _auth_config = AuthConfig(jwt_secret=jwt_secret)
    return _auth_config


def set_auth_config(config: AuthConfig) -> None:
    """Set the global AuthConfig instance (for testing)."""
    global _auth_config
    _auth_config = config
backend/app/gateway/auth/credential_file.py (new file, 48 lines)
@@ -0,0 +1,48 @@
"""Write initial admin credentials to a restricted file instead of logs.

Logging secrets to stdout/stderr is a well-known CodeQL finding
(py/clear-text-logging-sensitive-data) — in production those logs
get collected into ELK/Splunk/etc and become a secret sprawl
source. This helper writes the credential to a 0600 file that only
the process user can read, and returns the path so the caller can
log **the path** (not the password) for the operator to pick up.
"""

from __future__ import annotations

import os
from pathlib import Path

from deerflow.config.paths import get_paths

_CREDENTIAL_FILENAME = "admin_initial_credentials.txt"


def write_initial_credentials(email: str, password: str, *, label: str = "initial") -> Path:
    """Write the admin email + password to ``{base_dir}/admin_initial_credentials.txt``.

    The file is created **atomically** with mode 0600 via ``os.open``
    so the password is never world-readable, even for the single syscall
    window between ``write_text`` and ``chmod``.

    ``label`` distinguishes "initial" (fresh creation) from "reset"
    (password reset) in the file header so an operator picking up the
    file after a restart can tell which event produced it.

    Returns the absolute :class:`Path` to the file.
    """
    target = get_paths().base_dir / _CREDENTIAL_FILENAME
    target.parent.mkdir(parents=True, exist_ok=True)

    content = (
        f"# DeerFlow admin {label} credentials\n"
        "# This file is generated on first boot or password reset.\n"
        "# Change the password after login via Settings -> Account,\n"
        "# then delete this file.\n"
        "#\n"
        f"email: {email}\n"
        f"password: {password}\n"
    )

    # Atomic 0600 create-or-truncate. O_TRUNC (not O_EXCL) so the
    # reset-password path can rewrite an existing file without a
    # separate unlink-then-create dance.
    fd = os.open(target, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w", encoding="utf-8") as fh:
        fh.write(content)

    return target.resolve()
backend/app/gateway/auth/errors.py (new file, 45 lines)
@@ -0,0 +1,45 @@
"""Typed error definitions for auth module.

AuthErrorCode: exhaustive enum of all auth failure conditions.
TokenError: exhaustive enum of JWT decode failures.
AuthErrorResponse: structured error payload for HTTP responses.
"""

from enum import StrEnum

from pydantic import BaseModel


class AuthErrorCode(StrEnum):
    """Exhaustive list of auth error conditions."""

    INVALID_CREDENTIALS = "invalid_credentials"
    TOKEN_EXPIRED = "token_expired"
    TOKEN_INVALID = "token_invalid"
    USER_NOT_FOUND = "user_not_found"
    EMAIL_ALREADY_EXISTS = "email_already_exists"
    PROVIDER_NOT_FOUND = "provider_not_found"
    NOT_AUTHENTICATED = "not_authenticated"
    SYSTEM_ALREADY_INITIALIZED = "system_already_initialized"


class TokenError(StrEnum):
    """Exhaustive list of JWT decode failure reasons."""

    EXPIRED = "expired"
    INVALID_SIGNATURE = "invalid_signature"
    MALFORMED = "malformed"


class AuthErrorResponse(BaseModel):
    """Structured error response — replaces bare `detail` strings."""

    code: AuthErrorCode
    message: str


def token_error_to_code(err: TokenError) -> AuthErrorCode:
    """Map TokenError to AuthErrorCode — single source of truth."""
    if err == TokenError.EXPIRED:
        return AuthErrorCode.TOKEN_EXPIRED
    return AuthErrorCode.TOKEN_INVALID
backend/app/gateway/auth/jwt.py (new file, 55 lines)
@@ -0,0 +1,55 @@
"""JWT token creation and verification."""

from datetime import UTC, datetime, timedelta

import jwt
from pydantic import BaseModel

from app.gateway.auth.config import get_auth_config
from app.gateway.auth.errors import TokenError


class TokenPayload(BaseModel):
    """JWT token payload."""

    sub: str  # user_id
    exp: datetime
    iat: datetime | None = None
    ver: int = 0  # token_version — must match User.token_version


def create_access_token(user_id: str, expires_delta: timedelta | None = None, token_version: int = 0) -> str:
    """Create a JWT access token.

    Args:
        user_id: The user's UUID as string
        expires_delta: Optional custom expiry, defaults to 7 days
        token_version: User's current token_version for invalidation

    Returns:
        Encoded JWT string
    """
    config = get_auth_config()
    expiry = expires_delta or timedelta(days=config.token_expiry_days)

    now = datetime.now(UTC)
    payload = {"sub": user_id, "exp": now + expiry, "iat": now, "ver": token_version}
    return jwt.encode(payload, config.jwt_secret, algorithm="HS256")


def decode_token(token: str) -> TokenPayload | TokenError:
    """Decode and validate a JWT token.

    Returns:
        TokenPayload if valid, or a specific TokenError variant.
    """
    config = get_auth_config()
    try:
        payload = jwt.decode(token, config.jwt_secret, algorithms=["HS256"])
        return TokenPayload(**payload)
    except jwt.ExpiredSignatureError:
        return TokenError.EXPIRED
    except jwt.InvalidSignatureError:
        return TokenError.INVALID_SIGNATURE
    except jwt.PyJWTError:
        return TokenError.MALFORMED
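
A round-trip sketch (not part of the diff) of the two helpers above; the UUID is illustrative. Tampering with the encoded string surfaces as either INVALID_SIGNATURE or MALFORMED depending on where PyJWT fails, so the assertion hedges across both:

import uuid

from app.gateway.auth.errors import TokenError
from app.gateway.auth.jwt import TokenPayload, create_access_token, decode_token

user_id = str(uuid.uuid4())
token = create_access_token(user_id, token_version=0)

payload = decode_token(token)
assert isinstance(payload, TokenPayload)
assert payload.sub == user_id and payload.ver == 0

# Corrupting the signature segment yields a TokenError variant, not an exception.
bad = decode_token(token + "x")
assert bad in (TokenError.INVALID_SIGNATURE, TokenError.MALFORMED)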
backend/app/gateway/auth/local_provider.py (new file, 104 lines)
@@ -0,0 +1,104 @@
"""Local email/password authentication provider."""

import logging

from app.gateway.auth.models import User
from app.gateway.auth.password import hash_password_async, needs_rehash, verify_password_async
from app.gateway.auth.providers import AuthProvider
from app.gateway.auth.repositories.base import UserRepository

logger = logging.getLogger(__name__)


class LocalAuthProvider(AuthProvider):
    """Email/password authentication provider using local database."""

    def __init__(self, repository: UserRepository):
        """Initialize with a UserRepository.

        Args:
            repository: UserRepository implementation (SQLite)
        """
        self._repo = repository

    async def authenticate(self, credentials: dict) -> User | None:
        """Authenticate with email and password.

        Args:
            credentials: dict with 'email' and 'password' keys

        Returns:
            User if authentication succeeds, None otherwise
        """
        email = credentials.get("email")
        password = credentials.get("password")

        if not email or not password:
            return None

        user = await self._repo.get_user_by_email(email)
        if user is None:
            return None

        if user.password_hash is None:
            # OAuth user without local password
            return None

        if not await verify_password_async(password, user.password_hash):
            return None

        if needs_rehash(user.password_hash):
            try:
                user.password_hash = await hash_password_async(password)
                await self._repo.update_user(user)
            except Exception:
                # Rehash is an opportunistic upgrade; a transient DB error must not
                # prevent an otherwise-valid login from succeeding.
                logger.warning("Failed to rehash password for user %s; login will still succeed", user.email, exc_info=True)

        return user

    async def get_user(self, user_id: str) -> User | None:
        """Get user by ID."""
        return await self._repo.get_user_by_id(user_id)

    async def create_user(self, email: str, password: str | None = None, system_role: str = "user", needs_setup: bool = False) -> User:
        """Create a new local user.

        Args:
            email: User email address
            password: Plain text password (will be hashed)
            system_role: Role to assign ("admin" or "user")
            needs_setup: If True, user must complete setup on first login

        Returns:
            Created User instance
        """
        password_hash = await hash_password_async(password) if password else None
        user = User(
            email=email,
            password_hash=password_hash,
            system_role=system_role,
            needs_setup=needs_setup,
        )
        return await self._repo.create_user(user)

    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        """Get user by OAuth provider and ID."""
        return await self._repo.get_user_by_oauth(provider, oauth_id)

    async def count_users(self) -> int:
        """Return total number of registered users."""
        return await self._repo.count_users()

    async def count_admin_users(self) -> int:
        """Return number of admin users."""
        return await self._repo.count_admin_users()

    async def update_user(self, user: User) -> User:
        """Update an existing user."""
        return await self._repo.update_user(user)

    async def get_user_by_email(self, email: str) -> User | None:
        """Get user by email."""
        return await self._repo.get_user_by_email(email)
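
A sketch (not part of the diff) of the login shape this provider enables: authenticate() collapses every failure to None, and the ver claim is taken from user.token_version so a password change invalidates previously issued cookies. The login helper name is illustrative:

from app.gateway.auth.jwt import create_access_token
from app.gateway.auth.local_provider import LocalAuthProvider


async def login(provider: LocalAuthProvider, email: str, password: str) -> str | None:
    # None covers unknown email, OAuth-only user, and bad password alike,
    # so the caller cannot distinguish them (no user enumeration).
    user = await provider.authenticate({"email": email, "password": password})
    if user is None:
        return None
    return create_access_token(str(user.id), token_version=user.token_version)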
backend/app/gateway/auth/models.py (new file, 41 lines)
@@ -0,0 +1,41 @@
"""User Pydantic models for authentication."""

from datetime import UTC, datetime
from typing import Literal
from uuid import UUID, uuid4

from pydantic import BaseModel, ConfigDict, EmailStr, Field


def _utc_now() -> datetime:
    """Return current UTC time (timezone-aware)."""
    return datetime.now(UTC)


class User(BaseModel):
    """Internal user representation."""

    model_config = ConfigDict(from_attributes=True)

    id: UUID = Field(default_factory=uuid4, description="Primary key")
    email: EmailStr = Field(..., description="Unique email address")
    password_hash: str | None = Field(None, description="bcrypt hash, nullable for OAuth users")
    system_role: Literal["admin", "user"] = Field(default="user")
    created_at: datetime = Field(default_factory=_utc_now)

    # OAuth linkage (optional)
    oauth_provider: str | None = Field(None, description="e.g. 'github', 'google'")
    oauth_id: str | None = Field(None, description="User ID from OAuth provider")

    # Auth lifecycle
    needs_setup: bool = Field(default=False, description="True for auto-created admin until setup completes")
    token_version: int = Field(default=0, description="Incremented on password change to invalidate old JWTs")


class UserResponse(BaseModel):
    """Response model for user info endpoint."""

    id: str
    email: str
    system_role: Literal["admin", "user"]
    needs_setup: bool = False
backend/app/gateway/auth/password.py (new file, 81 lines)
@@ -0,0 +1,81 @@
"""Password hashing utilities with versioned hash format.

Hash format: ``$dfv<N>$<bcrypt_hash>`` where ``<N>`` is the version.

- **v1** (legacy): ``bcrypt(password)`` — plain bcrypt, susceptible to
  72-byte silent truncation.
- **v2** (current): ``bcrypt(b64(sha256(password)))`` — SHA-256 pre-hash
  avoids the 72-byte truncation limit so the full password contributes
  to the hash.

Verification auto-detects the version and falls back to v1 for hashes
without a prefix, so existing deployments upgrade transparently on next
login.
"""

import asyncio
import base64
import hashlib

import bcrypt

_CURRENT_VERSION = 2
_PREFIX_V2 = "$dfv2$"
_PREFIX_V1 = "$dfv1$"


def _pre_hash_v2(password: str) -> bytes:
    """SHA-256 pre-hash to bypass bcrypt's 72-byte limit."""
    return base64.b64encode(hashlib.sha256(password.encode("utf-8")).digest())


def hash_password(password: str) -> str:
    """Hash a password (current version: v2 — SHA-256 + bcrypt)."""
    raw = bcrypt.hashpw(_pre_hash_v2(password), bcrypt.gensalt()).decode("utf-8")
    return f"{_PREFIX_V2}{raw}"


def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Verify a password, auto-detecting the hash version.

    Accepts v2 (``$dfv2$…``), v1 (``$dfv1$…``), and bare bcrypt hashes
    (treated as v1 for backward compatibility with pre-versioning data).
    """
    try:
        if hashed_password.startswith(_PREFIX_V2):
            bcrypt_hash = hashed_password[len(_PREFIX_V2) :]
            return bcrypt.checkpw(_pre_hash_v2(plain_password), bcrypt_hash.encode("utf-8"))

        if hashed_password.startswith(_PREFIX_V1):
            bcrypt_hash = hashed_password[len(_PREFIX_V1) :]
        else:
            bcrypt_hash = hashed_password

        return bcrypt.checkpw(plain_password.encode("utf-8"), bcrypt_hash.encode("utf-8"))
    except ValueError:
        # bcrypt raises ValueError for malformed or corrupt hashes (e.g., invalid salt).
        # Fail closed rather than crashing the request.
        return False


def needs_rehash(hashed_password: str) -> bool:
    """Return True if the hash uses an older version and should be rehashed."""
    return not hashed_password.startswith(_PREFIX_V2)


async def hash_password_async(password: str) -> str:
    """Hash a password using bcrypt (non-blocking).

    Wraps the blocking bcrypt operation in a thread pool to avoid
    blocking the event loop during password hashing.
    """
    return await asyncio.to_thread(hash_password, password)


async def verify_password_async(plain_password: str, hashed_password: str) -> bool:
    """Verify a password against its hash (non-blocking).

    Wraps the blocking bcrypt operation in a thread pool to avoid
    blocking the event loop during password verification.
    """
    return await asyncio.to_thread(verify_password, plain_password, hashed_password)
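
A sketch (not part of the diff) of the transparent v1-to-v2 upgrade path the module docstring describes. The bare bcrypt hash stands in for a pre-versioning database row; LocalAuthProvider.authenticate performs the actual rehash on login:

import bcrypt

from app.gateway.auth.password import hash_password, needs_rehash, verify_password

# A legacy row: bare bcrypt with no $dfvN$ prefix.
legacy = bcrypt.hashpw(b"hunter2", bcrypt.gensalt()).decode("utf-8")
assert verify_password("hunter2", legacy)   # still accepted (treated as v1)
assert needs_rehash(legacy)                 # flagged for opportunistic upgrade

# After rehash, the stored value carries the v2 prefix.
upgraded = hash_password("hunter2")
assert upgraded.startswith("$dfv2$") and not needs_rehash(upgraded)
assert verify_password("hunter2", upgraded)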
backend/app/gateway/auth/providers.py (new file, 24 lines)
@@ -0,0 +1,24 @@
"""Auth provider abstraction."""

from abc import ABC, abstractmethod


class AuthProvider(ABC):
    """Abstract base class for authentication providers."""

    @abstractmethod
    async def authenticate(self, credentials: dict) -> "User | None":
        """Authenticate user with given credentials.

        Returns User if authentication succeeds, None otherwise.
        """
        raise NotImplementedError

    @abstractmethod
    async def get_user(self, user_id: str) -> "User | None":
        """Retrieve user by ID."""
        raise NotImplementedError


# Import User at runtime to avoid circular imports
from app.gateway.auth.models import User  # noqa: E402
backend/app/gateway/auth/repositories/__init__.py (new file, 0 lines)
backend/app/gateway/auth/repositories/base.py (new file, 102 lines)
@@ -0,0 +1,102 @@
"""User repository interface for abstracting database operations."""

from abc import ABC, abstractmethod

from app.gateway.auth.models import User


class UserNotFoundError(LookupError):
    """Raised when a user repository operation targets a non-existent row.

    Subclass of :class:`LookupError` so callers that already catch
    ``LookupError`` for "missing entity" can keep working unchanged,
    while specific call sites can pin to this class to distinguish
    "concurrent delete during update" from other lookups.
    """


class UserRepository(ABC):
    """Abstract interface for user data storage.

    Implement this interface to support different storage backends
    (e.g., SQLite).
    """

    @abstractmethod
    async def create_user(self, user: User) -> User:
        """Create a new user.

        Args:
            user: User object to create

        Returns:
            Created User with ID assigned

        Raises:
            ValueError: If email already exists
        """
        raise NotImplementedError

    @abstractmethod
    async def get_user_by_id(self, user_id: str) -> User | None:
        """Get user by ID.

        Args:
            user_id: User UUID as string

        Returns:
            User if found, None otherwise
        """
        raise NotImplementedError

    @abstractmethod
    async def get_user_by_email(self, email: str) -> User | None:
        """Get user by email.

        Args:
            email: User email address

        Returns:
            User if found, None otherwise
        """
        raise NotImplementedError

    @abstractmethod
    async def update_user(self, user: User) -> User:
        """Update an existing user.

        Args:
            user: User object with updated fields

        Returns:
            Updated User

        Raises:
            UserNotFoundError: If no row exists for ``user.id``. This is
                a hard failure (not a no-op) so callers cannot mistake a
                concurrent-delete race for a successful update.
        """
        raise NotImplementedError

    @abstractmethod
    async def count_users(self) -> int:
        """Return total number of registered users."""
        raise NotImplementedError

    @abstractmethod
    async def count_admin_users(self) -> int:
        """Return number of users with system_role == 'admin'."""
        raise NotImplementedError

    @abstractmethod
    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        """Get user by OAuth provider and ID.

        Args:
            provider: OAuth provider name (e.g. 'github', 'google')
            oauth_id: User ID from the OAuth provider

        Returns:
            User if found, None otherwise
        """
        raise NotImplementedError
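
A dict-backed test double (a sketch, not part of this PR) showing what implementing the full ABC entails, including the duplicate-email ValueError and the hard-fail update contract documented above:

from app.gateway.auth.models import User
from app.gateway.auth.repositories.base import UserNotFoundError, UserRepository


class InMemoryUserRepository(UserRepository):
    def __init__(self) -> None:
        self._by_id: dict[str, User] = {}

    async def create_user(self, user: User) -> User:
        if any(u.email == user.email for u in self._by_id.values()):
            raise ValueError(f"Email already registered: {user.email}")
        self._by_id[str(user.id)] = user
        return user

    async def get_user_by_id(self, user_id: str) -> User | None:
        return self._by_id.get(user_id)

    async def get_user_by_email(self, email: str) -> User | None:
        return next((u for u in self._by_id.values() if u.email == email), None)

    async def update_user(self, user: User) -> User:
        # Mirror the contract: a vanished row is a hard failure, not a no-op.
        if str(user.id) not in self._by_id:
            raise UserNotFoundError(f"User {user.id} no longer exists")
        self._by_id[str(user.id)] = user
        return user

    async def count_users(self) -> int:
        return len(self._by_id)

    async def count_admin_users(self) -> int:
        return sum(1 for u in self._by_id.values() if u.system_role == "admin")

    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        return next(
            (u for u in self._by_id.values() if u.oauth_provider == provider and u.oauth_id == oauth_id),
            None,
        )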
backend/app/gateway/auth/repositories/sqlite.py (new file, 127 lines)
@@ -0,0 +1,127 @@
"""SQLAlchemy-backed UserRepository implementation.

Uses the shared async session factory from
``deerflow.persistence.engine`` — the ``users`` table lives in the
same database as ``threads_meta``, ``runs``, ``run_events``, and
``feedback``.

Constructor takes the session factory directly (same pattern as the
other four repositories in ``deerflow.persistence.*``). Callers
construct this after ``init_engine_from_config()`` has run.
"""

from __future__ import annotations

from datetime import UTC
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from app.gateway.auth.models import User
from app.gateway.auth.repositories.base import UserNotFoundError, UserRepository
from deerflow.persistence.user.model import UserRow


class SQLiteUserRepository(UserRepository):
    """Async user repository backed by the shared SQLAlchemy engine."""

    def __init__(self, session_factory: async_sessionmaker[AsyncSession]) -> None:
        self._sf = session_factory

    # ── Converters ────────────────────────────────────────────────────

    @staticmethod
    def _row_to_user(row: UserRow) -> User:
        return User(
            id=UUID(row.id),
            email=row.email,
            password_hash=row.password_hash,
            system_role=row.system_role,  # type: ignore[arg-type]
            # SQLite loses tzinfo on read; reattach UTC so downstream
            # code can compare timestamps reliably.
            created_at=row.created_at if row.created_at.tzinfo else row.created_at.replace(tzinfo=UTC),
            oauth_provider=row.oauth_provider,
            oauth_id=row.oauth_id,
            needs_setup=row.needs_setup,
            token_version=row.token_version,
        )

    @staticmethod
    def _user_to_row(user: User) -> UserRow:
        return UserRow(
            id=str(user.id),
            email=user.email,
            password_hash=user.password_hash,
            system_role=user.system_role,
            created_at=user.created_at,
            oauth_provider=user.oauth_provider,
            oauth_id=user.oauth_id,
            needs_setup=user.needs_setup,
            token_version=user.token_version,
        )

    # ── CRUD ──────────────────────────────────────────────────────────

    async def create_user(self, user: User) -> User:
        """Insert a new user. Raises ``ValueError`` on duplicate email."""
        row = self._user_to_row(user)
        async with self._sf() as session:
            session.add(row)
            try:
                await session.commit()
            except IntegrityError as exc:
                await session.rollback()
                raise ValueError(f"Email already registered: {user.email}") from exc
        return user

    async def get_user_by_id(self, user_id: str) -> User | None:
        async with self._sf() as session:
            row = await session.get(UserRow, user_id)
            return self._row_to_user(row) if row is not None else None

    async def get_user_by_email(self, email: str) -> User | None:
        stmt = select(UserRow).where(UserRow.email == email)
        async with self._sf() as session:
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            return self._row_to_user(row) if row is not None else None

    async def update_user(self, user: User) -> User:
        async with self._sf() as session:
            row = await session.get(UserRow, str(user.id))
            if row is None:
                # Hard fail on concurrent delete: callers (reset_admin,
                # password change handlers, _ensure_admin_user) all
                # fetched the user just before this call, so a missing
                # row here means the row vanished underneath us. Silent
                # success would let the caller log "password reset" for
                # a row that no longer exists.
                raise UserNotFoundError(f"User {user.id} no longer exists")
            row.email = user.email
            row.password_hash = user.password_hash
            row.system_role = user.system_role
            row.oauth_provider = user.oauth_provider
            row.oauth_id = user.oauth_id
            row.needs_setup = user.needs_setup
            row.token_version = user.token_version
            await session.commit()
        return user

    async def count_users(self) -> int:
        stmt = select(func.count()).select_from(UserRow)
        async with self._sf() as session:
            return await session.scalar(stmt) or 0

    async def count_admin_users(self) -> int:
        stmt = select(func.count()).select_from(UserRow).where(UserRow.system_role == "admin")
        async with self._sf() as session:
            return await session.scalar(stmt) or 0

    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        stmt = select(UserRow).where(UserRow.oauth_provider == provider, UserRow.oauth_id == oauth_id)
        async with self._sf() as session:
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            return self._row_to_user(row) if row is not None else None
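
A smoke-test sketch (not part of the diff). It assumes the aiosqlite driver is installed and that UserRow is an ordinary SQLAlchemy declarative model whose metadata can be created directly; in the application, schema setup is handled by init_engine_from_config instead:

import asyncio

from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

from app.gateway.auth.models import User
from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
from deerflow.persistence.user.model import UserRow


async def main() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    async with engine.begin() as conn:
        await conn.run_sync(UserRow.metadata.create_all)

    repo = SQLiteUserRepository(async_sessionmaker(engine, expire_on_commit=False))
    user = await repo.create_user(User(email="a@example.com"))
    fetched = await repo.get_user_by_email("a@example.com")
    assert fetched is not None and fetched.id == user.id


asyncio.run(main())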
backend/app/gateway/auth/reset_admin.py (new file, 91 lines)
@@ -0,0 +1,91 @@
"""CLI tool to reset an admin password.

Usage:
    python -m app.gateway.auth.reset_admin
    python -m app.gateway.auth.reset_admin --email admin@example.com

Writes the new password to ``.deer-flow/admin_initial_credentials.txt``
(mode 0600) instead of printing it, so CI / log aggregators never see
the cleartext secret.
"""

from __future__ import annotations

import argparse
import asyncio
import secrets
import sys

from sqlalchemy import select

from app.gateway.auth.credential_file import write_initial_credentials
from app.gateway.auth.password import hash_password
from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
from deerflow.persistence.user.model import UserRow


async def _run(email: str | None) -> int:
    from deerflow.config import get_app_config
    from deerflow.persistence.engine import (
        close_engine,
        get_session_factory,
        init_engine_from_config,
    )

    config = get_app_config()
    await init_engine_from_config(config.database)
    try:
        sf = get_session_factory()
        if sf is None:
            print("Error: persistence engine not available (check config.database).", file=sys.stderr)
            return 1

        repo = SQLiteUserRepository(sf)

        if email:
            user = await repo.get_user_by_email(email)
        else:
            # Find first admin via direct SELECT — repository does not
            # expose a "first admin" helper and we do not want to add
            # one just for this CLI.
            async with sf() as session:
                stmt = select(UserRow).where(UserRow.system_role == "admin").limit(1)
                row = (await session.execute(stmt)).scalar_one_or_none()
                if row is None:
                    user = None
                else:
                    user = await repo.get_user_by_id(row.id)

        if user is None:
            if email:
                print(f"Error: user '{email}' not found.", file=sys.stderr)
            else:
                print("Error: no admin user found.", file=sys.stderr)
            return 1

        new_password = secrets.token_urlsafe(16)
        user.password_hash = hash_password(new_password)
        user.token_version += 1
        user.needs_setup = True
        await repo.update_user(user)

        cred_path = write_initial_credentials(user.email, new_password, label="reset")
        print(f"Password reset for: {user.email}")
        print(f"Credentials written to: {cred_path} (mode 0600)")
        print("Next login will require setup (new email + password).")
        return 0
    finally:
        await close_engine()


def main() -> None:
    parser = argparse.ArgumentParser(description="Reset admin password")
    parser.add_argument("--email", help="Admin email (default: first admin found)")
    args = parser.parse_args()

    exit_code = asyncio.run(_run(args.email))
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
backend/app/gateway/auth_middleware.py (new file, 126 lines)
@@ -0,0 +1,126 @@
"""Global authentication middleware — fail-closed safety net.

Rejects unauthenticated requests to non-public paths with 401. When a
request passes the cookie check, resolves the JWT payload to a real
``User`` object and stamps it into both ``request.state.user`` and the
``deerflow.runtime.user_context`` contextvar so that repository-layer
owner filtering works automatically via the sentinel pattern.

Fine-grained permission checks remain in authz.py decorators.
"""

from collections.abc import Callable

from fastapi import HTTPException, Request, Response
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
from starlette.types import ASGIApp

from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse
from app.gateway.authz import _ALL_PERMISSIONS, AuthContext
from app.gateway.internal_auth import INTERNAL_AUTH_HEADER_NAME, get_internal_user, is_valid_internal_auth_token
from deerflow.runtime.user_context import reset_current_user, set_current_user

# Paths that never require authentication.
_PUBLIC_PATH_PREFIXES: tuple[str, ...] = (
    "/health",
    "/docs",
    "/redoc",
    "/openapi.json",
)

# Exact auth paths that are public (login/register/status check).
# /api/v1/auth/me, /api/v1/auth/change-password etc. are NOT public.
_PUBLIC_EXACT_PATHS: frozenset[str] = frozenset(
    {
        "/api/v1/auth/login/local",
        "/api/v1/auth/register",
        "/api/v1/auth/logout",
        "/api/v1/auth/setup-status",
        "/api/v1/auth/initialize",
    }
)


def _is_public(path: str) -> bool:
    stripped = path.rstrip("/")
    if stripped in _PUBLIC_EXACT_PATHS:
        return True
    return any(path.startswith(prefix) for prefix in _PUBLIC_PATH_PREFIXES)


class AuthMiddleware(BaseHTTPMiddleware):
    """Strict auth gate: reject requests without a valid session.

    Two-stage check for non-public paths:

    1. Cookie presence — return 401 NOT_AUTHENTICATED if missing
    2. JWT validation via ``get_current_user_from_request`` — return 401
       TOKEN_INVALID if the token is absent, malformed, expired, or the
       signed user does not exist / is stale

    On success, stamps ``request.state.user`` and the
    ``deerflow.runtime.user_context`` contextvar so that repository-layer
    owner filters work downstream without every route needing a
    ``@require_auth`` decorator. Routes that need per-resource
    authorization (e.g. "user A cannot read user B's thread by guessing
    the URL") should additionally use ``@require_permission(...,
    owner_check=True)`` for explicit enforcement — but authentication
    itself is fully handled here.
    """

    def __init__(self, app: ASGIApp) -> None:
        super().__init__(app)

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        if _is_public(request.url.path):
            return await call_next(request)

        internal_user = None
        if is_valid_internal_auth_token(request.headers.get(INTERNAL_AUTH_HEADER_NAME)):
            internal_user = get_internal_user()

        # Non-public path: require session cookie
        if internal_user is None and not request.cookies.get("access_token"):
            return JSONResponse(
                status_code=401,
                content={
                    "detail": AuthErrorResponse(
                        code=AuthErrorCode.NOT_AUTHENTICATED,
                        message="Authentication required",
                    ).model_dump()
                },
            )

        # Strict JWT validation: reject junk/expired tokens with 401
        # right here instead of silently passing through. This closes
        # the "junk cookie bypass" gap (AUTH_TEST_PLAN test 7.5.8):
        # without this, non-isolation routes like /api/models would
        # accept any cookie-shaped string as authentication.
        #
        # We call the *strict* resolver so that fine-grained error
        # codes (token_expired, token_invalid, user_not_found, …)
        # propagate from AuthErrorCode, not get flattened into one
        # generic code. BaseHTTPMiddleware doesn't let HTTPException
        # bubble up, so we catch and render it as JSONResponse here.
        from app.gateway.deps import get_current_user_from_request

        if internal_user is not None:
            user = internal_user
        else:
            try:
                user = await get_current_user_from_request(request)
            except HTTPException as exc:
                return JSONResponse(status_code=exc.status_code, content={"detail": exc.detail})

        # Stamp both request.state.user (for the contextvar pattern)
        # and request.state.auth (so @require_permission's "auth is
        # None" branch short-circuits instead of running the entire
        # JWT-decode + DB-lookup pipeline a second time per request).
        request.state.user = user
        request.state.auth = AuthContext(user=user, permissions=_ALL_PERMISSIONS)
        token = set_current_user(user)
        try:
            return await call_next(request)
        finally:
            reset_current_user(token)
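
A wiring sketch (not part of the diff; the real registration lives in the Gateway app factory). Starlette runs the last-added middleware outermost, so one plausible ordering adds CSRFMiddleware after AuthMiddleware to have the CSRF check execute first on the way in:

from fastapi import FastAPI

from app.gateway.auth_middleware import AuthMiddleware
from app.gateway.csrf_middleware import CSRFMiddleware

app = FastAPI()
app.add_middleware(AuthMiddleware)   # inner: runs second on each request
app.add_middleware(CSRFMiddleware)   # outermost: last added runs first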
backend/app/gateway/authz.py (new file, 301 lines)
@@ -0,0 +1,301 @@
"""Authorization decorators and context for DeerFlow.

Inspired by LangGraph Auth system: https://github.com/langchain-ai/langgraph/blob/main/libs/sdk-py/langgraph_sdk/auth/__init__.py

**Usage:**

1. Use ``@require_auth`` on routes that need authentication
2. Use ``@require_permission("resource", "action", owner_check=...)`` for permission checks
3. The decorator chain processes from bottom to top

**Example:**

    @router.get("/{thread_id}")
    @require_auth
    @require_permission("threads", "read", owner_check=True)
    async def get_thread(thread_id: str, request: Request):
        # User is authenticated and has threads:read permission
        ...

**Permission Model:**

- threads:read - View thread
- threads:write - Create/update thread
- threads:delete - Delete thread
- runs:create - Run agent
- runs:read - View run
- runs:cancel - Cancel run
"""

from __future__ import annotations

import functools
import inspect
from collections.abc import Callable
from types import SimpleNamespace
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar

from fastapi import HTTPException, Request

if TYPE_CHECKING:
    from app.gateway.auth.models import User

P = ParamSpec("P")
T = TypeVar("T")


# Permission constants
class Permissions:
    """Permission constants for resource:action format."""

    # Threads
    THREADS_READ = "threads:read"
    THREADS_WRITE = "threads:write"
    THREADS_DELETE = "threads:delete"

    # Runs
    RUNS_CREATE = "runs:create"
    RUNS_READ = "runs:read"
    RUNS_CANCEL = "runs:cancel"


class AuthContext:
    """Authentication context for the current request.

    Stored in request.state.auth after require_auth decoration.

    Attributes:
        user: The authenticated user, or None if anonymous
        permissions: List of permission strings (e.g., "threads:read")
    """

    __slots__ = ("user", "permissions")

    def __init__(self, user: User | None = None, permissions: list[str] | None = None):
        self.user = user
        self.permissions = permissions or []

    @property
    def is_authenticated(self) -> bool:
        """Check if user is authenticated."""
        return self.user is not None

    def has_permission(self, resource: str, action: str) -> bool:
        """Check if context has permission for resource:action.

        Args:
            resource: Resource name (e.g., "threads")
            action: Action name (e.g., "read")

        Returns:
            True if user has permission
        """
        permission = f"{resource}:{action}"
        return permission in self.permissions

    def require_user(self) -> User:
        """Get user or raise 401.

        Raises:
            HTTPException 401 if not authenticated
        """
        if not self.user:
            raise HTTPException(status_code=401, detail="Authentication required")
        return self.user


def get_auth_context(request: Request) -> AuthContext | None:
    """Get AuthContext from request state."""
    return getattr(request.state, "auth", None)


_ALL_PERMISSIONS: list[str] = [
    Permissions.THREADS_READ,
    Permissions.THREADS_WRITE,
    Permissions.THREADS_DELETE,
    Permissions.RUNS_CREATE,
    Permissions.RUNS_READ,
    Permissions.RUNS_CANCEL,
]


def _make_test_request_stub() -> Any:
    """Create a minimal request-like object for direct unit calls.

    Used when decorated route handlers are invoked without FastAPI's
    request injection. Includes fields accessed by auth helpers.
    """
    return SimpleNamespace(state=SimpleNamespace(), cookies={}, _deerflow_test_bypass_auth=True)


async def _authenticate(request: Request) -> AuthContext:
    """Authenticate request and return AuthContext.

    Delegates to deps.get_optional_user_from_request() for the JWT→User pipeline.
    Returns AuthContext with user=None for anonymous requests.
    """
    from app.gateway.deps import get_optional_user_from_request

    user = await get_optional_user_from_request(request)
    if user is None:
        return AuthContext(user=None, permissions=[])

    # In future, permissions could be stored in user record
    return AuthContext(user=user, permissions=_ALL_PERMISSIONS)


def require_auth[**P, T](func: Callable[P, T]) -> Callable[P, T]:
    """Decorator that authenticates the request and enforces authentication.

    Independently raises HTTP 401 for unauthenticated requests, regardless of
    whether ``AuthMiddleware`` is present in the ASGI stack. Sets the resolved
    ``AuthContext`` on ``request.state.auth`` for downstream handlers.

    Must be placed ABOVE other decorators (executes after them).

    Usage:
        @router.get("/{thread_id}")
        @require_auth  # Bottom decorator (executes first after permission check)
        @require_permission("threads", "read")
        async def get_thread(thread_id: str, request: Request):
            auth: AuthContext = request.state.auth
            ...

    Raises:
        HTTPException: 401 if the request is unauthenticated.
        ValueError: If 'request' parameter is missing.
    """

    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        request = kwargs.get("request")
        if request is None:
            # Unit tests may call decorated handlers directly without a
            # FastAPI Request object. Inject a minimal request stub when
            # the wrapped function declares `request`.
            if "request" in inspect.signature(func).parameters:
                kwargs["request"] = _make_test_request_stub()
            else:
                raise ValueError("require_auth decorator requires 'request' parameter")
            request = kwargs["request"]

        if getattr(request, "_deerflow_test_bypass_auth", False):
            return await func(*args, **kwargs)

        # Authenticate and set context
        auth_context = await _authenticate(request)
        request.state.auth = auth_context

        if not auth_context.is_authenticated:
            raise HTTPException(status_code=401, detail="Authentication required")

        return await func(*args, **kwargs)

    return wrapper


def require_permission(
    resource: str,
    action: str,
    owner_check: bool = False,
    require_existing: bool = False,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
    """Decorator that checks permission for resource:action.

    Must be used AFTER @require_auth.

    Args:
        resource: Resource name (e.g., "threads", "runs")
        action: Action name (e.g., "read", "write", "delete")
        owner_check: If True, validates that the current user owns the resource.
            Requires 'thread_id' path parameter and performs ownership check.
        require_existing: Only meaningful with ``owner_check=True``. If True, a
            missing ``threads_meta`` row counts as a denial (404)
            instead of "untracked legacy thread, allow". Use on
            **destructive / mutating** routes (DELETE, PATCH,
            state-update) so a deleted thread can't be re-targeted
            by another user via the missing-row code path.

    Usage:
        # Read-style: legacy untracked threads are allowed
        @require_permission("threads", "read", owner_check=True)
        async def get_thread(thread_id: str, request: Request):
            ...

        # Destructive: thread row MUST exist and be owned by caller
        @require_permission("threads", "delete", owner_check=True, require_existing=True)
        async def delete_thread(thread_id: str, request: Request):
            ...

    Raises:
        HTTPException 401: If authentication required but user is anonymous
        HTTPException 403: If user lacks permission
        HTTPException 404: If owner_check=True but user doesn't own the thread
        ValueError: If owner_check=True but 'thread_id' parameter is missing
    """

    def decorator(func: Callable[P, T]) -> Callable[P, T]:
        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            request = kwargs.get("request")
            if request is None:
                # Unit tests may call decorated route handlers directly without
                # constructing a FastAPI Request object. Inject a minimal stub
                # when the wrapped function declares `request`.
                if "request" in inspect.signature(func).parameters:
                    kwargs["request"] = _make_test_request_stub()
                else:
                    return await func(*args, **kwargs)
                request = kwargs["request"]

            if getattr(request, "_deerflow_test_bypass_auth", False):
                return await func(*args, **kwargs)

            auth: AuthContext | None = getattr(request.state, "auth", None)
            if auth is None:
                auth = await _authenticate(request)
                request.state.auth = auth

            if not auth.is_authenticated:
                raise HTTPException(status_code=401, detail="Authentication required")

            # Check permission
            if not auth.has_permission(resource, action):
                raise HTTPException(
                    status_code=403,
                    detail=f"Permission denied: {resource}:{action}",
                )

            # Owner check for thread-specific resources.
            #
            # 2.0-rc moved thread metadata into the SQL persistence layer
            # (``threads_meta`` table). We verify ownership via
            # ``ThreadMetaStore.check_access``: it returns True for
            # missing rows (untracked legacy thread) and for rows whose
            # ``user_id`` is NULL (shared / pre-auth data), so the check
            # denies only on positive evidence of a mismatch — an
            # *existing* row with a *different* user_id triggers 404.
            if owner_check:
                thread_id = kwargs.get("thread_id")
                if thread_id is None:
                    raise ValueError("require_permission with owner_check=True requires 'thread_id' parameter")

                from app.gateway.deps import get_thread_store

                thread_store = get_thread_store(request)
                allowed = await thread_store.check_access(
                    thread_id,
                    str(auth.user.id),
                    require_existing=require_existing,
                )
                if not allowed:
                    raise HTTPException(
                        status_code=404,
                        detail=f"Thread {thread_id} not found",
                    )

            return await func(*args, **kwargs)

        return wrapper

    return decorator
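
A complete route sketch (not part of the diff) combining both decorators as the module docstring prescribes; the router prefix and response body are illustrative:

from fastapi import APIRouter, Request

from app.gateway.authz import require_auth, require_permission

router = APIRouter(prefix="/api/v1/threads")


@router.delete("/{thread_id}")
@require_auth
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_thread(thread_id: str, request: Request) -> dict:
    # Reaching this body implies: valid session, threads:delete permission,
    # and an existing threads_meta row owned by the caller.
    return {"deleted": thread_id}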
backend/app/gateway/csrf_middleware.py (new file, 113 lines)
@@ -0,0 +1,113 @@
"""CSRF protection middleware for FastAPI.

Per RFC-001:
    State-changing operations require CSRF protection.
"""

import secrets
from collections.abc import Callable

from fastapi import Request, Response
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
from starlette.types import ASGIApp

CSRF_COOKIE_NAME = "csrf_token"
CSRF_HEADER_NAME = "X-CSRF-Token"
CSRF_TOKEN_LENGTH = 64  # bytes


def is_secure_request(request: Request) -> bool:
    """Detect whether the original client request was made over HTTPS."""
    return request.headers.get("x-forwarded-proto", request.url.scheme) == "https"


def generate_csrf_token() -> str:
    """Generate a secure random CSRF token."""
    return secrets.token_urlsafe(CSRF_TOKEN_LENGTH)


def should_check_csrf(request: Request) -> bool:
    """Determine if a request needs CSRF validation.

    CSRF is checked for state-changing methods (POST, PUT, DELETE, PATCH).
    GET, HEAD, OPTIONS, and TRACE are exempt per RFC 7231.
    """
    if request.method not in ("POST", "PUT", "DELETE", "PATCH"):
        return False

    path = request.url.path.rstrip("/")
    # Exempt /api/v1/auth/me endpoint
    if path == "/api/v1/auth/me":
        return False
    return True


_AUTH_EXEMPT_PATHS: frozenset[str] = frozenset(
    {
        "/api/v1/auth/login/local",
        "/api/v1/auth/logout",
        "/api/v1/auth/register",
        "/api/v1/auth/initialize",
    }
)


def is_auth_endpoint(request: Request) -> bool:
    """Check if the request is to an auth endpoint.

    Auth endpoints don't need CSRF validation on first call (no token).
    """
    return request.url.path.rstrip("/") in _AUTH_EXEMPT_PATHS


class CSRFMiddleware(BaseHTTPMiddleware):
    """Middleware that implements CSRF protection using Double Submit Cookie pattern."""

    def __init__(self, app: ASGIApp) -> None:
        super().__init__(app)

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        _is_auth = is_auth_endpoint(request)

        if should_check_csrf(request) and not _is_auth:
            cookie_token = request.cookies.get(CSRF_COOKIE_NAME)
            header_token = request.headers.get(CSRF_HEADER_NAME)

            if not cookie_token or not header_token:
                return JSONResponse(
                    status_code=403,
                    content={"detail": "CSRF token missing. Include X-CSRF-Token header."},
                )

            if not secrets.compare_digest(cookie_token, header_token):
                return JSONResponse(
                    status_code=403,
                    content={"detail": "CSRF token mismatch."},
                )

        response = await call_next(request)

        # For auth endpoints that set up session, also set CSRF cookie
        if _is_auth and request.method == "POST":
            # Generate a new CSRF token for the session
            csrf_token = generate_csrf_token()
            is_https = is_secure_request(request)
            response.set_cookie(
                key=CSRF_COOKIE_NAME,
                value=csrf_token,
                httponly=False,  # Must be JS-readable for Double Submit Cookie pattern
                secure=is_https,
                samesite="strict",
            )

        return response


def get_csrf_token(request: Request) -> str | None:
    """Get the CSRF token from the current request's cookies.

    This is useful for server-side rendering where you need to embed
    the token in forms or headers.
    """
    return request.cookies.get(CSRF_COOKIE_NAME)
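
A client-side sketch (not part of the diff) of the Double Submit Cookie flow: login is CSRF-exempt and sets the csrf_token cookie, and subsequent mutating calls must echo it in X-CSRF-Token. It assumes the login route also sets the access_token session cookie, which is outside this diff; the /api/v1/threads call is illustrative:

import httpx

with httpx.Client(base_url="http://localhost:2026") as client:
    client.post(
        "/api/v1/auth/login/local",
        json={"email": "admin@example.com", "password": "s3cret"},
    )
    # The middleware set the JS-readable csrf_token cookie on the login response.
    csrf = client.cookies.get("csrf_token")
    r = client.post("/api/v1/threads", headers={"X-CSRF-Token": csrf})
    assert r.status_code != 403  # header matched the cookie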
backend/app/gateway/deps.py
@@ -8,12 +8,34 @@ Initialization is handled directly in ``app.py`` via :class:`AsyncExitStack`.

 from __future__ import annotations

-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Callable
 from contextlib import AsyncExitStack, asynccontextmanager
+from typing import TYPE_CHECKING, TypeVar, cast

 from fastapi import FastAPI, HTTPException, Request
+from langgraph.types import Checkpointer

-from deerflow.runtime import RunManager, StreamBridge
+from deerflow.config.app_config import AppConfig
+from deerflow.persistence.feedback import FeedbackRepository
+from deerflow.runtime import RunContext, RunManager, StreamBridge
+from deerflow.runtime.events.store.base import RunEventStore
+from deerflow.runtime.runs.store.base import RunStore
+
+if TYPE_CHECKING:
+    from app.gateway.auth.local_provider import LocalAuthProvider
+    from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
+    from deerflow.persistence.thread_meta.base import ThreadMetaStore
+
+
+T = TypeVar("T")
+
+
+def get_config(request: Request) -> AppConfig:
+    """Return the app-scoped ``AppConfig`` stored on ``app.state``."""
+    config = getattr(request.app.state, "config", None)
+    if config is None:
+        raise HTTPException(status_code=503, detail="Configuration not available")
+    return config


 @asynccontextmanager
@@ -25,15 +47,54 @@ async def langgraph_runtime(app: FastAPI) -> AsyncGenerator[None, None]:
     async with langgraph_runtime(app):
         yield
     """
-    from deerflow.agents.checkpointer.async_provider import make_checkpointer
+    from deerflow.persistence.engine import close_engine, get_session_factory, init_engine_from_config
     from deerflow.runtime import make_store, make_stream_bridge
+    from deerflow.runtime.checkpointer.async_provider import make_checkpointer
+    from deerflow.runtime.events.store import make_run_event_store

     async with AsyncExitStack() as stack:
-        app.state.stream_bridge = await stack.enter_async_context(make_stream_bridge())
-        app.state.checkpointer = await stack.enter_async_context(make_checkpointer())
-        app.state.store = await stack.enter_async_context(make_store())
-        app.state.run_manager = RunManager()
-        yield
+        config = getattr(app.state, "config", None)
+        if config is None:
+            raise RuntimeError("langgraph_runtime() requires app.state.config to be initialized")
+
+        app.state.stream_bridge = await stack.enter_async_context(make_stream_bridge(config))
+
+        # Initialize persistence engine BEFORE checkpointer so that
+        # auto-create-database logic runs first (postgres backend).
+        await init_engine_from_config(config.database)
+
+        app.state.checkpointer = await stack.enter_async_context(make_checkpointer(config))
+        app.state.store = await stack.enter_async_context(make_store(config))
+
+        # Initialize repositories — one get_session_factory() call for all.
+        sf = get_session_factory()
+        if sf is not None:
+            from deerflow.persistence.feedback import FeedbackRepository
+            from deerflow.persistence.run import RunRepository
+
+            app.state.run_store = RunRepository(sf)
+            app.state.feedback_repo = FeedbackRepository(sf)
+        else:
+            from deerflow.runtime.runs.store.memory import MemoryRunStore
+
+            app.state.run_store = MemoryRunStore()
+            app.state.feedback_repo = None
+
+        from deerflow.persistence.thread_meta import make_thread_store
+
+        app.state.thread_store = make_thread_store(sf, app.state.store)
+
+        # Run event store (has its own factory with config-driven backend selection)
+        run_events_config = getattr(config, "run_events", None)
+        app.state.run_event_store = make_run_event_store(run_events_config)
+
+        # RunManager with store backing for persistence
+        app.state.run_manager = RunManager(store=app.state.run_store)
+
+        try:
+            yield
+        finally:
+            await close_engine()


 # ---------------------------------------------------------------------------
@ -41,30 +102,144 @@ async def langgraph_runtime(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_stream_bridge(request: Request) -> StreamBridge:
|
||||
"""Return the global :class:`StreamBridge`, or 503."""
|
||||
bridge = getattr(request.app.state, "stream_bridge", None)
|
||||
if bridge is None:
|
||||
raise HTTPException(status_code=503, detail="Stream bridge not available")
|
||||
return bridge
|
||||
def _require(attr: str, label: str) -> Callable[[Request], T]:
|
||||
"""Create a FastAPI dependency that returns ``app.state.<attr>`` or 503."""
|
||||
|
||||
def dep(request: Request) -> T:
|
||||
val = getattr(request.app.state, attr, None)
|
||||
if val is None:
|
||||
raise HTTPException(status_code=503, detail=f"{label} not available")
|
||||
return cast(T, val)
|
||||
|
||||
dep.__name__ = dep.__qualname__ = f"get_{attr}"
|
||||
return dep
|
||||
|
||||
|
||||
def get_run_manager(request: Request) -> RunManager:
|
||||
"""Return the global :class:`RunManager`, or 503."""
|
||||
mgr = getattr(request.app.state, "run_manager", None)
|
||||
if mgr is None:
|
||||
raise HTTPException(status_code=503, detail="Run manager not available")
|
||||
return mgr
|
||||
|
||||
|
||||
def get_checkpointer(request: Request):
|
||||
"""Return the global checkpointer, or 503."""
|
||||
cp = getattr(request.app.state, "checkpointer", None)
|
||||
if cp is None:
|
||||
raise HTTPException(status_code=503, detail="Checkpointer not available")
|
||||
return cp
|
||||
get_stream_bridge: Callable[[Request], StreamBridge] = _require("stream_bridge", "Stream bridge")
|
||||
get_run_manager: Callable[[Request], RunManager] = _require("run_manager", "Run manager")
|
||||
get_checkpointer: Callable[[Request], Checkpointer] = _require("checkpointer", "Checkpointer")
|
||||
get_run_event_store: Callable[[Request], RunEventStore] = _require("run_event_store", "Run event store")
|
||||
get_feedback_repo: Callable[[Request], FeedbackRepository] = _require("feedback_repo", "Feedback")
|
||||
get_run_store: Callable[[Request], RunStore] = _require("run_store", "Run store")
|
||||
|
||||
|
||||
def get_store(request: Request):
|
||||
"""Return the global store (may be ``None`` if not configured)."""
|
||||
return getattr(request.app.state, "store", None)
|
||||
|
||||
|
||||
def get_thread_store(request: Request) -> ThreadMetaStore:
|
||||
"""Return the thread metadata store (SQL or memory-backed)."""
|
||||
val = getattr(request.app.state, "thread_store", None)
|
||||
if val is None:
|
||||
raise HTTPException(status_code=503, detail="Thread metadata store not available")
|
||||
return val
|
||||
|
||||
|
||||
def get_run_context(request: Request) -> RunContext:
|
||||
"""Build a :class:`RunContext` from ``app.state`` singletons.
|
||||
|
||||
Returns a *base* context with infrastructure dependencies.
|
||||
"""
|
||||
config = get_config(request)
|
||||
return RunContext(
|
||||
checkpointer=get_checkpointer(request),
|
||||
store=get_store(request),
|
||||
event_store=get_run_event_store(request),
|
||||
run_events_config=getattr(config, "run_events", None),
|
||||
thread_store=get_thread_store(request),
|
||||
app_config=config,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Auth helpers (used by authz.py and auth middleware)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Cached singletons to avoid repeated instantiation per request
|
||||
_cached_local_provider: LocalAuthProvider | None = None
|
||||
_cached_repo: SQLiteUserRepository | None = None
|
||||
|
||||
|
||||
def get_local_provider() -> LocalAuthProvider:
|
||||
"""Get or create the cached LocalAuthProvider singleton.
|
||||
|
||||
Must be called after ``init_engine_from_config()`` — the shared
|
||||
session factory is required to construct the user repository.
|
||||
"""
|
||||
global _cached_local_provider, _cached_repo
|
||||
if _cached_repo is None:
|
||||
from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
|
||||
from deerflow.persistence.engine import get_session_factory
|
||||
|
||||
sf = get_session_factory()
|
||||
if sf is None:
|
||||
raise RuntimeError("get_local_provider() called before init_engine_from_config(); cannot access users table")
|
||||
_cached_repo = SQLiteUserRepository(sf)
|
||||
if _cached_local_provider is None:
|
||||
from app.gateway.auth.local_provider import LocalAuthProvider
|
||||
|
||||
_cached_local_provider = LocalAuthProvider(repository=_cached_repo)
|
||||
return _cached_local_provider
|
||||
|
||||
|
||||
async def get_current_user_from_request(request: Request):
    """Get the current authenticated user from the request cookie.

    Raises HTTPException 401 if not authenticated.
    """
    from app.gateway.auth import decode_token
    from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse, TokenError, token_error_to_code

    access_token = request.cookies.get("access_token")
    if not access_token:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.NOT_AUTHENTICATED, message="Not authenticated").model_dump(),
        )

    payload = decode_token(access_token)
    if isinstance(payload, TokenError):
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=token_error_to_code(payload), message=f"Token error: {payload.value}").model_dump(),
        )

    provider = get_local_provider()
    user = await provider.get_user(payload.sub)
    if user is None:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.USER_NOT_FOUND, message="User not found").model_dump(),
        )

    # Token version mismatch → password was changed, token is stale
    if user.token_version != payload.ver:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.TOKEN_INVALID, message="Token revoked (password changed)").model_dump(),
        )

    return user


async def get_optional_user_from_request(request: Request):
    """Get optional authenticated user from request.

    Returns None if not authenticated.
    """
    try:
        return await get_current_user_from_request(request)
    except HTTPException:
        return None


async def get_current_user(request: Request) -> str | None:
    """Extract user_id from request cookie, or None if not authenticated.

    Thin adapter that returns the string id for callers that only need
    identification (e.g., ``feedback.py``). Full-user callers should use
    ``get_current_user_from_request`` or ``get_optional_user_from_request``.
    """
    user = await get_optional_user_from_request(request)
    return str(user.id) if user else None
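
# [Editor's sketch — not part of the diff] A minimal illustration of how the
# three accessors above divide labor in a route module; the routes themselves
# are hypothetical.
from fastapi import APIRouter, Request

from app.gateway.deps import (
    get_current_user,
    get_current_user_from_request,
    get_optional_user_from_request,
)

sketch_router = APIRouter()


@sketch_router.get("/whoami")
async def whoami(request: Request):
    user = await get_current_user_from_request(request)  # raises 401 if absent/stale
    return {"email": user.email}


@sketch_router.get("/landing")
async def landing(request: Request):
    user = await get_optional_user_from_request(request)  # None instead of 401
    return {"authenticated": user is not None}


@sketch_router.get("/id-only")
async def id_only(request: Request):
    return {"user_id": await get_current_user(request)}  # str | None, id only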
26 backend/app/gateway/internal_auth.py Normal file
@ -0,0 +1,26 @@
"""Process-local authentication for Gateway internal callers."""

from __future__ import annotations

import secrets
from types import SimpleNamespace

from deerflow.runtime.user_context import DEFAULT_USER_ID

INTERNAL_AUTH_HEADER_NAME = "X-DeerFlow-Internal-Token"
_INTERNAL_AUTH_TOKEN = secrets.token_urlsafe(32)


def create_internal_auth_headers() -> dict[str, str]:
    """Return headers that authenticate same-process Gateway internal calls."""
    return {INTERNAL_AUTH_HEADER_NAME: _INTERNAL_AUTH_TOKEN}


def is_valid_internal_auth_token(token: str | None) -> bool:
    """Return True when *token* matches the process-local internal token."""
    return bool(token) and secrets.compare_digest(token, _INTERNAL_AUTH_TOKEN)


def get_internal_user():
    """Return the synthetic user used for trusted internal channel calls."""
    return SimpleNamespace(id=DEFAULT_USER_ID, system_role="internal")
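
# [Editor's sketch — not part of the diff] Usage of the internal channel.
# The "/api/internal/refresh" route and the port are assumptions for
# illustration; only the helper names come from the file above.
import httpx

from app.gateway.internal_auth import (
    INTERNAL_AUTH_HEADER_NAME,
    create_internal_auth_headers,
    is_valid_internal_auth_token,
)


async def call_internal_endpoint() -> None:
    # Caller side: attach the process-local token. Because the token is
    # generated at import time and never persisted, only same-process code
    # can produce a matching header.
    async with httpx.AsyncClient(base_url="http://127.0.0.1:2026") as client:
        resp = await client.post("/api/internal/refresh", headers=create_internal_auth_headers())
        resp.raise_for_status()


def accept_internal(headers: dict[str, str]) -> bool:
    # Receiving side (middleware/dependency): constant-time comparison.
    return is_valid_internal_auth_token(headers.get(INTERNAL_AUTH_HEADER_NAME))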
106 backend/app/gateway/langgraph_auth.py Normal file
@ -0,0 +1,106 @@
"""LangGraph Server auth handler — shares JWT logic with Gateway.

Loaded by LangGraph Server via langgraph.json ``auth.path``.
Reuses the same ``decode_token`` / ``get_auth_config`` as Gateway,
so both modes validate tokens with the same secret and rules.

Two layers:
1. @auth.authenticate — validates JWT cookie, extracts user_id,
   and enforces CSRF on state-changing methods (POST/PUT/DELETE/PATCH)
2. @auth.on — returns metadata filter so each user only sees own threads
"""

import secrets

from langgraph_sdk import Auth

from app.gateway.auth.errors import TokenError
from app.gateway.auth.jwt import decode_token
from app.gateway.deps import get_local_provider

auth = Auth()

# Methods that require CSRF validation (state-changing per RFC 7231).
_CSRF_METHODS = frozenset({"POST", "PUT", "DELETE", "PATCH"})


def _check_csrf(request) -> None:
    """Enforce Double Submit Cookie CSRF check for state-changing requests.

    Mirrors Gateway's CSRFMiddleware logic so that LangGraph routes
    proxied directly by nginx have the same CSRF protection.
    """
    method = getattr(request, "method", "") or ""
    if method.upper() not in _CSRF_METHODS:
        return

    cookie_token = request.cookies.get("csrf_token")
    header_token = request.headers.get("x-csrf-token")

    if not cookie_token or not header_token:
        raise Auth.exceptions.HTTPException(
            status_code=403,
            detail="CSRF token missing. Include X-CSRF-Token header.",
        )

    if not secrets.compare_digest(cookie_token, header_token):
        raise Auth.exceptions.HTTPException(
            status_code=403,
            detail="CSRF token mismatch.",
        )


@auth.authenticate
async def authenticate(request):
    """Validate the session cookie, decode JWT, and check token_version.

    Same validation chain as Gateway's get_current_user_from_request:
    cookie → decode JWT → DB lookup → token_version match
    Also enforces CSRF on state-changing methods.
    """
    # CSRF check before authentication so forged cross-site requests
    # are rejected early, even if the cookie carries a valid JWT.
    _check_csrf(request)

    token = request.cookies.get("access_token")
    if not token:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="Not authenticated",
        )

    payload = decode_token(token)
    if isinstance(payload, TokenError):
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="Invalid token",
        )

    user = await get_local_provider().get_user(payload.sub)
    if user is None:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="User not found",
        )
    if user.token_version != payload.ver:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="Token revoked (password changed)",
        )

    return payload.sub


@auth.on
async def add_owner_filter(ctx: Auth.types.AuthContext, value: dict):
    """Inject user_id metadata on writes; filter by user_id on reads.

    Gateway stores thread ownership as ``metadata.user_id``.
    This handler ensures LangGraph Server enforces the same isolation.
    """
    # On create/update: stamp user_id into metadata
    metadata = value.setdefault("metadata", {})
    metadata["user_id"] = ctx.user.identity

    # Return filter dict — LangGraph applies it to search/read/delete
    return {"user_id": ctx.user.identity}
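
# [Editor's sketch — not part of the diff] From the client side, the
# double-submit contract above reduces to echoing the cookie back in a
# header. Cookie and header names match the handler; "/threads/search"
# stands in for any proxied LangGraph route.
import httpx


def post_with_csrf(client: httpx.Client, path: str, payload: dict) -> httpx.Response:
    # A cross-site attacker can make the browser *send* the csrf_token
    # cookie, but cannot *read* it to copy it into the header, so the
    # compare_digest check above fails for forged requests.
    csrf = client.cookies.get("csrf_token")
    return client.post(path, json=payload, headers={"x-csrf-token": csrf or ""})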
@ -5,6 +5,7 @@ from pathlib import Path
from fastapi import HTTPException

from deerflow.config.paths import get_paths
from deerflow.runtime.user_context import get_effective_user_id


def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
@ -22,7 +23,7 @@ def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
        HTTPException: If the path is invalid or outside allowed directories.
    """
    try:
        return get_paths().resolve_virtual_path(thread_id, virtual_path)
        return get_paths().resolve_virtual_path(thread_id, virtual_path, user_id=get_effective_user_id())
    except ValueError as e:
        status = 403 if "traversal" in str(e) else 400
        raise HTTPException(status_code=status, detail=str(e))

@ -7,6 +7,7 @@ from urllib.parse import quote
from fastapi import APIRouter, HTTPException, Request
from fastapi.responses import FileResponse, PlainTextResponse, Response

from app.gateway.authz import require_permission
from app.gateway.path_utils import resolve_thread_virtual_path

logger = logging.getLogger(__name__)
@ -81,6 +82,7 @@ def _extract_file_from_skill_archive(zip_path: Path, internal_path: str) -> byte
    summary="Get Artifact File",
    description="Retrieve an artifact file generated by the AI agent. Text and binary files can be viewed inline, while active web content is always downloaded.",
)
@require_permission("threads", "read", owner_check=True)
async def get_artifact(thread_id: str, path: str, request: Request, download: bool = False) -> Response:
    """Get an artifact file by its path.

493 backend/app/gateway/routers/auth.py Normal file
@ -0,0 +1,493 @@
"""Authentication endpoints."""

import logging
import os
import time
from ipaddress import ip_address, ip_network

from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import BaseModel, EmailStr, Field, field_validator

from app.gateway.auth import (
    UserResponse,
    create_access_token,
)
from app.gateway.auth.config import get_auth_config
from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse
from app.gateway.csrf_middleware import is_secure_request
from app.gateway.deps import get_current_user_from_request, get_local_provider

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/v1/auth", tags=["auth"])


# ── Request/Response Models ──────────────────────────────────────────────


class LoginResponse(BaseModel):
    """Response model for login — token only lives in HttpOnly cookie."""

    expires_in: int  # seconds
    needs_setup: bool = False


# Top common-password blocklist. Drawn from the public SecLists "10k worst
# passwords" set, lowercased + length>=8 only (shorter ones already fail
# the min_length check). Kept tight on purpose: this is the **lower bound**
# defense, not a full HIBP / passlib check, and runs in-process per request.
_COMMON_PASSWORDS: frozenset[str] = frozenset(
    {
        "password",
        "password1",
        "password12",
        "password123",
        "password1234",
        "12345678",
        "123456789",
        "1234567890",
        "qwerty12",
        "qwertyui",
        "qwerty123",
        "abc12345",
        "abcd1234",
        "iloveyou",
        "letmein1",
        "welcome1",
        "welcome123",
        "admin123",
        "administrator",
        "passw0rd",
        "p@ssw0rd",
        "monkey12",
        "trustno1",
        "sunshine",
        "princess",
        "football",
        "baseball",
        "superman",
        "batman123",
        "starwars",
        "dragon123",
        "master123",
        "shadow12",
        "michael1",
        "jennifer",
        "computer",
    }
)


def _password_is_common(password: str) -> bool:
    """Case-insensitive blocklist check.

    Lowercases the input so trivial mutations like ``Password`` /
    ``PASSWORD`` are also rejected. Does not normalize digit substitutions
    (``p@ssw0rd`` is included as a literal entry instead) — keeping the
    rule cheap and predictable.
    """
    return password.lower() in _COMMON_PASSWORDS


def _validate_strong_password(value: str) -> str:
    """Pydantic field-validator body shared by Register + ChangePassword.

    Constraint = function, not type-level mixin. The two request models
    have no "is-a" relationship; they only share the password-strength
    rule. Lifting it into a free function lets each model bind it via
    ``@field_validator(field_name)`` without inheritance gymnastics.
    """
    if _password_is_common(value):
        raise ValueError("Password is too common; choose a stronger password.")
    return value
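
# [Editor's sketch — not part of the diff] The shared rule in action through
# one of the bound models defined below; pydantic v2 wraps the ValueError
# message, so the exact error text shown is an assumption.
from pydantic import ValidationError

try:
    RegisterRequest(email="user@example.com", password="password123")
except ValidationError as exc:
    # Long enough for min_length=8, but on the blocklist.
    print(exc.errors()[0]["msg"])  # "Value error, Password is too common; ..."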
class RegisterRequest(BaseModel):
    """Request model for user registration."""

    email: EmailStr
    password: str = Field(..., min_length=8)

    _strong_password = field_validator("password")(classmethod(lambda cls, v: _validate_strong_password(v)))


class ChangePasswordRequest(BaseModel):
    """Request model for password change (also handles setup flow)."""

    current_password: str
    new_password: str = Field(..., min_length=8)
    new_email: EmailStr | None = None

    _strong_password = field_validator("new_password")(classmethod(lambda cls, v: _validate_strong_password(v)))


class MessageResponse(BaseModel):
    """Generic message response."""

    message: str


# ── Helpers ───────────────────────────────────────────────────────────────


def _set_session_cookie(response: Response, token: str, request: Request) -> None:
    """Set the access_token HttpOnly cookie on the response."""
    config = get_auth_config()
    is_https = is_secure_request(request)
    response.set_cookie(
        key="access_token",
        value=token,
        httponly=True,
        secure=is_https,
        samesite="lax",
        max_age=config.token_expiry_days * 24 * 3600 if is_https else None,
    )


# ── Rate Limiting ────────────────────────────────────────────────────────
# In-process dict — not shared across workers.
#
# **Limitation**: with multi-worker deployments (e.g., gunicorn -w N), each
# worker maintains its own lockout table, so an attacker effectively gets
# N × _MAX_LOGIN_ATTEMPTS guesses before being locked out everywhere. For
# production multi-worker setups, replace this with a shared store (Redis,
# database-backed counter) to enforce a true per-IP limit.

_MAX_LOGIN_ATTEMPTS = 5
_LOCKOUT_SECONDS = 300  # 5 minutes

# ip → (fail_count, lock_until_timestamp)
_login_attempts: dict[str, tuple[int, float]] = {}


def _trusted_proxies() -> list:
    """Parse ``AUTH_TRUSTED_PROXIES`` env var into a list of ip_network objects.

    Comma-separated CIDR or single-IP entries. Empty / unset = no proxy is
    trusted (direct mode). Invalid entries are skipped with a logger warning.
    Read live so env-var overrides take effect immediately and tests can
    ``monkeypatch.setenv`` without poking a module-level cache.
    """
    raw = os.getenv("AUTH_TRUSTED_PROXIES", "").strip()
    if not raw:
        return []
    nets = []
    for entry in raw.split(","):
        entry = entry.strip()
        if not entry:
            continue
        try:
            nets.append(ip_network(entry, strict=False))
        except ValueError:
            logger.warning("AUTH_TRUSTED_PROXIES: ignoring invalid entry %r", entry)
    return nets


def _get_client_ip(request: Request) -> str:
    """Extract the real client IP for rate limiting.

    Trust model:

    - The TCP peer (``request.client.host``) is always the baseline. It is
      whatever the kernel reports as the connecting socket — unforgeable
      by the client itself.
    - ``X-Real-IP`` is **only** honored if the TCP peer is in the
      ``AUTH_TRUSTED_PROXIES`` allowlist (set via env var, comma-separated
      CIDR or single IPs). When set, the gateway is assumed to be behind a
      reverse proxy (nginx, Cloudflare, ALB, …) that overwrites
      ``X-Real-IP`` with the original client address.
    - With no ``AUTH_TRUSTED_PROXIES`` set, ``X-Real-IP`` is silently
      ignored — closing the bypass where any client could rotate the
      header to dodge per-IP rate limits in dev / direct-gateway mode.

    ``X-Forwarded-For`` is intentionally NOT used because it is naturally
    client-controlled at the *first* hop and the trust chain is harder to
    audit per-request.
    """
    peer_host = request.client.host if request.client else None

    trusted = _trusted_proxies()
    if trusted and peer_host:
        try:
            peer_ip = ip_address(peer_host)
            if any(peer_ip in net for net in trusted):
                real_ip = request.headers.get("x-real-ip", "").strip()
                if real_ip:
                    return real_ip
        except ValueError:
            # peer_host wasn't a parseable IP (e.g. "unknown") — fall through
            pass

    return peer_host or "unknown"
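
# [Editor's sketch — not part of the diff] The trust model exercised with a
# stand-in object (not FastAPI's Request; it only mimics the attributes
# _get_client_ip reads).
import os
from types import SimpleNamespace

os.environ["AUTH_TRUSTED_PROXIES"] = "10.0.0.0/8"
fake = SimpleNamespace(
    client=SimpleNamespace(host="10.0.0.7"),  # TCP peer inside the allowlist
    headers={"x-real-ip": "203.0.113.9"},     # set by the reverse proxy
)
assert _get_client_ip(fake) == "203.0.113.9"  # header honored

os.environ["AUTH_TRUSTED_PROXIES"] = ""       # direct mode
assert _get_client_ip(fake) == "10.0.0.7"     # header silently ignored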
def _check_rate_limit(ip: str) -> None:
    """Raise 429 if the IP is currently locked out."""
    record = _login_attempts.get(ip)
    if record is None:
        return
    fail_count, lock_until = record
    if fail_count >= _MAX_LOGIN_ATTEMPTS:
        if time.time() < lock_until:
            raise HTTPException(
                status_code=429,
                detail="Too many login attempts. Try again later.",
            )
        del _login_attempts[ip]


_MAX_TRACKED_IPS = 10000


def _record_login_failure(ip: str) -> None:
    """Record a failed login attempt for the given IP."""
    # Evict expired lockouts when dict grows too large
    if len(_login_attempts) >= _MAX_TRACKED_IPS:
        now = time.time()
        expired = [k for k, (c, t) in _login_attempts.items() if c >= _MAX_LOGIN_ATTEMPTS and now >= t]
        for k in expired:
            del _login_attempts[k]
        # If still too large, evict cheapest-to-lose half: below-threshold
        # IPs (lock_until=0.0) sort first, then earliest-expiring lockouts.
        if len(_login_attempts) >= _MAX_TRACKED_IPS:
            by_time = sorted(_login_attempts.items(), key=lambda kv: kv[1][1])
            for k, _ in by_time[: len(by_time) // 2]:
                del _login_attempts[k]

    record = _login_attempts.get(ip)
    if record is None:
        _login_attempts[ip] = (1, 0.0)
    else:
        new_count = record[0] + 1
        lock_until = time.time() + _LOCKOUT_SECONDS if new_count >= _MAX_LOGIN_ATTEMPTS else 0.0
        _login_attempts[ip] = (new_count, lock_until)


def _record_login_success(ip: str) -> None:
    """Clear failure counter for the given IP on successful login."""
    _login_attempts.pop(ip, None)
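
# [Editor's sketch — not part of the diff] As the limitation note above says,
# multi-worker deployments need a shared store. A fixed-window variant backed
# by Redis; assumes redis-py (redis.asyncio) and a reachable server, neither
# of which this PR introduces.
import redis.asyncio as aioredis

_redis = aioredis.from_url("redis://localhost:6379/0")


async def check_and_record_failure(ip: str) -> None:
    key = f"login_fail:{ip}"
    count = await _redis.incr(key)                  # atomic across all workers
    if count == 1:
        await _redis.expire(key, _LOCKOUT_SECONDS)  # window opens on first failure
    if count >= _MAX_LOGIN_ATTEMPTS:
        raise HTTPException(status_code=429, detail="Too many login attempts. Try again later.")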
# ── Endpoints ─────────────────────────────────────────────────────────────


@router.post("/login/local", response_model=LoginResponse)
async def login_local(
    request: Request,
    response: Response,
    form_data: OAuth2PasswordRequestForm = Depends(),
):
    """Local email/password login."""
    client_ip = _get_client_ip(request)
    _check_rate_limit(client_ip)

    user = await get_local_provider().authenticate({"email": form_data.username, "password": form_data.password})

    if user is None:
        _record_login_failure(client_ip)
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="Incorrect email or password").model_dump(),
        )

    _record_login_success(client_ip)
    token = create_access_token(str(user.id), token_version=user.token_version)
    _set_session_cookie(response, token, request)

    return LoginResponse(
        expires_in=get_auth_config().token_expiry_days * 24 * 3600,
        needs_setup=user.needs_setup,
    )
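
# [Editor's sketch — not part of the diff] The login round-trip from a
# client; the port comes from the repo's dev setup, credentials are
# placeholders.
import httpx

with httpx.Client(base_url="http://localhost:2026") as client:
    # OAuth2PasswordRequestForm reads form fields named "username"/"password",
    # so the email travels as "username".
    resp = client.post(
        "/api/v1/auth/login/local",
        data={"username": "admin@example.com", "password": "a-strong-passphrase"},
    )
    resp.raise_for_status()
    print(resp.json())                       # {"expires_in": ..., "needs_setup": ...}
    assert "access_token" in client.cookies  # the JWT rides in the HttpOnly cookie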
@router.post("/register", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
|
||||
async def register(request: Request, response: Response, body: RegisterRequest):
|
||||
"""Register a new user account (always 'user' role).
|
||||
|
||||
Admin is auto-created on first boot. This endpoint creates regular users.
|
||||
Auto-login by setting the session cookie.
|
||||
"""
|
||||
try:
|
||||
user = await get_local_provider().create_user(email=body.email, password=body.password, system_role="user")
|
||||
except ValueError:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.EMAIL_ALREADY_EXISTS, message="Email already registered").model_dump(),
|
||||
)
|
||||
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role)
|
||||
|
||||
|
||||
@router.post("/logout", response_model=MessageResponse)
|
||||
async def logout(request: Request, response: Response):
|
||||
"""Logout current user by clearing the cookie."""
|
||||
response.delete_cookie(key="access_token", secure=is_secure_request(request), samesite="lax")
|
||||
return MessageResponse(message="Successfully logged out")
|
||||
|
||||
|
||||
@router.post("/change-password", response_model=MessageResponse)
|
||||
async def change_password(request: Request, response: Response, body: ChangePasswordRequest):
|
||||
"""Change password for the currently authenticated user.
|
||||
|
||||
Also handles the first-boot setup flow:
|
||||
- If new_email is provided, updates email (checks uniqueness)
|
||||
- If user.needs_setup is True and new_email is given, clears needs_setup
|
||||
- Always increments token_version to invalidate old sessions
|
||||
- Re-issues session cookie with new token_version
|
||||
"""
|
||||
from app.gateway.auth.password import hash_password_async, verify_password_async
|
||||
|
||||
user = await get_current_user_from_request(request)
|
||||
|
||||
if user.password_hash is None:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="OAuth users cannot change password").model_dump())
|
||||
|
||||
if not await verify_password_async(body.current_password, user.password_hash):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="Current password is incorrect").model_dump())
|
||||
|
||||
provider = get_local_provider()
|
||||
|
||||
# Update email if provided
|
||||
if body.new_email is not None:
|
||||
existing = await provider.get_user_by_email(body.new_email)
|
||||
if existing and str(existing.id) != str(user.id):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.EMAIL_ALREADY_EXISTS, message="Email already in use").model_dump())
|
||||
user.email = body.new_email
|
||||
|
||||
# Update password + bump version
|
||||
user.password_hash = await hash_password_async(body.new_password)
|
||||
user.token_version += 1
|
||||
|
||||
# Clear setup flag if this is the setup flow
|
||||
if user.needs_setup and body.new_email is not None:
|
||||
user.needs_setup = False
|
||||
|
||||
await provider.update_user(user)
|
||||
|
||||
# Re-issue cookie with new token_version
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return MessageResponse(message="Password changed successfully")
|
||||
|
||||
|
||||
@router.get("/me", response_model=UserResponse)
|
||||
async def get_me(request: Request):
|
||||
"""Get current authenticated user info."""
|
||||
user = await get_current_user_from_request(request)
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role, needs_setup=user.needs_setup)
|
||||
|
||||
|
||||
_SETUP_STATUS_COOLDOWN: dict[str, float] = {}
|
||||
_SETUP_STATUS_COOLDOWN_SECONDS = 60
|
||||
_MAX_TRACKED_SETUP_STATUS_IPS = 10000
|
||||
|
||||
|
||||
@router.get("/setup-status")
|
||||
async def setup_status(request: Request):
|
||||
"""Check if an admin account exists. Returns needs_setup=True when no admin exists."""
|
||||
client_ip = _get_client_ip(request)
|
||||
now = time.time()
|
||||
last_check = _SETUP_STATUS_COOLDOWN.get(client_ip, 0)
|
||||
elapsed = now - last_check
|
||||
if elapsed < _SETUP_STATUS_COOLDOWN_SECONDS:
|
||||
retry_after = max(1, int(_SETUP_STATUS_COOLDOWN_SECONDS - elapsed))
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_429_TOO_MANY_REQUESTS,
|
||||
detail="Setup status check is rate limited",
|
||||
headers={"Retry-After": str(retry_after)},
|
||||
)
|
||||
# Evict stale entries when dict grows too large to bound memory usage.
|
||||
if len(_SETUP_STATUS_COOLDOWN) >= _MAX_TRACKED_SETUP_STATUS_IPS:
|
||||
cutoff = now - _SETUP_STATUS_COOLDOWN_SECONDS
|
||||
stale = [k for k, t in _SETUP_STATUS_COOLDOWN.items() if t < cutoff]
|
||||
for k in stale:
|
||||
del _SETUP_STATUS_COOLDOWN[k]
|
||||
# If still too large after evicting expired entries, remove oldest half.
|
||||
if len(_SETUP_STATUS_COOLDOWN) >= _MAX_TRACKED_SETUP_STATUS_IPS:
|
||||
by_time = sorted(_SETUP_STATUS_COOLDOWN.items(), key=lambda kv: kv[1])
|
||||
for k, _ in by_time[: len(by_time) // 2]:
|
||||
del _SETUP_STATUS_COOLDOWN[k]
|
||||
_SETUP_STATUS_COOLDOWN[client_ip] = now
|
||||
admin_count = await get_local_provider().count_admin_users()
|
||||
return {"needs_setup": admin_count == 0}
|
||||
|
||||
|
||||
class InitializeAdminRequest(BaseModel):
|
||||
"""Request model for first-boot admin account creation."""
|
||||
|
||||
email: EmailStr
|
||||
password: str = Field(..., min_length=8)
|
||||
|
||||
_strong_password = field_validator("password")(classmethod(lambda cls, v: _validate_strong_password(v)))
|
||||
|
||||
|
||||
@router.post("/initialize", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
|
||||
async def initialize_admin(request: Request, response: Response, body: InitializeAdminRequest):
|
||||
"""Create the first admin account on initial system setup.
|
||||
|
||||
Only callable when no admin exists. Returns 409 Conflict if an admin
|
||||
already exists.
|
||||
|
||||
On success, the admin account is created with ``needs_setup=False`` and
|
||||
the session cookie is set.
|
||||
"""
|
||||
admin_count = await get_local_provider().count_admin_users()
|
||||
if admin_count > 0:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.SYSTEM_ALREADY_INITIALIZED, message="System already initialized").model_dump(),
|
||||
)
|
||||
|
||||
try:
|
||||
user = await get_local_provider().create_user(email=body.email, password=body.password, system_role="admin", needs_setup=False)
|
||||
except ValueError:
|
||||
# DB unique-constraint race: another concurrent request beat us.
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.SYSTEM_ALREADY_INITIALIZED, message="System already initialized").model_dump(),
|
||||
)
|
||||
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role)
|
||||
|
||||
|
||||
# ── OAuth Endpoints (Future/Placeholder) ─────────────────────────────────
|
||||
|
||||
|
||||
@router.get("/oauth/{provider}")
|
||||
async def oauth_login(provider: str):
|
||||
"""Initiate OAuth login flow.
|
||||
|
||||
Redirects to the OAuth provider's authorization URL.
|
||||
Currently a placeholder - requires OAuth provider implementation.
|
||||
"""
|
||||
if provider not in ["github", "google"]:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Unsupported OAuth provider: {provider}",
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="OAuth login not yet implemented",
|
||||
)
|
||||
|
||||
|
||||
@router.get("/callback/{provider}")
|
||||
async def oauth_callback(provider: str, code: str, state: str):
|
||||
"""OAuth callback endpoint.
|
||||
|
||||
Handles the OAuth provider's callback after user authorization.
|
||||
Currently a placeholder.
|
||||
"""
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="OAuth callback not yet implemented",
|
||||
)
|
||||
188 backend/app/gateway/routers/feedback.py Normal file
@ -0,0 +1,188 @@
"""Feedback endpoints — create, list, stats, delete.

Allows users to submit thumbs-up/down feedback on runs,
optionally scoped to a specific message.
"""

from __future__ import annotations

import logging
from typing import Any

from fastapi import APIRouter, HTTPException, Request
from pydantic import BaseModel, Field

from app.gateway.authz import require_permission
from app.gateway.deps import get_current_user, get_feedback_repo, get_run_store

logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/threads", tags=["feedback"])


# ---------------------------------------------------------------------------
# Request / response models
# ---------------------------------------------------------------------------


class FeedbackCreateRequest(BaseModel):
    rating: int = Field(..., description="Feedback rating: +1 (positive) or -1 (negative)")
    comment: str | None = Field(default=None, description="Optional text feedback")
    message_id: str | None = Field(default=None, description="Optional: scope feedback to a specific message")


class FeedbackUpsertRequest(BaseModel):
    rating: int = Field(..., description="Feedback rating: +1 (positive) or -1 (negative)")
    comment: str | None = Field(default=None, description="Optional text feedback")


class FeedbackResponse(BaseModel):
    feedback_id: str
    run_id: str
    thread_id: str
    user_id: str | None = None
    message_id: str | None = None
    rating: int
    comment: str | None = None
    created_at: str = ""


class FeedbackStatsResponse(BaseModel):
    run_id: str
    total: int = 0
    positive: int = 0
    negative: int = 0


# ---------------------------------------------------------------------------
# Endpoints
# ---------------------------------------------------------------------------


@router.put("/{thread_id}/runs/{run_id}/feedback", response_model=FeedbackResponse)
@require_permission("threads", "write", owner_check=True, require_existing=True)
async def upsert_feedback(
    thread_id: str,
    run_id: str,
    body: FeedbackUpsertRequest,
    request: Request,
) -> dict[str, Any]:
    """Create or update feedback for a run (idempotent)."""
    if body.rating not in (1, -1):
        raise HTTPException(status_code=400, detail="rating must be +1 or -1")

    user_id = await get_current_user(request)

    run_store = get_run_store(request)
    run = await run_store.get(run_id)
    if run is None:
        raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
    if run.get("thread_id") != thread_id:
        raise HTTPException(status_code=404, detail=f"Run {run_id} not found in thread {thread_id}")

    feedback_repo = get_feedback_repo(request)
    return await feedback_repo.upsert(
        run_id=run_id,
        thread_id=thread_id,
        rating=body.rating,
        user_id=user_id,
        comment=body.comment,
    )
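
# [Editor's sketch — not part of the diff] Client-side, PUT is the
# repeat-safe path: calling it twice updates the same row instead of adding
# a second one (contrast with the POST endpoint below). Assumes an
# httpx.Client that already holds the session and csrf cookies.
import httpx


def rate_run(client: httpx.Client, thread_id: str, run_id: str, rating: int) -> dict:
    resp = client.put(
        f"/api/threads/{thread_id}/runs/{run_id}/feedback",
        json={"rating": rating, "comment": "helpful"},
        headers={"x-csrf-token": client.cookies.get("csrf_token") or ""},
    )
    resp.raise_for_status()
    return resp.json()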
@router.delete("/{thread_id}/runs/{run_id}/feedback")
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_run_feedback(
    thread_id: str,
    run_id: str,
    request: Request,
) -> dict[str, bool]:
    """Delete the current user's feedback for a run."""
    user_id = await get_current_user(request)
    feedback_repo = get_feedback_repo(request)
    deleted = await feedback_repo.delete_by_run(
        thread_id=thread_id,
        run_id=run_id,
        user_id=user_id,
    )
    if not deleted:
        raise HTTPException(status_code=404, detail="No feedback found for this run")
    return {"success": True}


@router.post("/{thread_id}/runs/{run_id}/feedback", response_model=FeedbackResponse)
@require_permission("threads", "write", owner_check=True, require_existing=True)
async def create_feedback(
    thread_id: str,
    run_id: str,
    body: FeedbackCreateRequest,
    request: Request,
) -> dict[str, Any]:
    """Submit feedback (thumbs-up/down) for a run."""
    if body.rating not in (1, -1):
        raise HTTPException(status_code=400, detail="rating must be +1 or -1")

    user_id = await get_current_user(request)

    # Validate run exists and belongs to thread
    run_store = get_run_store(request)
    run = await run_store.get(run_id)
    if run is None:
        raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
    if run.get("thread_id") != thread_id:
        raise HTTPException(status_code=404, detail=f"Run {run_id} not found in thread {thread_id}")

    feedback_repo = get_feedback_repo(request)
    return await feedback_repo.create(
        run_id=run_id,
        thread_id=thread_id,
        rating=body.rating,
        user_id=user_id,
        message_id=body.message_id,
        comment=body.comment,
    )


@router.get("/{thread_id}/runs/{run_id}/feedback", response_model=list[FeedbackResponse])
@require_permission("threads", "read", owner_check=True)
async def list_feedback(
    thread_id: str,
    run_id: str,
    request: Request,
) -> list[dict[str, Any]]:
    """List all feedback for a run."""
    feedback_repo = get_feedback_repo(request)
    return await feedback_repo.list_by_run(thread_id, run_id)


@router.get("/{thread_id}/runs/{run_id}/feedback/stats", response_model=FeedbackStatsResponse)
@require_permission("threads", "read", owner_check=True)
async def feedback_stats(
    thread_id: str,
    run_id: str,
    request: Request,
) -> dict[str, Any]:
    """Get aggregated feedback stats (positive/negative counts) for a run."""
    feedback_repo = get_feedback_repo(request)
    return await feedback_repo.aggregate_by_run(thread_id, run_id)


@router.delete("/{thread_id}/runs/{run_id}/feedback/{feedback_id}")
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_feedback(
    thread_id: str,
    run_id: str,
    feedback_id: str,
    request: Request,
) -> dict[str, bool]:
    """Delete a feedback record."""
    feedback_repo = get_feedback_repo(request)
    # Verify feedback belongs to the specified thread/run before deleting
    existing = await feedback_repo.get(feedback_id)
    if existing is None:
        raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found")
    if existing.get("thread_id") != thread_id or existing.get("run_id") != run_id:
        raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found in run {run_id}")
    deleted = await feedback_repo.delete(feedback_id)
    if not deleted:
        raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found")
    return {"success": True}
@ -13,6 +13,7 @@ from deerflow.agents.memory.updater import (
    update_memory_fact,
)
from deerflow.config.memory_config import get_memory_config
from deerflow.runtime.user_context import get_effective_user_id

router = APIRouter(prefix="/api", tags=["memory"])

@ -147,7 +148,7 @@ async def get_memory() -> MemoryResponse:
        }
        ```
    """
    memory_data = get_memory_data()
    memory_data = get_memory_data(user_id=get_effective_user_id())
    return MemoryResponse(**memory_data)


@ -167,7 +168,7 @@ async def reload_memory() -> MemoryResponse:
    Returns:
        The reloaded memory data.
    """
    memory_data = reload_memory_data()
    memory_data = reload_memory_data(user_id=get_effective_user_id())
    return MemoryResponse(**memory_data)


@ -181,7 +182,7 @@ async def reload_memory() -> MemoryResponse:
async def clear_memory() -> MemoryResponse:
    """Clear all persisted memory data."""
    try:
        memory_data = clear_memory_data()
        memory_data = clear_memory_data(user_id=get_effective_user_id())
    except OSError as exc:
        raise HTTPException(status_code=500, detail="Failed to clear memory data.") from exc

@ -202,6 +203,7 @@ async def create_memory_fact_endpoint(request: FactCreateRequest) -> MemoryRespo
            content=request.content,
            category=request.category,
            confidence=request.confidence,
            user_id=get_effective_user_id(),
        )
    except ValueError as exc:
        raise _map_memory_fact_value_error(exc) from exc

@ -221,7 +223,7 @@ async def create_memory_fact_endpoint(request: FactCreateRequest) -> MemoryRespo
async def delete_memory_fact_endpoint(fact_id: str) -> MemoryResponse:
    """Delete a single fact from memory by fact id."""
    try:
        memory_data = delete_memory_fact(fact_id)
        memory_data = delete_memory_fact(fact_id, user_id=get_effective_user_id())
    except KeyError as exc:
        raise HTTPException(status_code=404, detail=f"Memory fact '{fact_id}' not found.") from exc
    except OSError as exc:

@ -245,6 +247,7 @@ async def update_memory_fact_endpoint(fact_id: str, request: FactPatchRequest) -
            content=request.content,
            category=request.category,
            confidence=request.confidence,
            user_id=get_effective_user_id(),
        )
    except ValueError as exc:
        raise _map_memory_fact_value_error(exc) from exc

@ -265,7 +268,7 @@ async def update_memory_fact_endpoint(fact_id: str, request: FactPatchRequest) -
)
async def export_memory() -> MemoryResponse:
    """Export the current memory data."""
    memory_data = get_memory_data()
    memory_data = get_memory_data(user_id=get_effective_user_id())
    return MemoryResponse(**memory_data)


@ -279,7 +282,7 @@ async def export_memory() -> MemoryResponse:
async def import_memory(request: MemoryResponse) -> MemoryResponse:
    """Import and persist memory data."""
    try:
        memory_data = import_memory_data(request.model_dump())
        memory_data = import_memory_data(request.model_dump(), user_id=get_effective_user_id())
    except OSError as exc:
        raise HTTPException(status_code=500, detail="Failed to import memory data.") from exc

@ -337,7 +340,7 @@ async def get_memory_status() -> MemoryStatusResponse:
        Combined memory configuration and current data.
    """
    config = get_memory_config()
    memory_data = get_memory_data()
    memory_data = get_memory_data(user_id=get_effective_user_id())

    return MemoryStatusResponse(
        config=MemoryConfigResponse(
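
# [Editor's sketch — not part of the diff] Every hunk above threads the same
# user_id=get_effective_user_id() argument. This is roughly the contextvar
# mechanism behind it; the middleware placement is an assumption, only the
# accessor name comes from the diff.
from contextvars import ContextVar

_current_user_id: ContextVar[str | None] = ContextVar("current_user_id", default=None)


def get_effective_user_id() -> str | None:
    # Handlers resolve "who is calling" without a user_id parameter being
    # added to every signature.
    return _current_user_id.get()


# Auth middleware would do roughly:
#     token = _current_user_id.set(str(user.id))
#     try:
#         return await call_next(request)
#     finally:
#         _current_user_id.reset(token)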
@ -1,7 +1,8 @@
from fastapi import APIRouter, HTTPException
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field

from deerflow.config import get_app_config
from app.gateway.deps import get_config
from deerflow.config.app_config import AppConfig

router = APIRouter(prefix="/api", tags=["models"])

@ -36,7 +37,7 @@ class ModelsListResponse(BaseModel):
    summary="List All Models",
    description="Retrieve a list of all available AI models configured in the system.",
)
async def list_models() -> ModelsListResponse:
async def list_models(config: AppConfig = Depends(get_config)) -> ModelsListResponse:
    """List all available models from configuration.

    Returns model information suitable for frontend display,

@ -72,7 +73,6 @@ async def list_models() -> ModelsListResponse:
        }
        ```
    """
    config = get_app_config()
    models = [
        ModelResponse(
            name=model.name,

@ -96,7 +96,7 @@ async def list_models() -> ModelsListResponse:
    summary="Get Model Details",
    description="Retrieve detailed information about a specific AI model by its name.",
)
async def get_model(model_name: str) -> ModelResponse:
async def get_model(model_name: str, config: AppConfig = Depends(get_config)) -> ModelResponse:
    """Get a specific model by name.

    Args:

@ -118,7 +118,6 @@ async def get_model(model_name: str) -> ModelResponse:
        }
        ```
    """
    config = get_app_config()
    model = config.get_model_config(model_name)
    if model is None:
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
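
# [Editor's sketch — not part of the diff] The payoff of switching from the
# module-level get_app_config() to Depends(get_config): tests can swap the
# config via FastAPI's dependency_overrides. Fixtures and the /api/models
# path are assumptions.
from fastapi.testclient import TestClient

from app.gateway.deps import get_config


def test_list_models(app, stub_config):  # hypothetical fixtures
    app.dependency_overrides[get_config] = lambda: stub_config
    try:
        client = TestClient(app)
        assert client.get("/api/models").status_code == 200
    finally:
        app.dependency_overrides.clear()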
@ -11,10 +11,11 @@ import asyncio
import logging
import uuid

from fastapi import APIRouter, Request
from fastapi import APIRouter, HTTPException, Query, Request
from fastapi.responses import StreamingResponse

from app.gateway.deps import get_checkpointer, get_run_manager, get_stream_bridge
from app.gateway.authz import require_permission
from app.gateway.deps import get_checkpointer, get_feedback_repo, get_run_event_store, get_run_manager, get_run_store, get_stream_bridge
from app.gateway.routers.thread_runs import RunCreateRequest
from app.gateway.services import sse_consumer, start_run
from deerflow.runtime import serialize_channel_values

@ -85,3 +86,58 @@ async def stateless_wait(body: RunCreateRequest, request: Request) -> dict:
        logger.exception("Failed to fetch final state for run %s", record.run_id)

    return {"status": record.status.value, "error": record.error}


# ---------------------------------------------------------------------------
# Run-scoped read endpoints
# ---------------------------------------------------------------------------


async def _resolve_run(run_id: str, request: Request) -> dict:
    """Fetch run by run_id with user ownership check. Raises 404 if not found."""
    run_store = get_run_store(request)
    record = await run_store.get(run_id)  # user_id=AUTO filters by contextvar
    if record is None:
        raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
    return record


@router.get("/{run_id}/messages")
@require_permission("runs", "read")
async def run_messages(
    run_id: str,
    request: Request,
    limit: int = Query(default=50, le=200, ge=1),
    before_seq: int | None = Query(default=None),
    after_seq: int | None = Query(default=None),
) -> dict:
    """Return paginated messages for a run (cursor-based).

    Pagination:
    - after_seq: messages with seq > after_seq (forward)
    - before_seq: messages with seq < before_seq (backward)
    - neither: latest messages

    Response: { data: [...], has_more: bool }
    """
    run = await _resolve_run(run_id, request)
    event_store = get_run_event_store(request)
    rows = await event_store.list_messages_by_run(
        run["thread_id"],
        run_id,
        limit=limit + 1,
        before_seq=before_seq,
        after_seq=after_seq,
    )
    has_more = len(rows) > limit
    data = rows[:limit] if has_more else rows
    return {"data": data, "has_more": has_more}
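
# [Editor's sketch — not part of the diff] Draining the cursor described
# above from a client, walking backward from the newest page. The /api/runs
# prefix and the "seq" field name are assumptions.
import httpx


def fetch_all_messages(client: httpx.Client, run_id: str) -> list[dict]:
    messages: list[dict] = []
    before_seq: int | None = None
    while True:
        params: dict = {"limit": 200}
        if before_seq is not None:
            params["before_seq"] = before_seq
        page = client.get(f"/api/runs/{run_id}/messages", params=params).json()
        messages = page["data"] + messages  # prepend while walking backward
        if not page["has_more"] or not page["data"]:
            break
        before_seq = min(m["seq"] for m in page["data"])
    return messages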
@router.get("/{run_id}/feedback")
|
||||
@require_permission("runs", "read")
|
||||
async def run_feedback(run_id: str, request: Request) -> list[dict]:
|
||||
"""Return all feedback for a run."""
|
||||
run = await _resolve_run(run_id, request)
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.list_by_run(run["thread_id"], run_id)
|
||||
|
||||
@ -4,11 +4,13 @@ import logging
import shutil
from pathlib import Path

from fastapi import APIRouter, HTTPException
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field

from app.gateway.deps import get_config
from app.gateway.path_utils import resolve_thread_virtual_path
from deerflow.agents.lead_agent.prompt import refresh_skills_system_prompt_cache_async
from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config
from deerflow.skills import Skill, load_skills
from deerflow.skills.installer import SkillAlreadyExistsError, ainstall_skill_from_archive

@ -101,9 +103,9 @@ def _skill_to_response(skill: Skill) -> SkillResponse:
    summary="List All Skills",
    description="Retrieve a list of all available skills from both public and custom directories.",
)
async def list_skills() -> SkillsListResponse:
async def list_skills(config: AppConfig = Depends(get_config)) -> SkillsListResponse:
    try:
        skills = load_skills(enabled_only=False)
        skills = load_skills(enabled_only=False, app_config=config)
        return SkillsListResponse(skills=[_skill_to_response(skill) for skill in skills])
    except Exception as e:
        logger.error(f"Failed to load skills: {e}", exc_info=True)

@ -136,9 +138,9 @@ async def install_skill(request: SkillInstallRequest) -> SkillInstallResponse:


@router.get("/skills/custom", response_model=SkillsListResponse, summary="List Custom Skills")
async def list_custom_skills() -> SkillsListResponse:
async def list_custom_skills(config: AppConfig = Depends(get_config)) -> SkillsListResponse:
    try:
        skills = [skill for skill in load_skills(enabled_only=False) if skill.category == "custom"]
        skills = [skill for skill in load_skills(enabled_only=False, app_config=config) if skill.category == "custom"]
        return SkillsListResponse(skills=[_skill_to_response(skill) for skill in skills])
    except Exception as e:
        logger.error("Failed to list custom skills: %s", e, exc_info=True)

@ -146,13 +148,14 @@ async def list_custom_skills() -> SkillsListResponse:


@router.get("/skills/custom/{skill_name}", response_model=CustomSkillContentResponse, summary="Get Custom Skill Content")
async def get_custom_skill(skill_name: str) -> CustomSkillContentResponse:
async def get_custom_skill(skill_name: str, config: AppConfig = Depends(get_config)) -> CustomSkillContentResponse:
    try:
        skills = load_skills(enabled_only=False)
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        skills = load_skills(enabled_only=False, app_config=config)
        skill = next((s for s in skills if s.name == skill_name and s.category == "custom"), None)
        if skill is None:
            raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
        return CustomSkillContentResponse(**_skill_to_response(skill).model_dump(), content=read_custom_skill_content(skill_name))
        return CustomSkillContentResponse(**_skill_to_response(skill).model_dump(), content=read_custom_skill_content(skill_name, app_config=config))
    except HTTPException:
        raise
    except Exception as e:

@ -161,14 +164,15 @@ async def get_custom_skill(skill_name: str) -> CustomSkillContentResponse:


@router.put("/skills/custom/{skill_name}", response_model=CustomSkillContentResponse, summary="Edit Custom Skill")
async def update_custom_skill(skill_name: str, request: CustomSkillUpdateRequest) -> CustomSkillContentResponse:
async def update_custom_skill(skill_name: str, request: CustomSkillUpdateRequest, config: AppConfig = Depends(get_config)) -> CustomSkillContentResponse:
    try:
        ensure_custom_skill_is_editable(skill_name)
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        ensure_custom_skill_is_editable(skill_name, app_config=config)
        validate_skill_markdown_content(skill_name, request.content)
        scan = await scan_skill_content(request.content, executable=False, location=f"{skill_name}/SKILL.md")
        scan = await scan_skill_content(request.content, executable=False, location=f"{skill_name}/SKILL.md", app_config=config)
        if scan.decision == "block":
            raise HTTPException(status_code=400, detail=f"Security scan blocked the edit: {scan.reason}")
        skill_file = get_custom_skill_dir(skill_name) / "SKILL.md"
        skill_file = get_custom_skill_dir(skill_name, app_config=config) / "SKILL.md"
        prev_content = skill_file.read_text(encoding="utf-8")
        atomic_write(skill_file, request.content)
        append_history(

@ -182,9 +186,10 @@ async def update_custom_skill(skill_name: str, request: CustomSkillUpdateRequest
                "new_content": request.content,
                "scanner": {"decision": scan.decision, "reason": scan.reason},
            },
            app_config=config,
        )
        await refresh_skills_system_prompt_cache_async()
        return await get_custom_skill(skill_name)
        return await get_custom_skill(skill_name, config)
    except HTTPException:
        raise
    except FileNotFoundError as e:

@ -197,11 +202,12 @@ async def update_custom_skill(skill_name: str, request: CustomSkillUpdateRequest


@router.delete("/skills/custom/{skill_name}", summary="Delete Custom Skill")
async def delete_custom_skill(skill_name: str) -> dict[str, bool]:
async def delete_custom_skill(skill_name: str, config: AppConfig = Depends(get_config)) -> dict[str, bool]:
    try:
        ensure_custom_skill_is_editable(skill_name)
        skill_dir = get_custom_skill_dir(skill_name)
        prev_content = read_custom_skill_content(skill_name)
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        ensure_custom_skill_is_editable(skill_name, app_config=config)
        skill_dir = get_custom_skill_dir(skill_name, app_config=config)
        prev_content = read_custom_skill_content(skill_name, app_config=config)
        try:
            append_history(
                skill_name,

@ -214,6 +220,7 @@ async def delete_custom_skill(skill_name: str) -> dict[str, bool]:
                    "new_content": None,
                    "scanner": {"decision": "allow", "reason": "Deletion requested."},
                },
                app_config=config,
            )
        except OSError as e:
            if not isinstance(e, PermissionError) and e.errno not in {errno.EACCES, errno.EPERM, errno.EROFS}:

@ -232,11 +239,12 @@ async def delete_custom_skill(skill_name: str) -> dict[str, bool]:


@router.get("/skills/custom/{skill_name}/history", response_model=CustomSkillHistoryResponse, summary="Get Custom Skill History")
async def get_custom_skill_history(skill_name: str) -> CustomSkillHistoryResponse:
async def get_custom_skill_history(skill_name: str, config: AppConfig = Depends(get_config)) -> CustomSkillHistoryResponse:
    try:
        if not custom_skill_exists(skill_name) and not get_skill_history_file(skill_name).exists():
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        if not custom_skill_exists(skill_name, app_config=config) and not get_skill_history_file(skill_name, app_config=config).exists():
            raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
        return CustomSkillHistoryResponse(history=read_history(skill_name))
        return CustomSkillHistoryResponse(history=read_history(skill_name, app_config=config))
    except HTTPException:
        raise
    except Exception as e:

@ -245,11 +253,11 @@ async def get_custom_skill_history(skill_name: str) -> CustomSkillHistoryRespons


@router.post("/skills/custom/{skill_name}/rollback", response_model=CustomSkillContentResponse, summary="Rollback Custom Skill")
async def rollback_custom_skill(skill_name: str, request: SkillRollbackRequest) -> CustomSkillContentResponse:
async def rollback_custom_skill(skill_name: str, request: SkillRollbackRequest, config: AppConfig = Depends(get_config)) -> CustomSkillContentResponse:
    try:
        if not custom_skill_exists(skill_name) and not get_skill_history_file(skill_name).exists():
        if not custom_skill_exists(skill_name, app_config=config) and not get_skill_history_file(skill_name, app_config=config).exists():
            raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
        history = read_history(skill_name)
        history = read_history(skill_name, app_config=config)
        if not history:
            raise HTTPException(status_code=400, detail=f"Custom skill '{skill_name}' has no history")
        record = history[request.history_index]

@ -257,8 +265,8 @@ async def rollback_custom_skill(skill_name: str, request: SkillRollbackRequest)
        if target_content is None:
            raise HTTPException(status_code=400, detail="Selected history entry has no previous content to roll back to")
        validate_skill_markdown_content(skill_name, target_content)
        scan = await scan_skill_content(target_content, executable=False, location=f"{skill_name}/SKILL.md")
        skill_file = get_custom_skill_file(skill_name)
        scan = await scan_skill_content(target_content, executable=False, location=f"{skill_name}/SKILL.md", app_config=config)
        skill_file = get_custom_skill_file(skill_name, app_config=config)
        current_content = skill_file.read_text(encoding="utf-8") if skill_file.exists() else None
        history_entry = {
            "action": "rollback",

@ -271,12 +279,12 @@ async def rollback_custom_skill(skill_name: str, request: SkillRollbackRequest)
            "scanner": {"decision": scan.decision, "reason": scan.reason},
        }
        if scan.decision == "block":
            append_history(skill_name, history_entry)
            append_history(skill_name, history_entry, app_config=config)
            raise HTTPException(status_code=400, detail=f"Rollback blocked by security scanner: {scan.reason}")
        atomic_write(skill_file, target_content)
        append_history(skill_name, history_entry)
        append_history(skill_name, history_entry, app_config=config)
        await refresh_skills_system_prompt_cache_async()
        return await get_custom_skill(skill_name)
        return await get_custom_skill(skill_name, config)
    except HTTPException:
        raise
    except IndexError:

@ -296,9 +304,10 @@ async def rollback_custom_skill(skill_name: str, request: SkillRollbackRequest)
    summary="Get Skill Details",
    description="Retrieve detailed information about a specific skill by its name.",
)
async def get_skill(skill_name: str) -> SkillResponse:
async def get_skill(skill_name: str, config: AppConfig = Depends(get_config)) -> SkillResponse:
    try:
        skills = load_skills(enabled_only=False)
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        skills = load_skills(enabled_only=False, app_config=config)
        skill = next((s for s in skills if s.name == skill_name), None)

        if skill is None:

@ -318,9 +327,10 @@ async def get_skill(skill_name: str) -> SkillResponse:
    summary="Update Skill",
    description="Update a skill's enabled status by modifying the extensions_config.json file.",
)
async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillResponse:
async def update_skill(skill_name: str, request: SkillUpdateRequest, config: AppConfig = Depends(get_config)) -> SkillResponse:
    try:
        skills = load_skills(enabled_only=False)
        skill_name = skill_name.replace("\r\n", "").replace("\n", "")
        skills = load_skills(enabled_only=False, app_config=config)
        skill = next((s for s in skills if s.name == skill_name), None)

        if skill is None:

@ -346,7 +356,7 @@ async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillRes
    reload_extensions_config()
    await refresh_skills_system_prompt_cache_async()

    skills = load_skills(enabled_only=False)
    skills = load_skills(enabled_only=False, app_config=config)
    updated_skill = next((s for s in skills if s.name == skill_name), None)

    if updated_skill is None:

@ -1,10 +1,13 @@
import json
import logging

from fastapi import APIRouter
from fastapi import APIRouter, Depends, Request
from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field

from app.gateway.authz import require_permission
from app.gateway.deps import get_config
from deerflow.config.app_config import AppConfig
from deerflow.models import create_chat_model

logger = logging.getLogger(__name__)

@ -98,12 +101,18 @@ def _format_conversation(messages: list[SuggestionMessage]) -> str:
    summary="Generate Follow-up Questions",
    description="Generate short follow-up questions a user might ask next, based on recent conversation context.",
)
async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> SuggestionsResponse:
    if not request.messages:
@require_permission("threads", "read", owner_check=True)
async def generate_suggestions(
    thread_id: str,
    body: SuggestionsRequest,
    request: Request,
    config: AppConfig = Depends(get_config),
) -> SuggestionsResponse:
    if not body.messages:
        return SuggestionsResponse(suggestions=[])

    n = request.n
    conversation = _format_conversation(request.messages)
    n = body.n
    conversation = _format_conversation(body.messages)
    if not conversation:
        return SuggestionsResponse(suggestions=[])

@ -120,7 +129,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S
    user_content = f"Conversation Context:\n{conversation}\n\nGenerate {n} follow-up questions"

    try:
        model = create_chat_model(name=request.model_name, thinking_enabled=False)
        model = create_chat_model(name=body.model_name, thinking_enabled=False, app_config=config)
        response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)], config={"run_name": "suggest_agent"})
        raw = _extract_response_text(response.content)
        suggestions = _parse_json_string_list(raw) or []

@@ -19,7 +19,8 @@ from fastapi import APIRouter, HTTPException, Query, Request
from fastapi.responses import Response, StreamingResponse
from pydantic import BaseModel, Field

from app.gateway.deps import get_checkpointer, get_run_manager, get_stream_bridge
from app.gateway.authz import require_permission
from app.gateway.deps import get_checkpointer, get_current_user, get_feedback_repo, get_run_event_store, get_run_manager, get_run_store, get_stream_bridge
from app.gateway.services import sse_consumer, start_run
from deerflow.runtime import RunRecord, serialize_channel_values

@@ -92,6 +93,7 @@ def _record_to_response(record: RunRecord) -> RunResponse:


@router.post("/{thread_id}/runs", response_model=RunResponse)
@require_permission("runs", "create", owner_check=True, require_existing=True)
async def create_run(thread_id: str, body: RunCreateRequest, request: Request) -> RunResponse:
"""Create a background run (returns immediately)."""
record = await start_run(body, thread_id, request)
@@ -99,6 +101,7 @@ async def create_run(thread_id: str, body: RunCreateRequest, request: Request) -


@router.post("/{thread_id}/runs/stream")
@require_permission("runs", "create", owner_check=True, require_existing=True)
async def stream_run(thread_id: str, body: RunCreateRequest, request: Request) -> StreamingResponse:
"""Create a run and stream events via SSE.

@@ -126,6 +129,7 @@ async def stream_run(thread_id: str, body: RunCreateRequest, request: Request) -


@router.post("/{thread_id}/runs/wait", response_model=dict)
@require_permission("runs", "create", owner_check=True, require_existing=True)
async def wait_run(thread_id: str, body: RunCreateRequest, request: Request) -> dict:
"""Create a run and block until it completes, returning the final state."""
record = await start_run(body, thread_id, request)
@@ -151,6 +155,7 @@ async def wait_run(thread_id: str, body: RunCreateRequest, request: Request) ->


@router.get("/{thread_id}/runs", response_model=list[RunResponse])
@require_permission("runs", "read", owner_check=True)
async def list_runs(thread_id: str, request: Request) -> list[RunResponse]:
"""List all runs for a thread."""
run_mgr = get_run_manager(request)
@@ -159,6 +164,7 @@ async def list_runs(thread_id: str, request: Request) -> list[RunResponse]:


@router.get("/{thread_id}/runs/{run_id}", response_model=RunResponse)
@require_permission("runs", "read", owner_check=True)
async def get_run(thread_id: str, run_id: str, request: Request) -> RunResponse:
"""Get details of a specific run."""
run_mgr = get_run_manager(request)
@@ -169,6 +175,7 @@ async def get_run(thread_id: str, run_id: str, request: Request) -> RunResponse:


@router.post("/{thread_id}/runs/{run_id}/cancel")
@require_permission("runs", "cancel", owner_check=True, require_existing=True)
async def cancel_run(
thread_id: str,
run_id: str,
@@ -206,6 +213,7 @@ async def cancel_run(


@router.get("/{thread_id}/runs/{run_id}/join")
@require_permission("runs", "read", owner_check=True)
async def join_run(thread_id: str, run_id: str, request: Request) -> StreamingResponse:
"""Join an existing run's SSE stream."""
bridge = get_stream_bridge(request)
@@ -226,6 +234,7 @@ async def join_run(thread_id: str, run_id: str, request: Request) -> StreamingRe


@router.api_route("/{thread_id}/runs/{run_id}/stream", methods=["GET", "POST"], response_model=None)
@require_permission("runs", "read", owner_check=True)
async def stream_existing_run(
thread_id: str,
run_id: str,
@@ -265,3 +274,104 @@ async def stream_existing_run(
"X-Accel-Buffering": "no",
},
)


# ---------------------------------------------------------------------------
# Messages / Events / Token usage endpoints
# ---------------------------------------------------------------------------


@router.get("/{thread_id}/messages")
@require_permission("runs", "read", owner_check=True)
async def list_thread_messages(
thread_id: str,
request: Request,
limit: int = Query(default=50, le=200),
before_seq: int | None = Query(default=None),
after_seq: int | None = Query(default=None),
) -> list[dict]:
"""Return displayable messages for a thread (across all runs), with feedback attached."""
event_store = get_run_event_store(request)
messages = await event_store.list_messages(thread_id, limit=limit, before_seq=before_seq, after_seq=after_seq)

# Attach feedback to the last AI message of each run
feedback_repo = get_feedback_repo(request)
user_id = await get_current_user(request)
feedback_map = await feedback_repo.list_by_thread_grouped(thread_id, user_id=user_id)

# Find the last ai_message per run_id
last_ai_per_run: dict[str, int] = {} # run_id -> index in messages list
for i, msg in enumerate(messages):
if msg.get("event_type") == "ai_message":
last_ai_per_run[msg["run_id"]] = i

# Attach feedback field
last_ai_indices = set(last_ai_per_run.values())
for i, msg in enumerate(messages):
if i in last_ai_indices:
run_id = msg["run_id"]
fb = feedback_map.get(run_id)
msg["feedback"] = (
{
"feedback_id": fb["feedback_id"],
"rating": fb["rating"],
"comment": fb.get("comment"),
}
if fb
else None
)
else:
msg["feedback"] = None

return messages


@router.get("/{thread_id}/runs/{run_id}/messages")
@require_permission("runs", "read", owner_check=True)
async def list_run_messages(
thread_id: str,
run_id: str,
request: Request,
limit: int = Query(default=50, le=200, ge=1),
before_seq: int | None = Query(default=None),
after_seq: int | None = Query(default=None),
) -> dict:
"""Return paginated messages for a specific run.

Response: { data: [...], has_more: bool }
"""
event_store = get_run_event_store(request)
rows = await event_store.list_messages_by_run(
thread_id,
run_id,
limit=limit + 1,
before_seq=before_seq,
after_seq=after_seq,
)
has_more = len(rows) > limit
data = rows[:limit] if has_more else rows
return {"data": data, "has_more": has_more}

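The `limit + 1` fetch is the standard keyset-pagination trick: requesting one extra row answers "is there another page?" without a second count query. A hedged client-side sketch follows; the route prefix and the per-row `seq` field name are assumptions, and `before_seq` is assumed to mean "strictly older than this sequence number".

```python
# Client paging backwards through a run's messages using the has_more
# flag from the endpoint above. The "/api/threads" prefix and the "seq"
# field on each row are assumptions for illustration.
import httpx


def fetch_all_messages(base_url: str, thread_id: str, run_id: str) -> list[dict]:
    messages: list[dict] = []
    before_seq: int | None = None
    with httpx.Client(base_url=base_url) as client:
        while True:
            params: dict = {"limit": 50}
            if before_seq is not None:
                params["before_seq"] = before_seq
            page = client.get(
                f"/api/threads/{thread_id}/runs/{run_id}/messages", params=params
            ).json()
            messages.extend(page["data"])
            if not page["has_more"]:
                return messages
            # Continue from the oldest sequence number seen so far.
            before_seq = min(row["seq"] for row in page["data"])
```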
@router.get("/{thread_id}/runs/{run_id}/events")
@require_permission("runs", "read", owner_check=True)
async def list_run_events(
thread_id: str,
run_id: str,
request: Request,
event_types: str | None = Query(default=None),
limit: int = Query(default=500, le=2000),
) -> list[dict]:
"""Return the full event stream for a run (debug/audit)."""
event_store = get_run_event_store(request)
types = event_types.split(",") if event_types else None
return await event_store.list_events(thread_id, run_id, event_types=types, limit=limit)


@router.get("/{thread_id}/token-usage")
@require_permission("threads", "read", owner_check=True)
async def thread_token_usage(thread_id: str, request: Request) -> dict:
"""Thread-level token usage aggregation."""
run_store = get_run_store(request)
agg = await run_store.aggregate_tokens_by_thread(thread_id)
return {"thread_id": thread_id, **agg}

@@ -18,23 +18,35 @@ import uuid
from typing import Any

from fastapi import APIRouter, HTTPException, Request
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, field_validator

from app.gateway.deps import get_checkpointer, get_store
from app.gateway.authz import require_permission
from app.gateway.deps import get_checkpointer
from app.gateway.utils import sanitize_log_param
from deerflow.config.paths import Paths, get_paths
from deerflow.runtime import serialize_channel_values

# ---------------------------------------------------------------------------
# Store namespace
# ---------------------------------------------------------------------------

THREADS_NS: tuple[str, ...] = ("threads",)
"""Namespace used by the Store for thread metadata records."""
from deerflow.runtime.user_context import get_effective_user_id

logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/threads", tags=["threads"])


# Metadata keys that the server controls; clients are not allowed to set
# them. Pydantic ``@field_validator("metadata")`` strips them on every
# inbound model below so a malicious client cannot reflect a forged
# owner identity through the API surface. Defense-in-depth — the
# row-level invariant is still ``threads_meta.user_id`` populated from
# the auth contextvar; this list closes the metadata-blob echo gap.
_SERVER_RESERVED_METADATA_KEYS: frozenset[str] = frozenset({"owner_id", "user_id"})


def _strip_reserved_metadata(metadata: dict[str, Any] | None) -> dict[str, Any]:
"""Return ``metadata`` with server-controlled keys removed."""
if not metadata:
return metadata or {}
return {k: v for k, v in metadata.items() if k not in _SERVER_RESERVED_METADATA_KEYS}

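To make the stripping behavior concrete, here is a self-contained sketch of the validator wiring used by the request models below. `DemoCreateRequest` is a hypothetical stand-in, not one of the real models.

```python
# Defense-in-depth validator sketch: server-reserved metadata keys are
# silently dropped rather than rejected, so a forged owner identity can
# never round-trip through the API.
from typing import Any

from pydantic import BaseModel, Field, field_validator

_RESERVED = frozenset({"owner_id", "user_id"})


class DemoCreateRequest(BaseModel):
    metadata: dict[str, Any] = Field(default_factory=dict)

    @field_validator("metadata")
    @classmethod
    def _strip_reserved(cls, v: dict[str, Any]) -> dict[str, Any]:
        return {k: val for k, val in v.items() if k not in _RESERVED}


req = DemoCreateRequest(metadata={"owner_id": "attacker", "topic": "demo"})
assert req.metadata == {"topic": "demo"}  # reserved key was stripped
```

Dropping instead of rejecting keeps older clients working while still guaranteeing the row-level `user_id` invariant is the only source of ownership.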
# ---------------------------------------------------------------------------
# Response / request models
# ---------------------------------------------------------------------------
@@ -63,8 +75,11 @@ class ThreadCreateRequest(BaseModel):
"""Request body for creating a thread."""

thread_id: str | None = Field(default=None, description="Optional thread ID (auto-generated if omitted)")
assistant_id: str | None = Field(default=None, description="Associate thread with an assistant")
metadata: dict[str, Any] = Field(default_factory=dict, description="Initial metadata")

_strip_reserved = field_validator("metadata")(classmethod(lambda cls, v: _strip_reserved_metadata(v)))


class ThreadSearchRequest(BaseModel):
"""Request body for searching threads."""
@@ -93,6 +108,8 @@ class ThreadPatchRequest(BaseModel):

metadata: dict[str, Any] = Field(default_factory=dict, description="Metadata to merge")

_strip_reserved = field_validator("metadata")(classmethod(lambda cls, v: _strip_reserved_metadata(v)))


class ThreadStateUpdateRequest(BaseModel):
"""Request body for updating thread state (human-in-the-loop resume)."""
@@ -126,70 +143,25 @@ class ThreadHistoryRequest(BaseModel):
# ---------------------------------------------------------------------------


def _delete_thread_data(thread_id: str, paths: Paths | None = None) -> ThreadDeleteResponse:
def _delete_thread_data(thread_id: str, paths: Paths | None = None, *, user_id: str | None = None) -> ThreadDeleteResponse:
"""Delete local persisted filesystem data for a thread."""
path_manager = paths or get_paths()
try:
path_manager.delete_thread_dir(thread_id)
path_manager.delete_thread_dir(thread_id, user_id=user_id)
except ValueError as exc:
raise HTTPException(status_code=422, detail=str(exc)) from exc
except FileNotFoundError:
# Not critical — thread data may not exist on disk
logger.debug("No local thread data to delete for %s", thread_id)
logger.debug("No local thread data to delete for %s", sanitize_log_param(thread_id))
return ThreadDeleteResponse(success=True, message=f"No local data for {thread_id}")
except Exception as exc:
logger.exception("Failed to delete thread data for %s", thread_id)
logger.exception("Failed to delete thread data for %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to delete local thread data.") from exc

logger.info("Deleted local thread data for %s", thread_id)
logger.info("Deleted local thread data for %s", sanitize_log_param(thread_id))
return ThreadDeleteResponse(success=True, message=f"Deleted local thread data for {thread_id}")


async def _store_get(store, thread_id: str) -> dict | None:
"""Fetch a thread record from the Store; returns ``None`` if absent."""
item = await store.aget(THREADS_NS, thread_id)
return item.value if item is not None else None


async def _store_put(store, record: dict) -> None:
"""Write a thread record to the Store."""
await store.aput(THREADS_NS, record["thread_id"], record)


async def _store_upsert(store, thread_id: str, *, metadata: dict | None = None, values: dict | None = None) -> None:
"""Create or refresh a thread record in the Store.

On creation the record is written with ``status="idle"``. On update only
``updated_at`` (and optionally ``metadata`` / ``values``) are changed so
that existing fields are preserved.

``values`` carries the agent-state snapshot exposed to the frontend
(currently just ``{"title": "..."}``).
"""
now = time.time()
existing = await _store_get(store, thread_id)
if existing is None:
await _store_put(
store,
{
"thread_id": thread_id,
"status": "idle",
"created_at": now,
"updated_at": now,
"metadata": metadata or {},
"values": values or {},
},
)
else:
val = dict(existing)
val["updated_at"] = now
if metadata:
val.setdefault("metadata", {}).update(metadata)
if values:
val.setdefault("values", {}).update(values)
await _store_put(store, val)


def _derive_thread_status(checkpoint_tuple) -> str:
"""Derive thread status from checkpoint metadata."""
if checkpoint_tuple is None:
@@ -215,22 +187,18 @@ def _derive_thread_status(checkpoint_tuple) -> str:


@router.delete("/{thread_id}", response_model=ThreadDeleteResponse)
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteResponse:
"""Delete local persisted filesystem data for a thread.

Cleans DeerFlow-managed thread directories, removes checkpoint data,
and removes the thread record from the Store.
and removes the thread_meta row from the configured ThreadMetaStore
(sqlite or memory).
"""
# Clean local filesystem
response = _delete_thread_data(thread_id)
from app.gateway.deps import get_thread_store

# Remove from Store (best-effort)
store = get_store(request)
if store is not None:
try:
await store.adelete(THREADS_NS, thread_id)
except Exception:
logger.debug("Could not delete store record for thread %s (not critical)", thread_id)
# Clean local filesystem
response = _delete_thread_data(thread_id, user_id=get_effective_user_id())

# Remove checkpoints (best-effort)
checkpointer = getattr(request.app.state, "checkpointer", None)
@@ -239,7 +207,15 @@ async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteRe
if hasattr(checkpointer, "adelete_thread"):
await checkpointer.adelete_thread(thread_id)
except Exception:
logger.debug("Could not delete checkpoints for thread %s (not critical)", thread_id)
logger.debug("Could not delete checkpoints for thread %s (not critical)", sanitize_log_param(thread_id))

# Remove thread_meta row (best-effort) — required for sqlite backend
# so the deleted thread no longer appears in /threads/search.
try:
thread_store = get_thread_store(request)
await thread_store.delete(thread_id)
except Exception:
logger.debug("Could not delete thread_meta for %s (not critical)", sanitize_log_param(thread_id))

return response

@@ -248,43 +224,40 @@ async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteRe
async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadResponse:
"""Create a new thread.

The thread record is written to the Store (for fast listing) and an
empty checkpoint is written to the checkpointer (for state reads).
Writes a thread_meta record (so the thread appears in /threads/search)
and an empty checkpoint (so state endpoints work immediately).
Idempotent: returns the existing record when ``thread_id`` already exists.
"""
store = get_store(request)
from app.gateway.deps import get_thread_store

checkpointer = get_checkpointer(request)
thread_store = get_thread_store(request)
thread_id = body.thread_id or str(uuid.uuid4())
now = time.time()
# ``body.metadata`` is already stripped of server-reserved keys by
# ``ThreadCreateRequest._strip_reserved`` — see the model definition.

# Idempotency: return existing record from Store when already present
if store is not None:
existing_record = await _store_get(store, thread_id)
if existing_record is not None:
return ThreadResponse(
thread_id=thread_id,
status=existing_record.get("status", "idle"),
created_at=str(existing_record.get("created_at", "")),
updated_at=str(existing_record.get("updated_at", "")),
metadata=existing_record.get("metadata", {}),
)
# Idempotency: return existing record when already present
existing_record = await thread_store.get(thread_id)
if existing_record is not None:
return ThreadResponse(
thread_id=thread_id,
status=existing_record.get("status", "idle"),
created_at=str(existing_record.get("created_at", "")),
updated_at=str(existing_record.get("updated_at", "")),
metadata=existing_record.get("metadata", {}),
)

# Write thread record to Store
if store is not None:
try:
await _store_put(
store,
{
"thread_id": thread_id,
"status": "idle",
"created_at": now,
"updated_at": now,
"metadata": body.metadata,
},
)
except Exception:
logger.exception("Failed to write thread %s to store", thread_id)
raise HTTPException(status_code=500, detail="Failed to create thread")
# Write thread_meta so the thread appears in /threads/search immediately
try:
await thread_store.create(
thread_id,
assistant_id=getattr(body, "assistant_id", None),
metadata=body.metadata,
)
except Exception:
logger.exception("Failed to write thread_meta for %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to create thread")

# Write an empty checkpoint so state endpoints work immediately
config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
@@ -301,10 +274,10 @@ async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadRe
}
await checkpointer.aput(config, empty_checkpoint(), ckpt_metadata, {})
except Exception:
logger.exception("Failed to create checkpoint for thread %s", thread_id)
logger.exception("Failed to create checkpoint for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to create thread")

logger.info("Thread created: %s", thread_id)
logger.info("Thread created: %s", sanitize_log_param(thread_id))
return ThreadResponse(
thread_id=thread_id,
status="idle",
@@ -318,166 +291,91 @@ async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadRe
async def search_threads(body: ThreadSearchRequest, request: Request) -> list[ThreadResponse]:
"""Search and list threads.

Two-phase approach:

**Phase 1 — Store (fast path, O(threads))**: returns threads that were
created or run through this Gateway. Store records are tiny metadata
dicts so fetching all of them at once is cheap.

**Phase 2 — Checkpointer supplement (lazy migration)**: threads that
were created directly by LangGraph Server (and therefore absent from the
Store) are discovered here by iterating the shared checkpointer. Any
newly found thread is immediately written to the Store so that the next
search skips Phase 2 for that thread — the Store converges to a full
index over time without a one-shot migration job.
Delegates to the configured ThreadMetaStore implementation
(SQL-backed for sqlite/postgres, Store-backed for memory mode).
"""
store = get_store(request)
checkpointer = get_checkpointer(request)
from app.gateway.deps import get_thread_store

# -----------------------------------------------------------------------
# Phase 1: Store
# -----------------------------------------------------------------------
merged: dict[str, ThreadResponse] = {}

if store is not None:
try:
items = await store.asearch(THREADS_NS, limit=10_000)
except Exception:
logger.warning("Store search failed — falling back to checkpointer only", exc_info=True)
items = []

for item in items:
val = item.value
merged[val["thread_id"]] = ThreadResponse(
thread_id=val["thread_id"],
status=val.get("status", "idle"),
created_at=str(val.get("created_at", "")),
updated_at=str(val.get("updated_at", "")),
metadata=val.get("metadata", {}),
values=val.get("values", {}),
)

# -----------------------------------------------------------------------
# Phase 2: Checkpointer supplement
# Discovers threads not yet in the Store (e.g. created by LangGraph
# Server) and lazily migrates them so future searches skip this phase.
# -----------------------------------------------------------------------
try:
async for checkpoint_tuple in checkpointer.alist(None):
cfg = getattr(checkpoint_tuple, "config", {})
thread_id = cfg.get("configurable", {}).get("thread_id")
if not thread_id or thread_id in merged:
continue

# Skip sub-graph checkpoints (checkpoint_ns is non-empty for those)
if cfg.get("configurable", {}).get("checkpoint_ns", ""):
continue

ckpt_meta = getattr(checkpoint_tuple, "metadata", {}) or {}
# Strip LangGraph internal keys from the user-visible metadata dict
user_meta = {k: v for k, v in ckpt_meta.items() if k not in ("created_at", "updated_at", "step", "source", "writes", "parents")}

# Extract state values (title) from the checkpoint's channel_values
checkpoint_data = getattr(checkpoint_tuple, "checkpoint", {}) or {}
channel_values = checkpoint_data.get("channel_values", {})
ckpt_values = {}
if title := channel_values.get("title"):
ckpt_values["title"] = title

thread_resp = ThreadResponse(
thread_id=thread_id,
status=_derive_thread_status(checkpoint_tuple),
created_at=str(ckpt_meta.get("created_at", "")),
updated_at=str(ckpt_meta.get("updated_at", ckpt_meta.get("created_at", ""))),
metadata=user_meta,
values=ckpt_values,
)
merged[thread_id] = thread_resp

# Lazy migration — write to Store so the next search finds it there
if store is not None:
try:
await _store_upsert(store, thread_id, metadata=user_meta, values=ckpt_values or None)
except Exception:
logger.debug("Failed to migrate thread %s to store (non-fatal)", thread_id)
except Exception:
logger.exception("Checkpointer scan failed during thread search")
# Don't raise — return whatever was collected from Store + partial scan

# -----------------------------------------------------------------------
# Phase 3: Filter → sort → paginate
# -----------------------------------------------------------------------
results = list(merged.values())

if body.metadata:
results = [r for r in results if all(r.metadata.get(k) == v for k, v in body.metadata.items())]

if body.status:
results = [r for r in results if r.status == body.status]

results.sort(key=lambda r: r.updated_at, reverse=True)
return results[body.offset : body.offset + body.limit]
repo = get_thread_store(request)
rows = await repo.search(
metadata=body.metadata or None,
status=body.status,
limit=body.limit,
offset=body.offset,
)
return [
ThreadResponse(
thread_id=r["thread_id"],
status=r.get("status", "idle"),
created_at=r.get("created_at", ""),
updated_at=r.get("updated_at", ""),
metadata=r.get("metadata", {}),
values={"title": r["display_name"]} if r.get("display_name") else {},
interrupts={},
)
for r in rows
]

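The handlers in this file only ever call a small set of methods on whatever `get_thread_store` returns. A hedged sketch of that implied interface follows; the method names are taken from the call sites visible in this diff, but the exact signatures in DeerFlow may differ.

```python
# Interface implied by the call sites above (get/create/search/update_*/
# delete). A Protocol records the contract without naming a concrete
# backend; sqlite/postgres and memory implementations both satisfy it.
from typing import Any, Protocol


class ThreadMetaStore(Protocol):
    async def get(self, thread_id: str) -> dict | None: ...

    async def create(
        self,
        thread_id: str,
        *,
        assistant_id: str | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> None: ...

    async def search(
        self,
        *,
        metadata: dict[str, Any] | None = None,
        status: str | None = None,
        limit: int = 100,
        offset: int = 0,
    ) -> list[dict]: ...

    async def update_metadata(self, thread_id: str, metadata: dict[str, Any]) -> None: ...

    async def update_display_name(self, thread_id: str, title: str) -> None: ...

    async def update_status(self, thread_id: str, status: str) -> None: ...

    async def delete(self, thread_id: str) -> None: ...
```

Pushing filtering and pagination behind this interface is what lets the sqlite/postgres backend do the work in SQL instead of the old in-process scan-filter-sort over every checkpoint.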
@router.patch("/{thread_id}", response_model=ThreadResponse)
@require_permission("threads", "write", owner_check=True, require_existing=True)
async def patch_thread(thread_id: str, body: ThreadPatchRequest, request: Request) -> ThreadResponse:
"""Merge metadata into a thread record."""
store = get_store(request)
if store is None:
raise HTTPException(status_code=503, detail="Store not available")
from app.gateway.deps import get_thread_store

record = await _store_get(store, thread_id)
thread_store = get_thread_store(request)
record = await thread_store.get(thread_id)
if record is None:
raise HTTPException(status_code=404, detail=f"Thread {thread_id} not found")

now = time.time()
updated = dict(record)
updated.setdefault("metadata", {}).update(body.metadata)
updated["updated_at"] = now

# ``body.metadata`` already stripped by ``ThreadPatchRequest._strip_reserved``.
try:
await _store_put(store, updated)
await thread_store.update_metadata(thread_id, body.metadata)
except Exception:
logger.exception("Failed to patch thread %s", thread_id)
logger.exception("Failed to patch thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to update thread")

# Re-read to get the merged metadata + refreshed updated_at
record = await thread_store.get(thread_id) or record
return ThreadResponse(
thread_id=thread_id,
status=updated.get("status", "idle"),
created_at=str(updated.get("created_at", "")),
updated_at=str(now),
metadata=updated.get("metadata", {}),
status=record.get("status", "idle"),
created_at=str(record.get("created_at", "")),
updated_at=str(record.get("updated_at", "")),
metadata=record.get("metadata", {}),
)


@router.get("/{thread_id}", response_model=ThreadResponse)
@require_permission("threads", "read", owner_check=True)
async def get_thread(thread_id: str, request: Request) -> ThreadResponse:
"""Get thread info.

Reads metadata from the Store and derives the accurate execution
status from the checkpointer. Falls back to the checkpointer alone
for threads that pre-date Store adoption (backward compat).
Reads metadata from the ThreadMetaStore and derives the accurate
execution status from the checkpointer. Falls back to the checkpointer
alone for threads that pre-date ThreadMetaStore adoption (backward compat).
"""
store = get_store(request)
from app.gateway.deps import get_thread_store

thread_store = get_thread_store(request)
checkpointer = get_checkpointer(request)

record: dict | None = None
if store is not None:
record = await _store_get(store, thread_id)
record: dict | None = await thread_store.get(thread_id)

# Derive accurate status from the checkpointer
config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
try:
checkpoint_tuple = await checkpointer.aget_tuple(config)
except Exception:
logger.exception("Failed to get checkpoint for thread %s", thread_id)
logger.exception("Failed to get checkpoint for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to get thread")

if record is None and checkpoint_tuple is None:
raise HTTPException(status_code=404, detail=f"Thread {thread_id} not found")

# If the thread exists in the checkpointer but not the store (e.g. legacy
# data), synthesize a minimal store record from the checkpoint metadata.
# If the thread exists in the checkpointer but not in thread_meta (e.g.
# legacy data created before thread_meta adoption), synthesize a minimal
# record from the checkpoint metadata.
if record is None and checkpoint_tuple is not None:
ckpt_meta = getattr(checkpoint_tuple, "metadata", {}) or {}
record = {
@@ -505,7 +403,9 @@ async def get_thread(thread_id: str, request: Request) -> ThreadResponse:
)


# ---------------------------------------------------------------------------
@router.get("/{thread_id}/state", response_model=ThreadStateResponse)
@require_permission("threads", "read", owner_check=True)
async def get_thread_state(thread_id: str, request: Request) -> ThreadStateResponse:
"""Get the latest state snapshot for a thread.

@@ -518,7 +418,7 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo
try:
checkpoint_tuple = await checkpointer.aget_tuple(config)
except Exception:
logger.exception("Failed to get state for thread %s", thread_id)
logger.exception("Failed to get state for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to get thread state")

if checkpoint_tuple is None:
@@ -542,8 +442,10 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo
next_tasks = [t.name for t in tasks_raw if hasattr(t, "name")]
tasks = [{"id": getattr(t, "id", ""), "name": getattr(t, "name", "")} for t in tasks_raw]

values = serialize_channel_values(channel_values)

return ThreadStateResponse(
values=serialize_channel_values(channel_values),
values=values,
next=next_tasks,
metadata=metadata,
checkpoint={"id": checkpoint_id, "ts": str(metadata.get("created_at", ""))},
@@ -555,15 +457,19 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo


@router.post("/{thread_id}/state", response_model=ThreadStateResponse)
@require_permission("threads", "write", owner_check=True, require_existing=True)
async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, request: Request) -> ThreadStateResponse:
"""Update thread state (e.g. for human-in-the-loop resume or title rename).

Writes a new checkpoint that merges *body.values* into the latest
channel values, then syncs any updated ``title`` field back to the Store
so that ``/threads/search`` reflects the change immediately.
channel values, then syncs any updated ``title`` field through the
ThreadMetaStore abstraction so that ``/threads/search`` reflects the
change immediately in both sqlite and memory backends.
"""
from app.gateway.deps import get_thread_store

checkpointer = get_checkpointer(request)
store = get_store(request)
thread_store = get_thread_store(request)

# checkpoint_ns must be present in the config for aput — default to ""
# (the root graph namespace). checkpoint_id is optional; omitting it
@@ -580,7 +486,7 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re
try:
checkpoint_tuple = await checkpointer.aget_tuple(read_config)
except Exception:
logger.exception("Failed to get state for thread %s", thread_id)
logger.exception("Failed to get state for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to get thread state")

if checkpoint_tuple is None:
@@ -614,19 +520,22 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re
try:
new_config = await checkpointer.aput(write_config, checkpoint, metadata, {})
except Exception:
logger.exception("Failed to update state for thread %s", thread_id)
logger.exception("Failed to update state for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to update thread state")

new_checkpoint_id: str | None = None
if isinstance(new_config, dict):
new_checkpoint_id = new_config.get("configurable", {}).get("checkpoint_id")

# Sync title changes to the Store so /threads/search reflects them immediately.
if store is not None and body.values and "title" in body.values:
try:
await _store_upsert(store, thread_id, values={"title": body.values["title"]})
except Exception:
logger.debug("Failed to sync title to store for thread %s (non-fatal)", thread_id)
# Sync title changes through the ThreadMetaStore abstraction so /threads/search
# reflects them immediately in both sqlite and memory backends.
if body.values and "title" in body.values:
new_title = body.values["title"]
if new_title: # Skip empty strings and None
try:
await thread_store.update_display_name(thread_id, new_title)
except Exception:
logger.debug("Failed to sync title to thread_meta for %s (non-fatal)", sanitize_log_param(thread_id))

return ThreadStateResponse(
values=serialize_channel_values(channel_values),
@@ -638,8 +547,16 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re


@router.post("/{thread_id}/history", response_model=list[HistoryEntry])
@require_permission("threads", "read", owner_check=True)
async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request: Request) -> list[HistoryEntry]:
"""Get checkpoint history for a thread."""
"""Get checkpoint history for a thread.

Messages are read from the checkpointer's channel values (the
authoritative source) and serialized via
:func:`~deerflow.runtime.serialization.serialize_channel_values`.
Only the latest (first) checkpoint carries the ``messages`` key to
avoid duplicating them across every entry.
"""
checkpointer = get_checkpointer(request)

config: dict[str, Any] = {"configurable": {"thread_id": thread_id}}
@@ -647,6 +564,7 @@ async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request
config["configurable"]["checkpoint_id"] = body.before

entries: list[HistoryEntry] = []
is_latest_checkpoint = True
try:
async for checkpoint_tuple in checkpointer.alist(config, limit=body.limit):
ckpt_config = getattr(checkpoint_tuple, "config", {})
@@ -661,22 +579,42 @@ async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request

channel_values = checkpoint.get("channel_values", {})

# Build values from checkpoint channel_values
values: dict[str, Any] = {}
if title := channel_values.get("title"):
values["title"] = title
if thread_data := channel_values.get("thread_data"):
values["thread_data"] = thread_data

# Attach messages only to the latest checkpoint entry.
if is_latest_checkpoint:
messages = channel_values.get("messages")
if messages:
values["messages"] = serialize_channel_values({"messages": messages}).get("messages", [])
is_latest_checkpoint = False

# Derive next tasks
tasks_raw = getattr(checkpoint_tuple, "tasks", []) or []
next_tasks = [t.name for t in tasks_raw if hasattr(t, "name")]

# Strip LangGraph internal keys from metadata
user_meta = {k: v for k, v in metadata.items() if k not in ("created_at", "updated_at", "step", "source", "writes", "parents")}
# Keep step for ordering context
if "step" in metadata:
user_meta["step"] = metadata["step"]

entries.append(
HistoryEntry(
checkpoint_id=checkpoint_id,
parent_checkpoint_id=parent_id,
metadata=metadata,
values=serialize_channel_values(channel_values),
metadata=user_meta,
values=values,
created_at=str(metadata.get("created_at", "")),
next=next_tasks,
)
)
except Exception:
logger.exception("Failed to get history for thread %s", thread_id)
logger.exception("Failed to get history for thread %s", sanitize_log_param(thread_id))
raise HTTPException(status_code=500, detail="Failed to get thread history")

return entries

@@ -4,11 +4,14 @@ import logging
import os
import stat

from fastapi import APIRouter, File, HTTPException, UploadFile
from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile
from pydantic import BaseModel

from deerflow.config.app_config import get_app_config
from app.gateway.authz import require_permission
from app.gateway.deps import get_config
from deerflow.config.app_config import AppConfig
from deerflow.config.paths import get_paths
from deerflow.runtime.user_context import get_effective_user_id
from deerflow.sandbox.sandbox_provider import SandboxProvider, get_sandbox_provider
from deerflow.uploads.manager import (
PathTraversalError,
@@ -58,23 +61,22 @@ def _uses_thread_data_mounts(sandbox_provider: SandboxProvider) -> bool:
return bool(getattr(sandbox_provider, "uses_thread_data_mounts", False))


def _get_uploads_config_value(key: str, default: object) -> object:
def _get_uploads_config_value(app_config: AppConfig, key: str, default: object) -> object:
"""Read a value from the uploads config, supporting dict and attribute access."""
cfg = get_app_config()
uploads_cfg = getattr(cfg, "uploads", None)
uploads_cfg = getattr(app_config, "uploads", None)
if isinstance(uploads_cfg, dict):
return uploads_cfg.get(key, default)
return getattr(uploads_cfg, key, default)


def _auto_convert_documents_enabled() -> bool:
def _auto_convert_documents_enabled(app_config: AppConfig) -> bool:
"""Return whether automatic host-side document conversion is enabled.

The secure default is disabled unless an operator explicitly opts in via
uploads.auto_convert_documents in config.yaml.
"""
try:
raw = _get_uploads_config_value("auto_convert_documents", False)
raw = _get_uploads_config_value(app_config, "auto_convert_documents", False)
if isinstance(raw, str):
return raw.strip().lower() in {"1", "true", "yes", "on"}
return bool(raw)
@@ -83,9 +85,12 @@ def _auto_convert_documents_enabled() -> bool:


@router.post("", response_model=UploadResponse)
@require_permission("threads", "write", owner_check=True, require_existing=False)
async def upload_files(
thread_id: str,
request: Request,
files: list[UploadFile] = File(...),
config: AppConfig = Depends(get_config),
) -> UploadResponse:
"""Upload multiple files to a thread's uploads directory."""
if not files:
@@ -95,7 +100,7 @@ async def upload_files(
uploads_dir = ensure_uploads_dir(thread_id)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id)
sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id, user_id=get_effective_user_id())
uploaded_files = []

sandbox_provider = get_sandbox_provider()
@@ -104,7 +109,7 @@ async def upload_files(
if sync_to_sandbox:
sandbox_id = sandbox_provider.acquire(thread_id)
sandbox = sandbox_provider.get(sandbox_id)
auto_convert_documents = _auto_convert_documents_enabled()
auto_convert_documents = _auto_convert_documents_enabled(config)

for file in files:
if not file.filename:
@@ -166,7 +171,8 @@ async def upload_files(


@router.get("/list", response_model=dict)
async def list_uploaded_files(thread_id: str) -> dict:
@require_permission("threads", "read", owner_check=True)
async def list_uploaded_files(thread_id: str, request: Request) -> dict:
"""List all files in a thread's uploads directory."""
try:
uploads_dir = get_uploads_dir(thread_id)
@@ -176,7 +182,7 @@ async def list_uploaded_files(thread_id: str) -> dict:
enrich_file_listing(result, thread_id)

# Gateway additionally includes the sandbox-relative path.
sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id)
sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id, user_id=get_effective_user_id())
for f in result["files"]:
f["path"] = str(sandbox_uploads / f["filename"])

@@ -184,7 +190,8 @@ async def list_uploaded_files(thread_id: str) -> dict:


@router.delete("/{filename}")
async def delete_uploaded_file(thread_id: str, filename: str) -> dict:
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_uploaded_file(thread_id: str, filename: str, request: Request) -> dict:
"""Delete a file from a thread's uploads directory."""
try:
uploads_dir = get_uploads_dir(thread_id)

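Several call sites above pass `user_id=get_effective_user_id()` into path helpers so filesystem layout is scoped per user without threading the id through every signature. A hedged sketch of the underlying contextvar pattern follows; everything except the general concept is an assumption, not DeerFlow's actual implementation.

```python
# Sketch of carrying a per-request user id through async call stacks
# with a contextvar. Sentinel value and function names here are
# illustrative assumptions.
from contextvars import ContextVar

_ANONYMOUS = "anonymous"  # hypothetical sentinel default
_current_user_id: ContextVar[str] = ContextVar("current_user_id", default=_ANONYMOUS)


def set_effective_user_id(user_id: str) -> None:
    # Called once per request, e.g. by an auth middleware after the
    # JWT cookie has been verified.
    _current_user_id.set(user_id)


def get_effective_user_id() -> str:
    # Everything downstream in the same task (routers, path helpers)
    # reads the same value without explicit parameter passing.
    return _current_user_id.get()
```

Because `ContextVar` values are isolated per asyncio task, concurrent requests from different users cannot observe each other's ids, which is what makes this safe where a module-level global would not be.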
@@ -11,14 +11,14 @@ import asyncio
import json
import logging
import re
import time
from collections.abc import Mapping
from typing import Any

from fastapi import HTTPException, Request
from langchain_core.messages import HumanMessage

from app.gateway.deps import get_checkpointer, get_run_manager, get_store, get_stream_bridge
from app.gateway.deps import get_run_context, get_run_manager, get_stream_bridge
from app.gateway.utils import sanitize_log_param
from deerflow.runtime import (
END_SENTINEL,
HEARTBEAT_SENTINEL,
@@ -188,71 +188,6 @@ def build_run_config(
# ---------------------------------------------------------------------------


async def _upsert_thread_in_store(store, thread_id: str, metadata: dict | None) -> None:
"""Create or refresh the thread record in the Store.

Called from :func:`start_run` so that threads created via the stateless
``/runs/stream`` endpoint (which never calls ``POST /threads``) still
appear in ``/threads/search`` results.
"""
# Deferred import to avoid circular import with the threads router module.
from app.gateway.routers.threads import _store_upsert

try:
await _store_upsert(store, thread_id, metadata=metadata)
except Exception:
logger.warning("Failed to upsert thread %s in store (non-fatal)", thread_id)


async def _sync_thread_title_after_run(
run_task: asyncio.Task,
thread_id: str,
checkpointer: Any,
store: Any,
) -> None:
"""Wait for *run_task* to finish, then persist the generated title to the Store.

TitleMiddleware writes the generated title to the LangGraph agent state
(checkpointer) but the Gateway's Store record is not updated automatically.
This coroutine closes that gap by reading the final checkpoint after the
run completes and syncing ``values.title`` into the Store record so that
subsequent ``/threads/search`` responses include the correct title.

Runs as a fire-and-forget :func:`asyncio.create_task`; failures are
logged at DEBUG level and never propagate.
"""
# Wait for the background run task to complete (any outcome).
# asyncio.wait does not propagate task exceptions — it just returns
# when the task is done, cancelled, or failed.
await asyncio.wait({run_task})

# Deferred import to avoid circular import with the threads router module.
from app.gateway.routers.threads import _store_get, _store_put

try:
ckpt_config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
ckpt_tuple = await checkpointer.aget_tuple(ckpt_config)
if ckpt_tuple is None:
return

channel_values = ckpt_tuple.checkpoint.get("channel_values", {})
title = channel_values.get("title")
if not title:
return

existing = await _store_get(store, thread_id)
if existing is None:
return

updated = dict(existing)
updated.setdefault("values", {})["title"] = title
updated["updated_at"] = time.time()
await _store_put(store, updated)
logger.debug("Synced title %r for thread %s", title, thread_id)
except Exception:
logger.debug("Failed to sync title for thread %s (non-fatal)", thread_id, exc_info=True)


async def start_run(
body: Any,
thread_id: str,
@@ -272,8 +207,7 @@ async def start_run(
"""
bridge = get_stream_bridge(request)
run_mgr = get_run_manager(request)
checkpointer = get_checkpointer(request)
store = get_store(request)
run_ctx = get_run_context(request)

disconnect = DisconnectMode.cancel if body.on_disconnect == "cancel" else DisconnectMode.continue_

@@ -291,11 +225,21 @@ async def start_run(
except UnsupportedStrategyError as exc:
raise HTTPException(status_code=501, detail=str(exc)) from exc

# Ensure the thread is visible in /threads/search, even for threads that
# were never explicitly created via POST /threads (e.g. stateless runs).
store = get_store(request)
if store is not None:
await _upsert_thread_in_store(store, thread_id, body.metadata)
# Upsert thread metadata so the thread appears in /threads/search,
# even for threads that were never explicitly created via POST /threads
# (e.g. stateless runs).
try:
existing = await run_ctx.thread_store.get(thread_id)
if existing is None:
await run_ctx.thread_store.create(
thread_id,
assistant_id=body.assistant_id,
metadata=body.metadata,
)
else:
await run_ctx.thread_store.update_status(thread_id, "running")
except Exception:
logger.warning("Failed to upsert thread_meta for %s (non-fatal)", sanitize_log_param(thread_id))

agent_factory = resolve_agent_factory(body.assistant_id)
graph_input = normalize_input(body.input)
@@ -330,8 +274,7 @@ async def start_run(
bridge,
run_mgr,
record,
checkpointer=checkpointer,
store=store,
ctx=run_ctx,
agent_factory=agent_factory,
graph_input=graph_input,
config=config,
@@ -343,11 +286,9 @@ async def start_run(
)
record.task = task

# After the run completes, sync the title generated by TitleMiddleware from
# the checkpointer into the Store record so that /threads/search returns the
# correct title instead of an empty values dict.
if store is not None:
    asyncio.create_task(_sync_thread_title_after_run(task, thread_id, checkpointer, store))
# Title sync is handled by worker.py's finally block which reads the
# title from the checkpoint and calls thread_store.update_display_name
# after the run completes.

return record

backend/app/gateway/utils.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""Shared utility helpers for the Gateway layer."""


def sanitize_log_param(value: str) -> str:
    """Strip control characters to prevent log injection."""
    return value.replace("\n", "").replace("\r", "").replace("\x00", "")
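A short illustration of why every `%s`-interpolated path parameter above is routed through this helper: without it, a crafted thread id containing a newline could forge an extra, authentic-looking log line.

```python
# Log-injection illustration using the helper from the new file above.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("gateway")


def sanitize_log_param(value: str) -> str:
    return value.replace("\n", "").replace("\r", "").replace("\x00", "")


malicious = "thread-1\nINFO:gateway:admin password reset OK"
# Unsanitized, this would emit two log lines; sanitized, exactly one.
logger.info("Deleted local thread data for %s", sanitize_log_param(malicious))
```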
backend/docs/AUTH_TEST_DOCKER_GAP.md (new file, 77 lines)
@@ -0,0 +1,77 @@
# Docker Test Gap (Section 7.4)

This file documents the only **un-executed** test cases from
`backend/docs/AUTH_TEST_PLAN.md` after the full release validation pass.

## Why this gap exists

The release validation environment (sg_dev: `10.251.229.92`) **does not have
a Docker daemon installed**. The TC-DOCKER cases are container-runtime
behavior tests that need an actual Docker engine to spin up
`docker/docker-compose.yaml` services.

```bash
$ ssh sg_dev "which docker; docker --version"
# (empty)
# bash: docker: command not found
```

All other test plan sections were executed against either:
- The local dev box (Mac, all services running locally), or
- The deployed sg_dev instance (gateway + frontend + nginx via SSH tunnel)

## Cases not executed

| Case | Title | What it covers | Why not run |
|---|---|---|---|
| TC-DOCKER-01 | `users.db` volume persistence | Verify the `DEER_FLOW_HOME` bind mount survives container restart | needs `docker compose up` |
| TC-DOCKER-02 | Session persistence across container restart | `AUTH_JWT_SECRET` env var keeps cookies valid after `docker compose down && up` | needs `docker compose down/up` |
| TC-DOCKER-03 | Per-worker rate limiter divergence | Confirms in-process `_login_attempts` dict doesn't share state across `gunicorn` workers (4 by default in the compose file); known limitation, documented | needs multi-worker container |
| TC-DOCKER-04 | IM channels skip AuthMiddleware | Verify Feishu/Slack/Telegram dispatchers run in-container against `http://langgraph:2024` without going through nginx | needs `docker logs` |
| TC-DOCKER-05 | Admin credentials surfacing | **Updated post-simplify** — was "log scrape", now "0600 credential file in `DEER_FLOW_HOME`". The file-based behavior is already validated by TC-1.1 + TC-UPG-13 on sg_dev (non-Docker), so the only Docker-specific gap is verifying the volume mount carries the file out to the host | needs container + host volume |
| TC-DOCKER-06 | Gateway-mode Docker deploy | `./scripts/deploy.sh --gateway` produces a 3-container topology (no `langgraph` container); same auth flow as standard mode | needs `docker compose --profile gateway` |

## Coverage already provided by non-Docker tests

The **auth-relevant** behavior in each Docker case is already exercised by
the test cases that ran on sg_dev or local:

| Docker case | Auth behavior covered by |
|---|---|
| TC-DOCKER-01 (volume persistence) | TC-REENT-01 on sg_dev (admin row survives gateway restart) — same SQLite file, just no container layer between |
| TC-DOCKER-02 (session persistence) | TC-API-02/03/06 (cookie roundtrip), plus TC-REENT-04 (multi-cookie) — JWT verification is process-state-free, container restart is equivalent to `pkill uvicorn && uv run uvicorn` |
| TC-DOCKER-03 (per-worker rate limit) | TC-GW-04 + TC-REENT-09 (single-worker rate limit + 5min expiry). The cross-worker divergence is an architectural property of the in-memory dict; no auth code path differs (see the sketch after this table) |
| TC-DOCKER-04 (IM channels skip auth) | Code-level only: `app/channels/manager.py` uses `langgraph_sdk` directly with no cookie handling. The langgraph_auth handler is bypassed by going through SDK, not HTTP |
| TC-DOCKER-05 (credential surfacing) | TC-1.1 on sg_dev (file at `~/deer-flow/backend/.deer-flow/admin_initial_credentials.txt`, mode 0600, password 22 chars) — the only Docker-unique step is whether the bind mount projects this path onto the host, which is a `docker compose` config check, not a runtime behavior change |
| TC-DOCKER-06 (gateway-mode container) | Section 7.2 covered by TC-GW-01..05 + Section 2 (gateway-mode auth flow on sg_dev) — same Gateway code, container is just a packaging change |

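For readers unfamiliar with the TC-DOCKER-03 divergence, here is a minimal sketch of why an in-process limiter drifts across workers. The dict name matches the doc; the 5-attempt / 5-minute numbers are assumptions for illustration, not the shipped values.

```python
# Why per-worker rate limiting diverges: the state lives in ONE process.
import time

_login_attempts: dict[str, list[float]] = {}  # private to each worker

MAX_ATTEMPTS = 5       # assumed threshold
WINDOW_SECONDS = 300   # assumed 5-minute window


def allow_login(email: str) -> bool:
    now = time.time()
    recent = [t for t in _login_attempts.get(email, []) if now - t < WINDOW_SECONDS]
    recent.append(now)
    _login_attempts[email] = recent
    return len(recent) <= MAX_ATTEMPTS


# With 4 gunicorn workers, each process holds its own _login_attempts,
# so requests that round-robin across workers get roughly 4x MAX_ATTEMPTS
# before every worker individually trips the limit.
```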
## Reproduction steps when Docker becomes available

Anyone with `docker` + `docker compose` installed can reproduce the gap by
running the test plan section verbatim. Pre-flight:

```bash
# Required on the host
docker --version # >=24.x
docker compose version # plugin >=2.x

# Required env var (otherwise sessions reset on every container restart)
echo "AUTH_JWT_SECRET=$(python3 -c 'import secrets; print(secrets.token_urlsafe(32))')" \
  >> .env

# Optional: pin DEER_FLOW_HOME to a stable host path
echo "DEER_FLOW_HOME=$HOME/deer-flow-data" >> .env
```

Then run TC-DOCKER-01..06 from the test plan as written.

## Decision log

- **Not blocking the release.** The auth-relevant behavior in every Docker
  case has an already-validated equivalent on bare metal. The gap is purely
  about *container packaging* details (bind mounts, multi-worker, log
  collection), not about whether the auth code paths work.
- **TC-DOCKER-05 was updated in place** in `AUTH_TEST_PLAN.md` to reflect
  the post-simplify reality (credentials file → 0600 file, no log leak).
  The old "grep 'Password:' in docker logs" expectation would have failed
  silently and given a false sense of coverage.
backend/docs/AUTH_TEST_PLAN.md (new file, 1801 lines)
File diff suppressed because it is too large
129
backend/docs/AUTH_UPGRADE.md
Normal file
129
backend/docs/AUTH_UPGRADE.md
Normal file
@@ -0,0 +1,129 @@
# Authentication Upgrade Guide

DeerFlow ships with a built-in authentication module. This document is for users upgrading from a version without authentication.

## Core concepts

The auth module uses an **always-enforced** policy:

- On first boot, an admin account is created automatically and a random password is printed to the console log
- Authentication is enforced from the very first request, so there is no race window
- Historical conversations (threads created before the upgrade) are migrated to the admin account automatically

## Upgrade steps

### 1. Update the code

```bash
git pull origin main
cd backend && make install
```

### 2. First boot

```bash
make dev
```

The console prints:

```
============================================================
Admin account created on first boot
Email: admin@deerflow.dev
Password: aB3xK9mN_pQ7rT2w
Change it after login: Settings → Account
============================================================
```

If you restart the service before ever logging in, don't worry: as long as setup is incomplete, every boot resets the password and prints it to the console again.

### 3. Log in

Open `http://localhost:2026/login` and sign in with the email and password from the console output.

### 4. Change the password

After logging in, go to Settings → Account → Change Password.

### 5. Add users (optional)

Other users register through the `/login` page and automatically receive the **user** role. Each user can only see their own conversations.

## Security mechanisms

| Mechanism | Description |
|------|------|
| JWT HttpOnly cookie | The token is never exposed to JavaScript, preventing XSS theft |
| CSRF double-submit cookie | All POST/PUT/DELETE requests must carry an `X-CSRF-Token` header |
| bcrypt password hashing | Passwords are never stored in plaintext |
| Multi-tenant isolation | Users can only access their own threads |
| HTTPS auto-detection | Detects `x-forwarded-proto` and sets the `Secure` cookie flag automatically |
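
To make the double-submit mechanism concrete, here is a minimal sketch of the server-side check, assuming a FastAPI-style request object and a cookie named `csrf_token` (both the cookie name and the framework hooks are assumptions; the real ones live in the gateway code):

```python
# Sketch of a double-submit CSRF check, assuming FastAPI and a "csrf_token"
# cookie. The actual names are defined by the gateway, not confirmed here.
import secrets

from fastapi import HTTPException, Request

def verify_csrf(request: Request) -> None:
    cookie = request.cookies.get("csrf_token")
    header = request.headers.get("X-CSRF-Token")
    # Both values must exist and match; compare in constant time.
    if not cookie or not header or not secrets.compare_digest(cookie, header):
        raise HTTPException(status_code=403, detail="CSRF token missing or invalid")
```

The point of the pattern: an attacker's cross-site form can send the cookie (the browser attaches it) but cannot read it to copy the value into the header, so the comparison fails.
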
## Common operations

### Forgot password

```bash
cd backend

# Reset the admin password
python -m app.gateway.auth.reset_admin

# Reset a specific user's password
python -m app.gateway.auth.reset_admin --email user@example.com
```

A new random password is printed.

### Full reset

Delete the user database; a new admin is created on the next restart:

```bash
rm -f backend/.deer-flow/users.db
# Restart the service; the new password is printed to the console
```

## Data storage

| File | Contents |
|------|------|
| `.deer-flow/users.db` | SQLite user database (password hashes, roles) |
| `AUTH_JWT_SECRET` in `.env` | JWT signing key (when unset, a temporary key is generated and all sessions are invalidated on restart) |

### Production recommendation

```bash
# Generate a persistent JWT secret so users don't have to log in again after every restart
python -c "import secrets; print(secrets.token_urlsafe(32))"
# Add the output to .env:
# AUTH_JWT_SECRET=<generated key>
```

## API endpoints

| Endpoint | Method | Description |
|------|------|------|
| `/api/v1/auth/login/local` | POST | Email/password login (OAuth2 form) |
| `/api/v1/auth/register` | POST | Register a new user (user role) |
| `/api/v1/auth/logout` | POST | Log out (clears cookies) |
| `/api/v1/auth/me` | GET | Get current user info |
| `/api/v1/auth/change-password` | POST | Change password |
| `/api/v1/auth/setup-status` | GET | Check whether the admin exists |
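
An end-to-end scripted flow against these endpoints, as a minimal sketch using `requests`. The OAuth2 form field names (`username`/`password`), the `csrf_token` cookie name, and the change-password payload fields are assumptions based on the conventions above, not confirmed API details:

```python
# Sketch: login + authenticated POST against the endpoints above.
# Assumptions: OAuth2 form fields "username"/"password", a "csrf_token"
# cookie, and "old_password"/"new_password" JSON fields.
import requests

BASE = "http://localhost:2026"
s = requests.Session()

# 1. Log in; the JWT lands in an HttpOnly cookie held by the session.
s.post(f"{BASE}/api/v1/auth/login/local",
       data={"username": "admin@deerflow.dev", "password": "<from console>"})

# 2. GET requests need only the cookie.
print(s.get(f"{BASE}/api/v1/auth/me").json())

# 3. Mutating requests must echo the CSRF cookie in the X-CSRF-Token header.
csrf = s.cookies.get("csrf_token")
s.post(f"{BASE}/api/v1/auth/change-password",
       json={"old_password": "<from console>", "new_password": "<new>"},
       headers={"X-CSRF-Token": csrf})
```
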
## Compatibility

- **Standard mode** (`make dev`): fully compatible; admin is created automatically
- **Gateway mode** (`make dev-pro`): fully compatible
- **Docker deployments**: fully compatible; `.deer-flow/users.db` must live on a persistent volume mount
- **IM channels** (Feishu/Slack/Telegram): communicate via the LangGraph SDK and do not pass through the auth layer
- **DeerFlowClient** (embedded): does not go over HTTP and is unaffected by authentication

## Troubleshooting

| Symptom | Cause | Fix |
|------|------|------|
| No password shown after startup | Admin already exists (not a first boot) | Reset with `reset_admin`, or delete `users.db` |
| POST returns 403 after login | Missing CSRF token | Make sure the frontend is updated |
| Re-login required after restart | `AUTH_JWT_SECRET` not persisted | Set a fixed key in `.env` |
@@ -277,7 +277,7 @@ LangGraph Server only needs the harness package. `langgraph.json` updates:
     "lead_agent": "deerflow.agents:make_lead_agent"
   },
   "checkpointer": {
-    "path": "./packages/harness/deerflow/agents/checkpointer/async_provider.py:make_checkpointer"
+    "path": "./packages/harness/deerflow/runtime/checkpointer/async_provider.py:make_checkpointer"
   }
 }
 ```

@@ -124,7 +124,7 @@ title:
 # checkpointer.py
 from langgraph.checkpoint.sqlite import SqliteSaver

-checkpointer = SqliteSaver.from_conn_string("checkpoints.db")
+checkpointer = SqliteSaver.from_conn_string("deerflow.db")
 ```

 ```json

@@ -8,7 +8,10 @@
   "graphs": {
     "lead_agent": "deerflow.agents:make_lead_agent"
   },
+  "auth": {
+    "path": "./app/gateway/langgraph_auth.py:auth"
+  },
   "checkpointer": {
-    "path": "./packages/harness/deerflow/agents/checkpointer/async_provider.py:make_checkpointer"
+    "path": "./packages/harness/deerflow/runtime/checkpointer/async_provider.py:make_checkpointer"
   }
 }

@@ -1,4 +1,3 @@
-from .checkpointer import get_checkpointer, make_checkpointer, reset_checkpointer
 from .factory import create_deerflow_agent
 from .features import Next, Prev, RuntimeFeatures
 from .lead_agent import make_lead_agent
@@ -18,7 +17,4 @@ __all__ = [
     "make_lead_agent",
     "SandboxState",
     "ThreadState",
-    "get_checkpointer",
-    "reset_checkpointer",
-    "make_checkpointer",
 ]
@@ -18,7 +18,7 @@ from deerflow.agents.middlewares.tool_error_handling_middleware import build_lea
 from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddleware
 from deerflow.agents.thread_state import ThreadState
 from deerflow.config.agents_config import load_agent_config, validate_agent_name
-from deerflow.config.app_config import get_app_config
+from deerflow.config.app_config import AppConfig, get_app_config
 from deerflow.config.memory_config import get_memory_config
 from deerflow.config.summarization_config import get_summarization_config
 from deerflow.models import create_chat_model
@@ -35,9 +35,9 @@ def _get_runtime_config(config: RunnableConfig) -> dict:
     return cfg


-def _resolve_model_name(requested_model_name: str | None = None) -> str:
+def _resolve_model_name(requested_model_name: str | None = None, *, app_config: AppConfig | None = None) -> str:
     """Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
-    app_config = get_app_config()
+    app_config = app_config or get_app_config()
     default_model_name = app_config.models[0].name if app_config.models else None
     if default_model_name is None:
         raise ValueError("No chat models are configured. Please configure at least one model in config.yaml.")
@@ -50,7 +50,7 @@ def _resolve_model_name(requested_model_name: str | None = None) -> str:
     return default_model_name


-def _create_summarization_middleware() -> DeerFlowSummarizationMiddleware | None:
+def _create_summarization_middleware(*, app_config: AppConfig | None = None) -> DeerFlowSummarizationMiddleware | None:
     """Create and configure the summarization middleware from config."""
     config = get_summarization_config()

@@ -68,13 +68,15 @@ def _create_summarization_middleware() -> DeerFlowSummarizationMiddleware | None
     # Prepare keep parameter
     keep = config.keep.to_tuple()

-    # Prepare model parameter
+    # Prepare model parameter.
+    # Bind "middleware:summarize" tag so RunJournal identifies these LLM calls
+    # as middleware rather than lead_agent (SummarizationMiddleware is a
+    # LangChain built-in, so we tag the model at creation time).
     if config.model_name:
-        model = create_chat_model(name=config.model_name, thinking_enabled=False)
+        model = create_chat_model(name=config.model_name, thinking_enabled=False, app_config=app_config)
     else:
         # Use a lightweight model for summarization to save costs
         # Falls back to default model if not explicitly specified
-        model = create_chat_model(thinking_enabled=False)
+        model = create_chat_model(thinking_enabled=False, app_config=app_config)
+    model = model.with_config(tags=["middleware:summarize"])

     # Prepare kwargs
     kwargs = {
@@ -97,7 +99,8 @@ def _create_summarization_middleware() -> DeerFlowSummarizationMiddleware | None
     # the sole entry point for DeerFlowSummarizationMiddleware, and the runtime
     # config is not expected to change after startup.
     try:
-        skills_container_path = get_app_config().skills.container_path or "/mnt/skills"
+        resolved_app_config = app_config or get_app_config()
+        skills_container_path = resolved_app_config.skills.container_path or "/mnt/skills"
     except Exception:
         logger.exception("Failed to resolve skills container path; falling back to default")
         skills_container_path = "/mnt/skills"
@@ -238,7 +241,14 @@ Being proactive with task management demonstrates thoroughness and ensures all r
 # ViewImageMiddleware should be before ClarificationMiddleware to inject image details before LLM
 # ToolErrorHandlingMiddleware should be before ClarificationMiddleware to convert tool exceptions to ToolMessages
 # ClarificationMiddleware should be last to intercept clarification requests after model calls
-def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_name: str | None = None, custom_middlewares: list[AgentMiddleware] | None = None):
+def _build_middlewares(
+    config: RunnableConfig,
+    model_name: str | None,
+    agent_name: str | None = None,
+    custom_middlewares: list[AgentMiddleware] | None = None,
+    *,
+    app_config: AppConfig | None = None,
+):
     """Build middleware chain based on runtime configuration.

     Args:
@@ -250,9 +260,10 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
         List of middleware instances.
     """
     middlewares = build_lead_runtime_middlewares(lazy_init=True)
+    resolved_app_config = app_config or get_app_config()

     # Add summarization middleware if enabled
-    summarization_middleware = _create_summarization_middleware()
+    summarization_middleware = _create_summarization_middleware(app_config=resolved_app_config)
     if summarization_middleware is not None:
         middlewares.append(summarization_middleware)

@@ -264,7 +275,7 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
         middlewares.append(todo_list_middleware)

     # Add TokenUsageMiddleware when token_usage tracking is enabled
-    if get_app_config().token_usage.enabled:
+    if resolved_app_config.token_usage.enabled:
         middlewares.append(TokenUsageMiddleware())

     # Add TitleMiddleware
@@ -275,13 +286,12 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam

     # Add ViewImageMiddleware only if the current model supports vision.
     # Use the resolved runtime model_name from make_lead_agent to avoid stale config values.
-    app_config = get_app_config()
-    model_config = app_config.get_model_config(model_name) if model_name else None
+    model_config = resolved_app_config.get_model_config(model_name) if model_name else None
     if model_config is not None and model_config.supports_vision:
         middlewares.append(ViewImageMiddleware())

     # Add DeferredToolFilterMiddleware to hide deferred tool schemas from model binding
-    if app_config.tool_search.enabled:
+    if resolved_app_config.tool_search.enabled:
         from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware

         middlewares.append(DeferredToolFilterMiddleware())
@@ -304,12 +314,13 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
     return middlewares


-def make_lead_agent(config: RunnableConfig):
+def make_lead_agent(config: RunnableConfig, app_config: AppConfig | None = None):
     # Lazy import to avoid circular dependency
     from deerflow.tools import get_available_tools
     from deerflow.tools.builtins import setup_agent

     cfg = _get_runtime_config(config)
+    resolved_app_config = app_config or get_app_config()

     thinking_enabled = cfg.get("thinking_enabled", True)
     reasoning_effort = cfg.get("reasoning_effort", None)
@@ -325,10 +336,9 @@ def make_lead_agent(config: RunnableConfig):
     agent_model_name = agent_config.model if agent_config and agent_config.model else None

     # Final model name resolution: request → agent config → global default, with fallback for unknown names
-    model_name = _resolve_model_name(requested_model_name or agent_model_name)
+    model_name = _resolve_model_name(requested_model_name or agent_model_name, app_config=resolved_app_config)

-    app_config = get_app_config()
-    model_config = app_config.get_model_config(model_name)
+    model_config = resolved_app_config.get_model_config(model_name)

     if model_config is None:
         raise ValueError("No chat model could be resolved. Please configure at least one model in config.yaml or provide a valid 'model_name'/'model' in the request.")
@@ -367,20 +377,34 @@ def make_lead_agent(config: RunnableConfig):
     if is_bootstrap:
         # Special bootstrap agent with minimal prompt for initial custom agent creation flow
         return create_agent(
-            model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled),
-            tools=get_available_tools(model_name=model_name, subagent_enabled=subagent_enabled) + [setup_agent],
-            middleware=_build_middlewares(config, model_name=model_name),
-            system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, available_skills=set(["bootstrap"])),
+            model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, app_config=resolved_app_config),
+            tools=get_available_tools(model_name=model_name, subagent_enabled=subagent_enabled, app_config=resolved_app_config) + [setup_agent],
+            middleware=_build_middlewares(config, model_name=model_name, app_config=resolved_app_config),
+            system_prompt=apply_prompt_template(
+                subagent_enabled=subagent_enabled,
+                max_concurrent_subagents=max_concurrent_subagents,
+                available_skills=set(["bootstrap"]),
+                app_config=resolved_app_config,
+            ),
             state_schema=ThreadState,
         )

     # Default lead agent (unchanged behavior)
     return create_agent(
-        model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, reasoning_effort=reasoning_effort),
-        tools=get_available_tools(model_name=model_name, groups=agent_config.tool_groups if agent_config else None, subagent_enabled=subagent_enabled),
-        middleware=_build_middlewares(config, model_name=model_name, agent_name=agent_name),
+        model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, reasoning_effort=reasoning_effort, app_config=resolved_app_config),
+        tools=get_available_tools(
+            model_name=model_name,
+            groups=agent_config.tool_groups if agent_config else None,
+            subagent_enabled=subagent_enabled,
+            app_config=resolved_app_config,
+        ),
+        middleware=_build_middlewares(config, model_name=model_name, agent_name=agent_name, app_config=resolved_app_config),
         system_prompt=apply_prompt_template(
-            subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, agent_name=agent_name, available_skills=set(agent_config.skills) if agent_config and agent_config.skills is not None else None
+            subagent_enabled=subagent_enabled,
+            max_concurrent_subagents=max_concurrent_subagents,
+            agent_name=agent_name,
+            available_skills=set(agent_config.skills) if agent_config and agent_config.skills is not None else None,
+            app_config=resolved_app_config,
         ),
         state_schema=ThreadState,
     )
@@ -1,14 +1,20 @@
 from __future__ import annotations

 import asyncio
 import logging
 import threading
 from datetime import datetime
 from functools import lru_cache
+from typing import TYPE_CHECKING

 from deerflow.config.agents_config import load_agent_soul
+from deerflow.skills import load_skills
+from deerflow.skills.types import Skill
 from deerflow.subagents import get_available_subagent_names

+if TYPE_CHECKING:
+    from deerflow.config.app_config import AppConfig
+
 logger = logging.getLogger(__name__)

 _ENABLED_SKILLS_REFRESH_WAIT_TIMEOUT_SECONDS = 5.0
@@ -111,6 +117,19 @@ def _get_enabled_skills():
         return []


+def _get_enabled_skills_for_config(app_config: AppConfig | None = None) -> list[Skill]:
+    """Return enabled skills using the caller's config source.
+
+    When a concrete ``app_config`` is supplied, bypass the global enabled-skills
+    cache so the skill list and skill paths are resolved from the same config
+    object. This keeps request-scoped config injection consistent even while the
+    release branch still supports global fallback paths.
+    """
+    if app_config is None:
+        return _get_enabled_skills()
+    return list(load_skills(enabled_only=True, app_config=app_config))
+
+
 def _skill_mutability_label(category: str) -> str:
     return "[custom, editable]" if category == "custom" else "[built-in]"

@@ -123,31 +142,6 @@ async def refresh_skills_system_prompt_cache_async() -> None:
     await asyncio.to_thread(_invalidate_enabled_skills_cache().wait)


-def _reset_skills_system_prompt_cache_state() -> None:
-    global _enabled_skills_cache, _enabled_skills_refresh_active, _enabled_skills_refresh_version
-
-    _get_cached_skills_prompt_section.cache_clear()
-    with _enabled_skills_lock:
-        _enabled_skills_cache = None
-        _enabled_skills_refresh_active = False
-        _enabled_skills_refresh_version = 0
-        _enabled_skills_refresh_event.clear()
-
-
-def _refresh_enabled_skills_cache() -> None:
-    """Backward-compatible test helper for direct synchronous reload."""
-    try:
-        skills = _load_enabled_skills_sync()
-    except Exception:
-        logger.exception("Failed to load enabled skills for prompt injection")
-        skills = []
-
-    with _enabled_skills_lock:
-        _enabled_skills_cache = skills
-        _enabled_skills_refresh_active = False
-        _enabled_skills_refresh_event.set()
-
-
 def _build_skill_evolution_section(skill_evolution_enabled: bool) -> str:
     if not skill_evolution_enabled:
         return ""
@@ -548,12 +542,13 @@ def _get_memory_context(agent_name: str | None = None) -> str:
     try:
         from deerflow.agents.memory import format_memory_for_injection, get_memory_data
         from deerflow.config.memory_config import get_memory_config
+        from deerflow.runtime.user_context import get_effective_user_id

         config = get_memory_config()
         if not config.enabled or not config.injection_enabled:
             return ""

-        memory_data = get_memory_data(agent_name)
+        memory_data = get_memory_data(agent_name, user_id=get_effective_user_id())
         memory_content = format_memory_for_injection(memory_data, max_tokens=config.max_injection_tokens)

         if not memory_content.strip():
@@ -600,14 +595,14 @@ You have access to skills that provide optimized workflows for specific tasks. E
 </skill_system>"""


-def get_skills_prompt_section(available_skills: set[str] | None = None) -> str:
+def get_skills_prompt_section(available_skills: set[str] | None = None, *, app_config: AppConfig | None = None) -> str:
     """Generate the skills prompt section with available skills list."""
-    skills = _get_enabled_skills()
+    skills = _get_enabled_skills_for_config(app_config)

     try:
         from deerflow.config import get_app_config

-        config = get_app_config()
+        config = app_config or get_app_config()
         container_base_path = config.skills.container_path
         skill_evolution_enabled = config.skill_evolution.enabled
     except Exception:
@@ -636,7 +631,7 @@ def get_agent_soul(agent_name: str | None) -> str:
         return ""


-def get_deferred_tools_prompt_section() -> str:
+def get_deferred_tools_prompt_section(*, app_config: AppConfig | None = None) -> str:
     """Generate <available-deferred-tools> block for the system prompt.

     Lists only deferred tool names so the agent knows what exists
@@ -648,7 +643,8 @@ def get_deferred_tools_prompt_section() -> str:
     try:
         from deerflow.config import get_app_config

-        if not get_app_config().tool_search.enabled:
+        config = app_config or get_app_config()
+        if not config.tool_search.enabled:
             return ""
     except Exception:
         return ""
@@ -681,12 +677,13 @@ def _build_acp_section() -> str:
     )


-def _build_custom_mounts_section() -> str:
+def _build_custom_mounts_section(*, app_config: AppConfig | None = None) -> str:
     """Build a prompt section for explicitly configured sandbox mounts."""
     try:
         from deerflow.config import get_app_config

-        mounts = get_app_config().sandbox.mounts or []
+        config = app_config or get_app_config()
+        mounts = config.sandbox.mounts or []
     except Exception:
         logger.exception("Failed to load configured sandbox mounts for the lead-agent prompt")
         return ""
@@ -703,7 +700,14 @@ def _build_custom_mounts_section() -> str:
     return f"\n**Custom Mounted Directories:**\n{mounts_list}\n- If the user needs files outside `/mnt/user-data`, use these absolute container paths directly when they match the requested directory"


-def apply_prompt_template(subagent_enabled: bool = False, max_concurrent_subagents: int = 3, *, agent_name: str | None = None, available_skills: set[str] | None = None) -> str:
+def apply_prompt_template(
+    subagent_enabled: bool = False,
+    max_concurrent_subagents: int = 3,
+    *,
+    agent_name: str | None = None,
+    available_skills: set[str] | None = None,
+    app_config: AppConfig | None = None,
+) -> str:
     # Get memory context
     memory_context = _get_memory_context(agent_name)

@@ -730,14 +734,14 @@ def apply_prompt_template(subagent_enabled: bool = False, max_concurrent_subagen
     )

     # Get skills section
-    skills_section = get_skills_prompt_section(available_skills)
+    skills_section = get_skills_prompt_section(available_skills, app_config=app_config)

     # Get deferred tools section (tool_search)
-    deferred_tools_section = get_deferred_tools_prompt_section()
+    deferred_tools_section = get_deferred_tools_prompt_section(app_config=app_config)

     # Build ACP agent section only if ACP agents are configured
     acp_section = _build_acp_section()
-    custom_mounts_section = _build_custom_mounts_section()
+    custom_mounts_section = _build_custom_mounts_section(app_config=app_config)
     acp_and_mounts_section = "\n".join(section for section in (acp_section, custom_mounts_section) if section)

     # Format the prompt with dynamic skills and memory
@@ -20,6 +20,7 @@ class ConversationContext:
     messages: list[Any]
     timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
     agent_name: str | None = None
+    user_id: str | None = None
     correction_detected: bool = False
     reinforcement_detected: bool = False

@@ -44,6 +45,7 @@ class MemoryUpdateQueue:
         thread_id: str,
         messages: list[Any],
         agent_name: str | None = None,
+        user_id: str | None = None,
         correction_detected: bool = False,
         reinforcement_detected: bool = False,
     ) -> None:
@@ -53,6 +55,9 @@ class MemoryUpdateQueue:
             thread_id: The thread ID.
             messages: The conversation messages.
             agent_name: If provided, memory is stored per-agent. If None, uses global memory.
+            user_id: The user ID captured at enqueue time. Stored in ConversationContext so it
+                survives the threading.Timer boundary (ContextVar does not propagate across
+                raw threads).
             correction_detected: Whether recent turns include an explicit correction signal.
             reinforcement_detected: Whether recent turns include a positive reinforcement signal.
         """
@@ -65,6 +70,7 @@ class MemoryUpdateQueue:
             thread_id=thread_id,
             messages=messages,
             agent_name=agent_name,
+            user_id=user_id,
             correction_detected=correction_detected,
             reinforcement_detected=reinforcement_detected,
         )
@@ -77,6 +83,7 @@ class MemoryUpdateQueue:
         thread_id: str,
         messages: list[Any],
         agent_name: str | None = None,
+        user_id: str | None = None,
         correction_detected: bool = False,
         reinforcement_detected: bool = False,
     ) -> None:
@@ -90,6 +97,7 @@ class MemoryUpdateQueue:
             thread_id=thread_id,
             messages=messages,
             agent_name=agent_name,
+            user_id=user_id,
             correction_detected=correction_detected,
             reinforcement_detected=reinforcement_detected,
         )
@@ -103,6 +111,7 @@ class MemoryUpdateQueue:
         thread_id: str,
         messages: list[Any],
         agent_name: str | None,
+        user_id: str | None,
         correction_detected: bool,
         reinforcement_detected: bool,
     ) -> None:
@@ -116,6 +125,7 @@ class MemoryUpdateQueue:
             thread_id=thread_id,
             messages=messages,
             agent_name=agent_name,
+            user_id=user_id,
             correction_detected=merged_correction_detected,
             reinforcement_detected=merged_reinforcement_detected,
         )
@@ -176,6 +186,7 @@ class MemoryUpdateQueue:
                 agent_name=context.agent_name,
                 correction_detected=context.correction_detected,
                 reinforcement_detected=context.reinforcement_detected,
+                user_id=context.user_id,
             )
             if success:
                 logger.info("Memory updated successfully for thread %s", context.thread_id)
@@ -44,17 +44,17 @@ class MemoryStorage(abc.ABC):
     """Abstract base class for memory storage providers."""

     @abc.abstractmethod
-    def load(self, agent_name: str | None = None) -> dict[str, Any]:
+    def load(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
         """Load memory data for the given agent."""
         pass

     @abc.abstractmethod
-    def reload(self, agent_name: str | None = None) -> dict[str, Any]:
+    def reload(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
         """Force reload memory data for the given agent."""
         pass

     @abc.abstractmethod
-    def save(self, memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
+    def save(self, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
         """Save memory data for the given agent."""
         pass
@@ -64,9 +64,9 @@ class FileMemoryStorage(MemoryStorage):

     def __init__(self):
         """Initialize the file memory storage."""
-        # Per-agent memory cache: keyed by agent_name (None = global)
+        # Per-user/agent memory cache: keyed by (user_id, agent_name) tuple (None = global)
         # Value: (memory_data, file_mtime)
-        self._memory_cache: dict[str | None, tuple[dict[str, Any], float | None]] = {}
+        self._memory_cache: dict[tuple[str | None, str | None], tuple[dict[str, Any], float | None]] = {}
         # Guards all reads and writes to _memory_cache across concurrent callers.
         self._cache_lock = threading.Lock()
@@ -81,21 +81,29 @@ class FileMemoryStorage(MemoryStorage):
         if not AGENT_NAME_PATTERN.match(agent_name):
             raise ValueError(f"Invalid agent name {agent_name!r}: names must match {AGENT_NAME_PATTERN.pattern}")

-    def _get_memory_file_path(self, agent_name: str | None = None) -> Path:
+    def _get_memory_file_path(self, agent_name: str | None = None, *, user_id: str | None = None) -> Path:
         """Get the path to the memory file."""
+        if user_id is not None:
+            if agent_name is not None:
+                self._validate_agent_name(agent_name)
+                return get_paths().user_agent_memory_file(user_id, agent_name)
+            config = get_memory_config()
+            if config.storage_path and Path(config.storage_path).is_absolute():
+                return Path(config.storage_path)
+            return get_paths().user_memory_file(user_id)
+        # Legacy: no user_id
         if agent_name is not None:
             self._validate_agent_name(agent_name)
             return get_paths().agent_memory_file(agent_name)

         config = get_memory_config()
         if config.storage_path:
             p = Path(config.storage_path)
             return p if p.is_absolute() else get_paths().base_dir / p
         return get_paths().memory_file

-    def _load_memory_from_file(self, agent_name: str | None = None) -> dict[str, Any]:
+    def _load_memory_from_file(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
         """Load memory data from file."""
-        file_path = self._get_memory_file_path(agent_name)
+        file_path = self._get_memory_file_path(agent_name, user_id=user_id)

         if not file_path.exists():
             return create_empty_memory()
@@ -108,9 +116,14 @@ class FileMemoryStorage(MemoryStorage):
             logger.warning("Failed to load memory file: %s", e)
             return create_empty_memory()

-    def load(self, agent_name: str | None = None) -> dict[str, Any]:
+    @staticmethod
+    def _cache_key(agent_name: str | None = None, *, user_id: str | None = None) -> tuple[str | None, str | None]:
+        return (user_id, agent_name)
+
+    def load(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
         """Load memory data (cached with file modification time check)."""
-        file_path = self._get_memory_file_path(agent_name)
+        file_path = self._get_memory_file_path(agent_name, user_id=user_id)
+        cache_key = self._cache_key(agent_name, user_id=user_id)

         try:
             current_mtime = file_path.stat().st_mtime if file_path.exists() else None
@@ -118,21 +131,22 @@ class FileMemoryStorage(MemoryStorage):
             current_mtime = None

         with self._cache_lock:
-            cached = self._memory_cache.get(agent_name)
+            cached = self._memory_cache.get(cache_key)
             if cached is not None and cached[1] == current_mtime:
                 return cached[0]

-        memory_data = self._load_memory_from_file(agent_name)
+        memory_data = self._load_memory_from_file(agent_name, user_id=user_id)

         with self._cache_lock:
-            self._memory_cache[agent_name] = (memory_data, current_mtime)
+            self._memory_cache[cache_key] = (memory_data, current_mtime)

         return memory_data

-    def reload(self, agent_name: str | None = None) -> dict[str, Any]:
+    def reload(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
         """Reload memory data from file, forcing cache invalidation."""
-        file_path = self._get_memory_file_path(agent_name)
-        memory_data = self._load_memory_from_file(agent_name)
+        file_path = self._get_memory_file_path(agent_name, user_id=user_id)
+        memory_data = self._load_memory_from_file(agent_name, user_id=user_id)
+        cache_key = self._cache_key(agent_name, user_id=user_id)

         try:
             mtime = file_path.stat().st_mtime if file_path.exists() else None
@@ -140,12 +154,13 @@ class FileMemoryStorage(MemoryStorage):
             mtime = None

         with self._cache_lock:
-            self._memory_cache[agent_name] = (memory_data, mtime)
+            self._memory_cache[cache_key] = (memory_data, mtime)
         return memory_data

-    def save(self, memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
+    def save(self, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
         """Save memory data to file and update cache."""
-        file_path = self._get_memory_file_path(agent_name)
+        file_path = self._get_memory_file_path(agent_name, user_id=user_id)
+        cache_key = self._cache_key(agent_name, user_id=user_id)

         try:
             file_path.parent.mkdir(parents=True, exist_ok=True)
@@ -166,7 +181,7 @@ class FileMemoryStorage(MemoryStorage):
                 mtime = None

             with self._cache_lock:
-                self._memory_cache[agent_name] = (memory_data, mtime)
+                self._memory_cache[cache_key] = (memory_data, mtime)
             logger.info("Memory saved to %s", file_path)
             return True
         except OSError as e:
@@ -38,27 +38,28 @@ def _create_empty_memory() -> dict[str, Any]:
     return create_empty_memory()


-def _save_memory_to_file(memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
+def _save_memory_to_file(memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
     """Backward-compatible wrapper around the configured memory storage save path."""
-    return get_memory_storage().save(memory_data, agent_name)
+    return get_memory_storage().save(memory_data, agent_name, user_id=user_id)


-def get_memory_data(agent_name: str | None = None) -> dict[str, Any]:
+def get_memory_data(agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
     """Get the current memory data via storage provider."""
-    return get_memory_storage().load(agent_name)
+    return get_memory_storage().load(agent_name, user_id=user_id)


-def reload_memory_data(agent_name: str | None = None) -> dict[str, Any]:
+def reload_memory_data(agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
     """Reload memory data via storage provider."""
-    return get_memory_storage().reload(agent_name)
+    return get_memory_storage().reload(agent_name, user_id=user_id)


-def import_memory_data(memory_data: dict[str, Any], agent_name: str | None = None) -> dict[str, Any]:
+def import_memory_data(memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
     """Persist imported memory data via storage provider.

     Args:
         memory_data: Full memory payload to persist.
         agent_name: If provided, imports into per-agent memory.
+        user_id: If provided, scopes memory to a specific user.

     Returns:
         The saved memory data after storage normalization.
@@ -67,15 +68,15 @@ def import_memory_data(memory_data: dict[str, Any], agent_name: str | None = Non
         OSError: If persisting the imported memory fails.
     """
     storage = get_memory_storage()
-    if not storage.save(memory_data, agent_name):
+    if not storage.save(memory_data, agent_name, user_id=user_id):
         raise OSError("Failed to save imported memory data")
-    return storage.load(agent_name)
+    return storage.load(agent_name, user_id=user_id)


-def clear_memory_data(agent_name: str | None = None) -> dict[str, Any]:
+def clear_memory_data(agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
     """Clear all stored memory data and persist an empty structure."""
     cleared_memory = create_empty_memory()
-    if not _save_memory_to_file(cleared_memory, agent_name):
+    if not _save_memory_to_file(cleared_memory, agent_name, user_id=user_id):
         raise OSError("Failed to save cleared memory data")
     return cleared_memory
@@ -92,6 +93,8 @@ def create_memory_fact(
     category: str = "context",
     confidence: float = 0.5,
     agent_name: str | None = None,
+    *,
+    user_id: str | None = None,
 ) -> dict[str, Any]:
     """Create a new fact and persist the updated memory data."""
     normalized_content = content.strip()
@@ -101,7 +104,7 @@ def create_memory_fact(
     normalized_category = category.strip() or "context"
     validated_confidence = _validate_confidence(confidence)
     now = utc_now_iso_z()
-    memory_data = get_memory_data(agent_name)
+    memory_data = get_memory_data(agent_name, user_id=user_id)
     updated_memory = dict(memory_data)
     facts = list(memory_data.get("facts", []))
     facts.append(
@@ -116,15 +119,15 @@ def create_memory_fact(
     )
     updated_memory["facts"] = facts

-    if not _save_memory_to_file(updated_memory, agent_name):
+    if not _save_memory_to_file(updated_memory, agent_name, user_id=user_id):
         raise OSError("Failed to save memory data after creating fact")

     return updated_memory


-def delete_memory_fact(fact_id: str, agent_name: str | None = None) -> dict[str, Any]:
+def delete_memory_fact(fact_id: str, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
     """Delete a fact by its id and persist the updated memory data."""
-    memory_data = get_memory_data(agent_name)
+    memory_data = get_memory_data(agent_name, user_id=user_id)
     facts = memory_data.get("facts", [])
     updated_facts = [fact for fact in facts if fact.get("id") != fact_id]
     if len(updated_facts) == len(facts):
@@ -133,7 +136,7 @@ def delete_memory_fact(fact_id: str, agent_name: str | None = None) -> dict[str,
     updated_memory = dict(memory_data)
     updated_memory["facts"] = updated_facts

-    if not _save_memory_to_file(updated_memory, agent_name):
+    if not _save_memory_to_file(updated_memory, agent_name, user_id=user_id):
         raise OSError(f"Failed to save memory data after deleting fact '{fact_id}'")

     return updated_memory
@@ -145,9 +148,11 @@ def update_memory_fact(
     category: str | None = None,
     confidence: float | None = None,
     agent_name: str | None = None,
+    *,
+    user_id: str | None = None,
 ) -> dict[str, Any]:
     """Update an existing fact and persist the updated memory data."""
-    memory_data = get_memory_data(agent_name)
+    memory_data = get_memory_data(agent_name, user_id=user_id)
     updated_memory = dict(memory_data)
     updated_facts: list[dict[str, Any]] = []
     found = False
@@ -174,7 +179,7 @@ def update_memory_fact(

     updated_memory["facts"] = updated_facts

-    if not _save_memory_to_file(updated_memory, agent_name):
+    if not _save_memory_to_file(updated_memory, agent_name, user_id=user_id):
         raise OSError(f"Failed to save memory data after updating fact '{fact_id}'")

     return updated_memory
@@ -431,6 +436,7 @@ class MemoryUpdater:
         agent_name: str | None = None,
         correction_detected: bool = False,
         reinforcement_detected: bool = False,
+        user_id: str | None = None,
     ) -> bool:
         """Synchronously update memory via the async updater path.

@@ -440,6 +446,7 @@ class MemoryUpdater:
             agent_name: If provided, updates per-agent memory. If None, updates global memory.
             correction_detected: Whether recent turns include an explicit correction signal.
             reinforcement_detected: Whether recent turns include a positive reinforcement signal.
+            user_id: If provided, scopes memory to a specific user.

         Returns:
             True if update was successful, False otherwise.
@@ -547,6 +554,7 @@ def update_memory_from_conversation(
     agent_name: str | None = None,
     correction_detected: bool = False,
     reinforcement_detected: bool = False,
+    user_id: str | None = None,
 ) -> bool:
     """Convenience function to update memory from a conversation.

@@ -556,9 +564,10 @@ def update_memory_from_conversation(
         agent_name: If provided, updates per-agent memory. If None, updates global memory.
         correction_detected: Whether recent turns include an explicit correction signal.
         reinforcement_detected: Whether recent turns include a positive reinforcement signal.
+        user_id: If provided, scopes memory to a specific user.

     Returns:
         True if successful, False otherwise.
     """
     updater = MemoryUpdater()
-    return updater.update_memory(messages, thread_id, agent_name, correction_detected, reinforcement_detected)
+    return updater.update_memory(messages, thread_id, agent_name, correction_detected, reinforcement_detected, user_id=user_id)
@@ -362,7 +362,7 @@ class LoopDetectionMiddleware(AgentMiddleware[AgentState]):
             # the conversation; injecting one mid-conversation crashes
             # langchain_anthropic's _format_messages(). HumanMessage works
            # with all providers. See #1299.
-            return {"messages": [HumanMessage(content=warning)]}
+            return {"messages": [HumanMessage(content=warning, name="loop_warning")]}

         return None
@@ -11,6 +11,7 @@ from langgraph.runtime import Runtime
 from deerflow.agents.memory.message_processing import detect_correction, detect_reinforcement, filter_messages_for_memory
 from deerflow.agents.memory.queue import get_memory_queue
 from deerflow.config.memory_config import get_memory_config
+from deerflow.runtime.user_context import get_effective_user_id

 logger = logging.getLogger(__name__)

@@ -86,11 +87,16 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
         # Queue the filtered conversation for memory update
         correction_detected = detect_correction(filtered_messages)
         reinforcement_detected = not correction_detected and detect_reinforcement(filtered_messages)
+        # Capture user_id at enqueue time while the request context is still alive.
+        # threading.Timer fires on a different thread where ContextVar values are not
+        # propagated, so we must store user_id explicitly in ConversationContext.
+        user_id = get_effective_user_id()
         queue = get_memory_queue()
         queue.add(
             thread_id=thread_id,
             messages=filtered_messages,
             agent_name=self._agent_name,
+            user_id=user_id,
             correction_detected=correction_detected,
             reinforcement_detected=reinforcement_detected,
         )
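
The capture-at-enqueue comment above is easy to verify in isolation. A minimal, self-contained sketch (not project code) showing that a ContextVar set in the request thread is invisible inside a `threading.Timer` callback, while an explicitly captured value survives:

```python
# Standalone demo: ContextVars do not propagate into threading.Timer callbacks.
import contextvars
import threading

user_var: contextvars.ContextVar[str | None] = contextvars.ContextVar("user_id", default=None)
user_var.set("u-123")

def callback(captured_user_id: str | None) -> None:
    print("via ContextVar:", user_var.get())    # None: the Timer thread starts a fresh context
    print("via capture:   ", captured_user_id)  # "u-123": value snapshotted at schedule time

# Snapshot the value now, while the request context is alive, and pass it along.
threading.Timer(0.1, callback, args=(user_var.get(),)).start()
```

This is exactly why `user_id` is stored in `ConversationContext` rather than re-read from the context variable inside the worker.
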
@@ -5,11 +5,11 @@ from __future__ import annotations
 import logging
 from collections.abc import Collection
 from dataclasses import dataclass
-from typing import Any, Protocol, runtime_checkable
+from typing import Any, Protocol, override, runtime_checkable

 from langchain.agents import AgentState
 from langchain.agents.middleware import SummarizationMiddleware
-from langchain_core.messages import AIMessage, AnyMessage, RemoveMessage, ToolMessage
+from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, RemoveMessage, ToolMessage
 from langgraph.config import get_config
 from langgraph.graph.message import REMOVE_ALL_MESSAGES
 from langgraph.runtime import Runtime
@@ -173,6 +173,13 @@ class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
             ]
         }

+    @override
+    def _build_new_messages(self, summary: str) -> list[HumanMessage]:
+        """Override the base implementation to tag the human message with the special name 'summary'.
+
+        The frontend hides messages with this name, but the model can still use them as context.
+        """
+        return [HumanMessage(content=f"Here is a summary of the conversation to date:\n\n{summary}", name="summary")]
+
     def _partition_with_skill_rescue(
         self,
         messages: list[AnyMessage],
@@ -1,13 +1,16 @@
 import logging
+from datetime import UTC, datetime
 from typing import NotRequired, override

 from langchain.agents import AgentState
 from langchain.agents.middleware import AgentMiddleware
+from langchain_core.messages import HumanMessage
 from langgraph.config import get_config
 from langgraph.runtime import Runtime

 from deerflow.agents.thread_state import ThreadDataState
 from deerflow.config.paths import Paths, get_paths
+from deerflow.runtime.user_context import get_effective_user_id

 logger = logging.getLogger(__name__)

@@ -46,32 +49,34 @@ class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
         self._paths = Paths(base_dir) if base_dir else get_paths()
         self._lazy_init = lazy_init

-    def _get_thread_paths(self, thread_id: str) -> dict[str, str]:
+    def _get_thread_paths(self, thread_id: str, user_id: str | None = None) -> dict[str, str]:
         """Get the paths for a thread's data directories.

         Args:
             thread_id: The thread ID.
+            user_id: Optional user ID for per-user path isolation.

         Returns:
             Dictionary with workspace_path, uploads_path, and outputs_path.
         """
         return {
-            "workspace_path": str(self._paths.sandbox_work_dir(thread_id)),
-            "uploads_path": str(self._paths.sandbox_uploads_dir(thread_id)),
-            "outputs_path": str(self._paths.sandbox_outputs_dir(thread_id)),
+            "workspace_path": str(self._paths.sandbox_work_dir(thread_id, user_id=user_id)),
+            "uploads_path": str(self._paths.sandbox_uploads_dir(thread_id, user_id=user_id)),
+            "outputs_path": str(self._paths.sandbox_outputs_dir(thread_id, user_id=user_id)),
         }

-    def _create_thread_directories(self, thread_id: str) -> dict[str, str]:
+    def _create_thread_directories(self, thread_id: str, user_id: str | None = None) -> dict[str, str]:
         """Create the thread data directories.

         Args:
             thread_id: The thread ID.
+            user_id: Optional user ID for per-user path isolation.

         Returns:
             Dictionary with the created directory paths.
         """
-        self._paths.ensure_thread_dirs(thread_id)
-        return self._get_thread_paths(thread_id)
+        self._paths.ensure_thread_dirs(thread_id, user_id=user_id)
+        return self._get_thread_paths(thread_id, user_id=user_id)

     @override
     def before_agent(self, state: ThreadDataMiddlewareState, runtime: Runtime) -> dict | None:
@@ -84,16 +89,30 @@ class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
         if thread_id is None:
             raise ValueError("Thread ID is required in runtime context or config.configurable")

+        user_id = get_effective_user_id()
+
         if self._lazy_init:
             # Lazy initialization: only compute paths, don't create directories
-            paths = self._get_thread_paths(thread_id)
+            paths = self._get_thread_paths(thread_id, user_id=user_id)
         else:
             # Eager initialization: create directories immediately
-            paths = self._create_thread_directories(thread_id)
+            paths = self._create_thread_directories(thread_id, user_id=user_id)
             logger.debug("Created thread data directories for thread %s", thread_id)

+        messages = list(state.get("messages", []))
+        last_message = messages[-1] if messages else None
+
+        if last_message and isinstance(last_message, HumanMessage):
+            messages[-1] = HumanMessage(
+                content=last_message.content,
+                id=last_message.id,
+                name=last_message.name or "user-input",
+                additional_kwargs={**last_message.additional_kwargs, "run_id": runtime.context.get("run_id"), "timestamp": datetime.now(UTC).isoformat()},
+            )
+
         return {
             "thread_data": {
                 **paths,
-            }
+            },
+            "messages": messages,
         }
@@ -2,10 +2,11 @@

 import logging
 import re
-from typing import NotRequired, override
+from typing import Any, NotRequired, override

 from langchain.agents import AgentState
 from langchain.agents.middleware import AgentMiddleware
+from langgraph.config import get_config
 from langgraph.runtime import Runtime

 from deerflow.config.title_config import get_title_config
@@ -106,6 +107,21 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
             return user_msg[:fallback_chars].rstrip() + "..."
         return user_msg if user_msg else "New Conversation"

+    def _get_runnable_config(self) -> dict[str, Any]:
+        """Inherit the parent RunnableConfig and add a middleware tag.
+
+        This ensures RunJournal identifies LLM calls from this middleware
+        as ``middleware:title`` instead of ``lead_agent``.
+        """
+        try:
+            parent = get_config()
+        except Exception:
+            parent = {}
+        config = {**parent}
+        config["run_name"] = "title_agent"
+        config["tags"] = [*(config.get("tags") or []), "middleware:title"]
+        return config
+
     def _generate_title_result(self, state: TitleMiddlewareState) -> dict | None:
         """Generate a local fallback title without blocking on an LLM call."""
         if not self._should_generate_title(state):
@@ -127,7 +143,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
             model = create_chat_model(name=config.model_name, thinking_enabled=False)
         else:
             model = create_chat_model(thinking_enabled=False)
-        response = await model.ainvoke(prompt, config={"run_name": "title_agent"})
+        response = await model.ainvoke(prompt, config=self._get_runnable_config())
         title = self._parse_title(response.content)
         if title:
             return {"title": title}
@@ -10,6 +10,7 @@ from langchain_core.messages import HumanMessage
 from langgraph.runtime import Runtime

 from deerflow.config.paths import Paths, get_paths
+from deerflow.runtime.user_context import get_effective_user_id
 from deerflow.utils.file_conversion import extract_outline

 logger = logging.getLogger(__name__)
@@ -221,7 +222,7 @@ class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
             thread_id = get_config().get("configurable", {}).get("thread_id")
         except RuntimeError:
             pass  # get_config() raises outside a runnable context (e.g. unit tests)
-        uploads_dir = self._paths.sandbox_uploads_dir(thread_id) if thread_id else None
+        uploads_dir = self._paths.sandbox_uploads_dir(thread_id, user_id=get_effective_user_id()) if thread_id else None

         # Get newly uploaded files from the current message's additional_kwargs.files
         new_files = self._files_from_kwargs(last_message, uploads_dir) or []
@@ -282,6 +283,7 @@ class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
         updated_message = HumanMessage(
             content=updated_content,
             id=last_message.id,
+            name=last_message.name,
             additional_kwargs=last_message.additional_kwargs,
         )
@@ -40,6 +40,7 @@ from deerflow.config.app_config import get_app_config, reload_app_config
 from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config
 from deerflow.config.paths import get_paths
 from deerflow.models import create_chat_model
+from deerflow.runtime.user_context import get_effective_user_id
 from deerflow.skills.installer import install_skill_from_archive
 from deerflow.uploads.manager import (
     claim_unique_filename,
@@ -240,7 +241,7 @@ class DeerFlowClient:
         }
         checkpointer = self._checkpointer
         if checkpointer is None:
-            from deerflow.agents.checkpointer import get_checkpointer
+            from deerflow.runtime.checkpointer import get_checkpointer

             checkpointer = get_checkpointer()
             if checkpointer is not None:
@@ -374,7 +375,7 @@ class DeerFlowClient:
         """
         checkpointer = self._checkpointer
         if checkpointer is None:
-            from deerflow.agents.checkpointer.provider import get_checkpointer
+            from deerflow.runtime.checkpointer.provider import get_checkpointer

             checkpointer = get_checkpointer()

@@ -429,7 +430,7 @@ class DeerFlowClient:
         """
         checkpointer = self._checkpointer
         if checkpointer is None:
-            from deerflow.agents.checkpointer.provider import get_checkpointer
+            from deerflow.runtime.checkpointer.provider import get_checkpointer

             checkpointer = get_checkpointer()

@@ -774,19 +775,19 @@ class DeerFlowClient:
         """
         from deerflow.agents.memory.updater import get_memory_data

-        return get_memory_data()
+        return get_memory_data(user_id=get_effective_user_id())

     def export_memory(self) -> dict:
         """Export current memory data for backup or transfer."""
         from deerflow.agents.memory.updater import get_memory_data

-        return get_memory_data()
+        return get_memory_data(user_id=get_effective_user_id())

     def import_memory(self, memory_data: dict) -> dict:
         """Import and persist full memory data."""
         from deerflow.agents.memory.updater import import_memory_data

-        return import_memory_data(memory_data)
+        return import_memory_data(memory_data, user_id=get_effective_user_id())

     def get_model(self, name: str) -> dict | None:
         """Get a specific model's configuration by name.
@@ -961,13 +962,13 @@ class DeerFlowClient:
         """
         from deerflow.agents.memory.updater import reload_memory_data

-        return reload_memory_data()
+        return reload_memory_data(user_id=get_effective_user_id())

     def clear_memory(self) -> dict:
         """Clear all persisted memory data."""
         from deerflow.agents.memory.updater import clear_memory_data

-        return clear_memory_data()
+        return clear_memory_data(user_id=get_effective_user_id())

     def create_memory_fact(self, content: str, category: str = "context", confidence: float = 0.5) -> dict:
         """Create a single fact manually."""
@@ -1184,7 +1185,7 @@ class DeerFlowClient:
             ValueError: If the path is invalid.
         """
         try:
-            actual = get_paths().resolve_virtual_path(thread_id, path)
+            actual = get_paths().resolve_virtual_path(thread_id, path, user_id=get_effective_user_id())
         except ValueError as exc:
             if "traversal" in str(exc):
                 from deerflow.uploads.manager import PathTraversalError
@ -27,6 +27,7 @@ except ImportError: # pragma: no cover - Windows fallback
|
||||
|
||||
from deerflow.config import get_app_config
|
||||
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
from deerflow.sandbox.sandbox import Sandbox
|
||||
from deerflow.sandbox.sandbox_provider import SandboxProvider
|
||||
|
||||
@@ -270,15 +271,16 @@ class AioSandboxProvider(SandboxProvider):
        mounted Docker socket (DooD), the host Docker daemon can resolve the paths.
        """
        paths = get_paths()
        paths.ensure_thread_dirs(thread_id)
        user_id = get_effective_user_id()
        paths.ensure_thread_dirs(thread_id, user_id=user_id)

        return [
            (paths.host_sandbox_work_dir(thread_id), f"{VIRTUAL_PATH_PREFIX}/workspace", False),
            (paths.host_sandbox_uploads_dir(thread_id), f"{VIRTUAL_PATH_PREFIX}/uploads", False),
            (paths.host_sandbox_outputs_dir(thread_id), f"{VIRTUAL_PATH_PREFIX}/outputs", False),
            (paths.host_sandbox_work_dir(thread_id, user_id=user_id), f"{VIRTUAL_PATH_PREFIX}/workspace", False),
            (paths.host_sandbox_uploads_dir(thread_id, user_id=user_id), f"{VIRTUAL_PATH_PREFIX}/uploads", False),
            (paths.host_sandbox_outputs_dir(thread_id, user_id=user_id), f"{VIRTUAL_PATH_PREFIX}/outputs", False),
            # ACP workspace: read-only inside the sandbox (lead agent reads results;
            # the ACP subprocess writes from the host side, not from within the container).
            (paths.host_acp_workspace_dir(thread_id), "/mnt/acp-workspace", True),
            (paths.host_acp_workspace_dir(thread_id, user_id=user_id), "/mnt/acp-workspace", True),
        ]

    @staticmethod
@@ -490,8 +492,9 @@ class AioSandboxProvider(SandboxProvider):
        across multiple processes, preventing container-name conflicts.
        """
        paths = get_paths()
        paths.ensure_thread_dirs(thread_id)
        lock_path = paths.thread_dir(thread_id) / f"{sandbox_id}.lock"
        user_id = get_effective_user_id()
        paths.ensure_thread_dirs(thread_id, user_id=user_id)
        lock_path = paths.thread_dir(thread_id, user_id=user_id) / f"{sandbox_id}.lock"

        with open(lock_path, "a", encoding="utf-8") as lock_file:
            locked = False

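Each mount entry above is a (host_path, container_path, read_only) triple. Purely as an illustration of that shape, a hypothetical helper (not part of the provider) that renders the triples as docker CLI arguments:

def mounts_to_docker_args(mounts: list[tuple[str, str, bool]]) -> list[str]:
    """Render mount triples as `docker run -v` arguments (illustration only)."""
    args: list[str] = []
    for host, container, read_only in mounts:
        args += ["-v", f"{host}:{container}" + (":ro" if read_only else "")]
    return args
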
@@ -11,10 +11,12 @@ from pydantic import BaseModel, ConfigDict, Field
from deerflow.config.acp_config import load_acp_config_from_dict
from deerflow.config.agents_api_config import AgentsApiConfig, load_agents_api_config_from_dict
from deerflow.config.checkpointer_config import CheckpointerConfig, load_checkpointer_config_from_dict
from deerflow.config.database_config import DatabaseConfig
from deerflow.config.extensions_config import ExtensionsConfig
from deerflow.config.guardrails_config import GuardrailsConfig, load_guardrails_config_from_dict
from deerflow.config.memory_config import MemoryConfig, load_memory_config_from_dict
from deerflow.config.model_config import ModelConfig
from deerflow.config.run_events_config import RunEventsConfig
from deerflow.config.sandbox_config import SandboxConfig
from deerflow.config.skill_evolution_config import SkillEvolutionConfig
from deerflow.config.skills_config import SkillsConfig
@@ -31,6 +33,12 @@ load_dotenv()
logger = logging.getLogger(__name__)


CONFIG_FILE_DATABASE_DEFAULTS = {
    "backend": "sqlite",
    "sqlite_dir": ".deer-flow/data",
}


class CircuitBreakerConfig(BaseModel):
    """Configuration for the LLM Circuit Breaker."""

@@ -65,7 +73,9 @@ class AppConfig(BaseModel):
    subagents: SubagentsAppConfig = Field(default_factory=SubagentsAppConfig, description="Subagent runtime configuration")
    guardrails: GuardrailsConfig = Field(default_factory=GuardrailsConfig, description="Guardrail middleware configuration")
    circuit_breaker: CircuitBreakerConfig = Field(default_factory=CircuitBreakerConfig, description="LLM circuit breaker configuration")
    model_config = ConfigDict(extra="allow", frozen=False)
    model_config = ConfigDict(extra="allow")
    database: DatabaseConfig = Field(default_factory=DatabaseConfig, description="Unified database backend configuration")
    run_events: RunEventsConfig = Field(default_factory=RunEventsConfig, description="Run event storage configuration")
    checkpointer: CheckpointerConfig | None = Field(default=None, description="Checkpointer configuration")
    stream_bridge: StreamBridgeConfig | None = Field(default=None, description="Stream bridge configuration")

@@ -114,6 +124,7 @@ class AppConfig(BaseModel):
        cls._check_config_version(config_data, resolved_path)

        config_data = cls.resolve_env_variables(config_data)
        cls._apply_database_defaults(config_data)

        # Load title config if present
        if "title" in config_data:
@@ -165,6 +176,18 @@ class AppConfig(BaseModel):
        result = cls.model_validate(config_data)
        return result

    @classmethod
    def _apply_database_defaults(cls, config_data: dict[str, Any]) -> None:
        """Apply config.yaml defaults for persistence when the section is absent."""
        database_config = config_data.get("database")
        if database_config is None:
            database_config = {}
            config_data["database"] = database_config
        if not isinstance(database_config, dict):
            return
        for key, value in CONFIG_FILE_DATABASE_DEFAULTS.items():
            database_config.setdefault(key, value)

    @classmethod
    def _check_config_version(cls, config_data: dict, config_path: Path) -> None:
        """Check if the user's config.yaml is outdated compared to config.example.yaml.
@@ -269,6 +292,9 @@ class AppConfig(BaseModel):
        return next((group for group in self.tool_groups if group.name == name), None)


# Compatibility singleton layer for code paths that have not yet been
# migrated to explicit ``AppConfig`` threading. New composition roots should
# prefer constructing ``AppConfig`` once and passing it down directly.
_app_config: AppConfig | None = None
_app_config_path: Path | None = None
_app_config_mtime: float | None = None

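A quick sketch of what `_apply_database_defaults` does to a loaded config dict; the `setdefault` loop fills only the keys the user left out:

cfg: dict = {}
AppConfig._apply_database_defaults(cfg)
# cfg == {"database": {"backend": "sqlite", "sqlite_dir": ".deer-flow/data"}}

cfg = {"database": {"backend": "postgres"}}
AppConfig._apply_database_defaults(cfg)
# backend stays "postgres"; only the missing sqlite_dir default is added
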
backend/packages/harness/deerflow/config/database_config.py
@@ -0,0 +1,102 @@
"""Unified database backend configuration.

Controls BOTH the LangGraph checkpointer and the DeerFlow application
persistence layer (runs, threads metadata, users, etc.). The user
configures one backend; the system handles physical separation details.

SQLite mode: checkpointer and app share a single .db file
({sqlite_dir}/deerflow.db) with WAL journal mode enabled on every
connection. WAL allows concurrent readers and a single writer without
blocking, making a unified file safe for both workloads. Writers
that contend for the lock wait via the default 5-second sqlite3
busy timeout rather than failing immediately.

Postgres mode: both use the same database URL but maintain independent
connection pools with different lifecycles.

Memory mode: checkpointer uses MemorySaver, app uses in-memory stores.
No database is initialized.

Sensitive values (postgres_url) should use $VAR syntax in config.yaml
to reference environment variables from .env:

    database:
      backend: postgres
      postgres_url: $DATABASE_URL

The $VAR resolution is handled by AppConfig.resolve_env_variables()
before this config is instantiated -- DatabaseConfig itself does not
need to do any environment variable processing.
"""

from __future__ import annotations

import os
from typing import Literal

from pydantic import BaseModel, Field


class DatabaseConfig(BaseModel):
    backend: Literal["memory", "sqlite", "postgres"] = Field(
        default="memory",
        description=("Storage backend for both checkpointer and application data. 'memory' for development (no persistence across restarts), 'sqlite' for single-node deployment, 'postgres' for production multi-node deployment."),
    )
    sqlite_dir: str = Field(
        default=".deer-flow/data",
        description=("Directory for the SQLite database file. Both checkpointer and application data share {sqlite_dir}/deerflow.db."),
    )
    postgres_url: str = Field(
        default="",
        description=(
            "PostgreSQL connection URL, shared by checkpointer and app. "
            "Use $DATABASE_URL in config.yaml to reference .env. "
            "Example: postgresql://user:pass@host:5432/deerflow "
            "(the +asyncpg driver suffix is added automatically where needed)."
        ),
    )
    echo_sql: bool = Field(
        default=False,
        description="Echo all SQL statements to log (debug only).",
    )
    pool_size: int = Field(
        default=5,
        description="Connection pool size for the app ORM engine (postgres only).",
    )

    # -- Derived helpers (not user-configured) --

    @property
    def _resolved_sqlite_dir(self) -> str:
        """Resolve sqlite_dir to an absolute path (relative to CWD)."""
        from pathlib import Path

        return str(Path(self.sqlite_dir).resolve())

    @property
    def sqlite_path(self) -> str:
        """Unified SQLite file path shared by checkpointer and app."""
        return os.path.join(self._resolved_sqlite_dir, "deerflow.db")

    # Backward-compatible aliases
    @property
    def checkpointer_sqlite_path(self) -> str:
        """SQLite file path for the LangGraph checkpointer (alias for sqlite_path)."""
        return self.sqlite_path

    @property
    def app_sqlite_path(self) -> str:
        """SQLite file path for application ORM data (alias for sqlite_path)."""
        return self.sqlite_path

    @property
    def app_sqlalchemy_url(self) -> str:
        """SQLAlchemy async URL for the application ORM engine."""
        if self.backend == "sqlite":
            return f"sqlite+aiosqlite:///{self.sqlite_path}"
        if self.backend == "postgres":
            url = self.postgres_url
            if url.startswith("postgresql://"):
                url = url.replace("postgresql://", "postgresql+asyncpg://", 1)
            return url
        raise ValueError(f"No SQLAlchemy URL for backend={self.backend!r}")

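A sketch of the `app_sqlalchemy_url` derivation above (paths assume a POSIX host; relative `sqlite_dir` values resolve against the current working directory):

cfg = DatabaseConfig(backend="sqlite", sqlite_dir="/srv/deer")
assert cfg.app_sqlalchemy_url == "sqlite+aiosqlite:////srv/deer/deerflow.db"

cfg = DatabaseConfig(backend="postgres", postgres_url="postgresql://u:p@db:5432/deerflow")
assert cfg.app_sqlalchemy_url == "postgresql+asyncpg://u:p@db:5432/deerflow"
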
@@ -14,8 +14,9 @@ class MemoryConfig(BaseModel):
        default="",
        description=(
            "Path to store memory data. "
            "If empty, defaults to `{base_dir}/memory.json` (see Paths.memory_file). "
            "Absolute paths are used as-is. "
            "If empty, defaults to per-user memory at `{base_dir}/users/{user_id}/memory.json`. "
            "Absolute paths are used as-is and opt out of per-user isolation "
            "(all users share the same file). "
            "Relative paths are resolved against `Paths.base_dir` "
            "(not the backend working directory). "
            "Note: if you previously set this to `.deer-flow/memory.json`, "

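The resolution rules described above, condensed into a sketch. `resolve_memory_path` is a hypothetical helper written only for illustration; the real logic lives behind `Paths`:

from pathlib import Path

def resolve_memory_path(configured: str, base_dir: Path, user_id: str) -> Path:
    if not configured:                   # empty -> per-user default
        return base_dir / "users" / user_id / "memory.json"
    p = Path(configured)
    if p.is_absolute():                  # absolute -> shared by all users
        return p
    return base_dir / p                  # relative -> resolved against base_dir
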
@@ -7,6 +7,7 @@ from pathlib import Path, PureWindowsPath
VIRTUAL_PATH_PREFIX = "/mnt/user-data"

_SAFE_THREAD_ID_RE = re.compile(r"^[A-Za-z0-9_\-]+$")
_SAFE_USER_ID_RE = re.compile(r"^[A-Za-z0-9_\-]+$")


def _default_local_base_dir() -> Path:
@@ -22,6 +23,13 @@ def _validate_thread_id(thread_id: str) -> str:
    return thread_id


def _validate_user_id(user_id: str) -> str:
    """Validate a user ID before using it in filesystem paths."""
    if not _SAFE_USER_ID_RE.match(user_id):
        raise ValueError(f"Invalid user_id {user_id!r}: only alphanumeric characters, hyphens, and underscores are allowed.")
    return user_id


def _join_host_path(base: str, *parts: str) -> str:
    """Join host filesystem path segments while preserving native style.

@@ -134,44 +142,63 @@ class Paths:
        """Per-agent memory file: `{base_dir}/agents/{name}/memory.json`."""
        return self.agent_dir(name) / "memory.json"

    def thread_dir(self, thread_id: str) -> Path:
    def user_dir(self, user_id: str) -> Path:
        """Directory for a specific user: `{base_dir}/users/{user_id}/`."""
        return self.base_dir / "users" / _validate_user_id(user_id)

    def user_memory_file(self, user_id: str) -> Path:
        """Per-user memory file: `{base_dir}/users/{user_id}/memory.json`."""
        return self.user_dir(user_id) / "memory.json"

    def user_agent_memory_file(self, user_id: str, agent_name: str) -> Path:
        """Per-user per-agent memory: `{base_dir}/users/{user_id}/agents/{name}/memory.json`."""
        return self.user_dir(user_id) / "agents" / agent_name.lower() / "memory.json"

    def thread_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for a thread's data: `{base_dir}/threads/{thread_id}/`
        Host path for a thread's data.

        When *user_id* is provided:
            `{base_dir}/users/{user_id}/threads/{thread_id}/`
        Otherwise (legacy layout):
            `{base_dir}/threads/{thread_id}/`

        This directory contains a `user-data/` subdirectory that is mounted
        as `/mnt/user-data/` inside the sandbox.

        Raises:
            ValueError: If `thread_id` contains unsafe characters (path separators
                or `..`) that could cause directory traversal.
            ValueError: If `thread_id` or `user_id` contains unsafe characters (path
                separators or `..`) that could cause directory traversal.
        """
        if user_id is not None:
            return self.user_dir(user_id) / "threads" / _validate_thread_id(thread_id)
        return self.base_dir / "threads" / _validate_thread_id(thread_id)

    def sandbox_work_dir(self, thread_id: str) -> Path:
    def sandbox_work_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for the agent's workspace directory.
        Host: `{base_dir}/threads/{thread_id}/user-data/workspace/`
        Sandbox: `/mnt/user-data/workspace/`
        """
        return self.thread_dir(thread_id) / "user-data" / "workspace"
        return self.thread_dir(thread_id, user_id=user_id) / "user-data" / "workspace"

    def sandbox_uploads_dir(self, thread_id: str) -> Path:
    def sandbox_uploads_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for user-uploaded files.
        Host: `{base_dir}/threads/{thread_id}/user-data/uploads/`
        Sandbox: `/mnt/user-data/uploads/`
        """
        return self.thread_dir(thread_id) / "user-data" / "uploads"
        return self.thread_dir(thread_id, user_id=user_id) / "user-data" / "uploads"

    def sandbox_outputs_dir(self, thread_id: str) -> Path:
    def sandbox_outputs_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for agent-generated artifacts.
        Host: `{base_dir}/threads/{thread_id}/user-data/outputs/`
        Sandbox: `/mnt/user-data/outputs/`
        """
        return self.thread_dir(thread_id) / "user-data" / "outputs"
        return self.thread_dir(thread_id, user_id=user_id) / "user-data" / "outputs"

    def acp_workspace_dir(self, thread_id: str) -> Path:
    def acp_workspace_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for the ACP workspace of a specific thread.
        Host: `{base_dir}/threads/{thread_id}/acp-workspace/`
@@ -180,41 +207,43 @@ class Paths:
        Each thread gets its own isolated ACP workspace so that concurrent
        sessions cannot read each other's ACP agent outputs.
        """
        return self.thread_dir(thread_id) / "acp-workspace"
        return self.thread_dir(thread_id, user_id=user_id) / "acp-workspace"

    def sandbox_user_data_dir(self, thread_id: str) -> Path:
    def sandbox_user_data_dir(self, thread_id: str, *, user_id: str | None = None) -> Path:
        """
        Host path for the user-data root.
        Host: `{base_dir}/threads/{thread_id}/user-data/`
        Sandbox: `/mnt/user-data/`
        """
        return self.thread_dir(thread_id) / "user-data"
        return self.thread_dir(thread_id, user_id=user_id) / "user-data"

    def host_thread_dir(self, thread_id: str) -> str:
    def host_thread_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for a thread directory, preserving Windows path syntax."""
        if user_id is not None:
            return _join_host_path(self._host_base_dir_str(), "users", _validate_user_id(user_id), "threads", _validate_thread_id(thread_id))
        return _join_host_path(self._host_base_dir_str(), "threads", _validate_thread_id(thread_id))

    def host_sandbox_user_data_dir(self, thread_id: str) -> str:
    def host_sandbox_user_data_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for a thread's user-data root."""
        return _join_host_path(self.host_thread_dir(thread_id), "user-data")
        return _join_host_path(self.host_thread_dir(thread_id, user_id=user_id), "user-data")

    def host_sandbox_work_dir(self, thread_id: str) -> str:
    def host_sandbox_work_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for the workspace mount source."""
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id), "workspace")
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id, user_id=user_id), "workspace")

    def host_sandbox_uploads_dir(self, thread_id: str) -> str:
    def host_sandbox_uploads_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for the uploads mount source."""
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id), "uploads")
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id, user_id=user_id), "uploads")

    def host_sandbox_outputs_dir(self, thread_id: str) -> str:
    def host_sandbox_outputs_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for the outputs mount source."""
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id), "outputs")
        return _join_host_path(self.host_sandbox_user_data_dir(thread_id, user_id=user_id), "outputs")

    def host_acp_workspace_dir(self, thread_id: str) -> str:
    def host_acp_workspace_dir(self, thread_id: str, *, user_id: str | None = None) -> str:
        """Host path for the ACP workspace mount source."""
        return _join_host_path(self.host_thread_dir(thread_id), "acp-workspace")
        return _join_host_path(self.host_thread_dir(thread_id, user_id=user_id), "acp-workspace")

    def ensure_thread_dirs(self, thread_id: str) -> None:
    def ensure_thread_dirs(self, thread_id: str, *, user_id: str | None = None) -> None:
        """Create all standard sandbox directories for a thread.

        Directories are created with mode 0o777 so that sandbox containers
@@ -228,24 +257,24 @@ class Paths:
        ACP agent invocation.
        """
        for d in [
            self.sandbox_work_dir(thread_id),
            self.sandbox_uploads_dir(thread_id),
            self.sandbox_outputs_dir(thread_id),
            self.acp_workspace_dir(thread_id),
            self.sandbox_work_dir(thread_id, user_id=user_id),
            self.sandbox_uploads_dir(thread_id, user_id=user_id),
            self.sandbox_outputs_dir(thread_id, user_id=user_id),
            self.acp_workspace_dir(thread_id, user_id=user_id),
        ]:
            d.mkdir(parents=True, exist_ok=True)
            d.chmod(0o777)

    def delete_thread_dir(self, thread_id: str) -> None:
    def delete_thread_dir(self, thread_id: str, *, user_id: str | None = None) -> None:
        """Delete all persisted data for a thread.

        The operation is idempotent: missing thread directories are ignored.
        """
        thread_dir = self.thread_dir(thread_id)
        thread_dir = self.thread_dir(thread_id, user_id=user_id)
        if thread_dir.exists():
            shutil.rmtree(thread_dir)

    def resolve_virtual_path(self, thread_id: str, virtual_path: str) -> Path:
    def resolve_virtual_path(self, thread_id: str, virtual_path: str, *, user_id: str | None = None) -> Path:
        """Resolve a sandbox virtual path to the actual host filesystem path.

        Args:
@@ -253,6 +282,7 @@ class Paths:
            virtual_path: Virtual path as seen inside the sandbox, e.g.
                ``/mnt/user-data/outputs/report.pdf``.
                Leading slashes are stripped before matching.
            user_id: Optional user ID for user-scoped path resolution.

        Returns:
            The resolved absolute host filesystem path.
@@ -270,7 +300,7 @@ class Paths:
            raise ValueError(f"Path must start with /{prefix}")

        relative = stripped[len(prefix) :].lstrip("/")
        base = self.sandbox_user_data_dir(thread_id).resolve()
        base = self.sandbox_user_data_dir(thread_id, user_id=user_id).resolve()
        actual = (base / relative).resolve()

        try:

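How the two layouts compare in practice (illustrative only; `{base_dir}` stands in for the configured base directory and the IDs are arbitrary):

paths = get_paths()

paths.thread_dir("t1")                   # -> {base_dir}/threads/t1  (legacy)
paths.thread_dir("t1", user_id="alice")  # -> {base_dir}/users/alice/threads/t1

paths.resolve_virtual_path("t1", "/mnt/user-data/outputs/report.pdf", user_id="alice")
# -> {base_dir}/users/alice/threads/t1/user-data/outputs/report.pdf
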
backend/packages/harness/deerflow/config/run_events_config.py
@@ -0,0 +1,33 @@
"""Run event storage configuration.

Controls where run events (messages + execution traces) are persisted.

Backends:
- memory: In-memory storage, data lost on restart. Suitable for
  development and testing.
- db: SQL database via SQLAlchemy ORM. Provides full query capability.
  Suitable for production deployments.
- jsonl: Append-only JSONL files. Lightweight alternative for
  single-node deployments that need persistence without a database.
"""

from __future__ import annotations

from typing import Literal

from pydantic import BaseModel, Field


class RunEventsConfig(BaseModel):
    backend: Literal["memory", "db", "jsonl"] = Field(
        default="memory",
        description="Storage backend for run events. 'memory' for development (no persistence), 'db' for production (SQL queries), 'jsonl' for lightweight single-node persistence.",
    )
    max_trace_content: int = Field(
        default=10240,
        description="Maximum trace content size in bytes before truncation (db backend only).",
    )
    track_token_usage: bool = Field(
        default=True,
        description="Whether RunJournal should accumulate token counts to RunRow.",
    )

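Constructing the config directly; the values here are arbitrary examples, not recommended settings:

run_events = RunEventsConfig(backend="jsonl", max_trace_content=4096)
assert run_events.track_token_usage      # defaults to True
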
@@ -3,6 +3,7 @@ import logging
from langchain.chat_models import BaseChatModel

from deerflow.config import get_app_config
from deerflow.config.app_config import AppConfig
from deerflow.reflection import resolve_class
from deerflow.tracing import build_tracing_callbacks

@@ -46,7 +47,7 @@ def _enable_stream_usage_by_default(model_use_path: str, model_settings_from_con
    model_settings_from_config["stream_usage"] = True


def create_chat_model(name: str | None = None, thinking_enabled: bool = False, **kwargs) -> BaseChatModel:
def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *, app_config: AppConfig | None = None, **kwargs) -> BaseChatModel:
    """Create a chat model instance from the config.

    Args:
@@ -55,7 +56,7 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
    Returns:
        A chat model instance.
    """
    config = get_app_config()
    config = app_config or get_app_config()
    if name is None:
        name = config.models[0].name
    model_config = config.get_model_config(name)
@@ -137,7 +138,16 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
    # Enforce max_retries constraint to prevent cascading timeouts.
    model_settings_from_config["max_retries"] = model_settings_from_config.get("max_retries", 1)

    model_instance = model_class(**{**model_settings_from_config, **kwargs})
    # Ensure stream_usage is enabled so that token usage metadata is available
    # in streaming responses. LangChain's BaseChatOpenAI only defaults
    # stream_usage=True when no custom base_url/api_base is set, so models
    # hitting third-party endpoints (e.g. doubao, deepseek) silently lose
    # usage data. We default it to True unless explicitly configured.
    if "stream_usage" not in model_settings_from_config and "stream_usage" not in kwargs:
        if "stream_usage" in getattr(model_class, "model_fields", {}):
            model_settings_from_config["stream_usage"] = True

    model_instance = model_class(**kwargs, **model_settings_from_config)

    callbacks = build_tracing_callbacks()
    if callbacks:

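Two call patterns this change enables; the model name is an arbitrary example, not taken from the diff:

model = create_chat_model("doubao-pro", stream_usage=False)   # explicit kwarg suppresses the default

explicit_config = get_app_config()        # or an AppConfig built by the caller
model = create_chat_model("doubao-pro", app_config=explicit_config)
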
backend/packages/harness/deerflow/persistence/__init__.py
@@ -0,0 +1,13 @@
"""DeerFlow application persistence layer (SQLAlchemy 2.0 async ORM).

This module manages DeerFlow's own application data -- runs metadata,
thread ownership, cron jobs, users. It is completely separate from
LangGraph's checkpointer, which manages graph execution state.

Usage:
    from deerflow.persistence import init_engine, close_engine, get_session_factory
"""

from deerflow.persistence.engine import close_engine, get_engine, get_session_factory, init_engine

__all__ = ["close_engine", "get_engine", "get_session_factory", "init_engine"]

backend/packages/harness/deerflow/persistence/base.py
@@ -0,0 +1,40 @@
"""SQLAlchemy declarative base with automatic to_dict support.

All DeerFlow ORM models inherit from this Base. It provides a generic
to_dict() method via SQLAlchemy's inspect() so individual models don't
need to write their own serialization logic.

LangGraph's checkpointer tables are NOT managed by this Base.
"""

from __future__ import annotations

from sqlalchemy import inspect as sa_inspect
from sqlalchemy.orm import DeclarativeBase


class Base(DeclarativeBase):
    """Base class for all DeerFlow ORM models.

    Provides:
    - Automatic to_dict() via SQLAlchemy column inspection.
    - Standard __repr__() showing all column values.
    """

    def to_dict(self, *, exclude: set[str] | None = None) -> dict:
        """Convert ORM instance to plain dict.

        Uses SQLAlchemy's inspect() to iterate mapped column attributes.

        Args:
            exclude: Optional set of column keys to omit.

        Returns:
            Dict of {column_key: value} for all mapped columns.
        """
        exclude = exclude or set()
        return {c.key: getattr(self, c.key) for c in sa_inspect(type(self)).mapper.column_attrs if c.key not in exclude}

    def __repr__(self) -> str:
        cols = ", ".join(f"{c.key}={getattr(self, c.key)!r}" for c in sa_inspect(type(self)).mapper.column_attrs)
        return f"{type(self).__name__}({cols})"

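A throwaway model exercising `to_dict()`; `DemoRow` exists only for this sketch:

from sqlalchemy import String
from sqlalchemy.orm import Mapped, mapped_column

class DemoRow(Base):
    __tablename__ = "demo"
    id: Mapped[str] = mapped_column(String(8), primary_key=True)
    label: Mapped[str] = mapped_column(String(32))

row = DemoRow(id="1", label="hello")
assert row.to_dict() == {"id": "1", "label": "hello"}
assert row.to_dict(exclude={"label"}) == {"id": "1"}
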
backend/packages/harness/deerflow/persistence/engine.py
@@ -0,0 +1,190 @@
"""Async SQLAlchemy engine lifecycle management.

Initializes at Gateway startup, provides session factory for
repositories, disposes at shutdown.

When database.backend="memory", init_engine is a no-op and
get_session_factory() returns None. Repositories must check for
None and fall back to in-memory implementations.
"""

from __future__ import annotations

import json
import logging

from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine


def _json_serializer(obj: object) -> str:
    """JSON serializer with ensure_ascii=False for Chinese character support."""
    return json.dumps(obj, ensure_ascii=False)


logger = logging.getLogger(__name__)

_engine: AsyncEngine | None = None
_session_factory: async_sessionmaker[AsyncSession] | None = None


async def _auto_create_postgres_db(url: str) -> None:
    """Connect to the ``postgres`` maintenance DB and CREATE DATABASE.

    The target database name is extracted from *url*. The connection is
    made to the default ``postgres`` database on the same server using
    ``AUTOCOMMIT`` isolation (CREATE DATABASE cannot run inside a
    transaction).
    """
    from sqlalchemy import text
    from sqlalchemy.engine.url import make_url

    parsed = make_url(url)
    db_name = parsed.database
    if not db_name:
        raise ValueError("Cannot auto-create database: no database name in URL")

    # Connect to the default 'postgres' database to issue CREATE DATABASE
    maint_url = parsed.set(database="postgres")
    maint_engine = create_async_engine(maint_url, isolation_level="AUTOCOMMIT")
    try:
        async with maint_engine.connect() as conn:
            await conn.execute(text(f'CREATE DATABASE "{db_name}"'))
            logger.info("Auto-created PostgreSQL database: %s", db_name)
    finally:
        await maint_engine.dispose()


async def init_engine(
    backend: str,
    *,
    url: str = "",
    echo: bool = False,
    pool_size: int = 5,
    sqlite_dir: str = "",
) -> None:
    """Create the async engine and session factory, then auto-create tables.

    Args:
        backend: "memory", "sqlite", or "postgres".
        url: SQLAlchemy async URL (for sqlite/postgres).
        echo: Echo SQL to log.
        pool_size: Postgres connection pool size.
        sqlite_dir: Directory to create for SQLite (ensured to exist).
    """
    global _engine, _session_factory

    if backend == "memory":
        logger.info("Persistence backend=memory -- ORM engine not initialized")
        return

    if backend == "postgres":
        try:
            import asyncpg  # noqa: F401
        except ImportError:
            raise ImportError("database.backend is set to 'postgres' but asyncpg is not installed.\nInstall it with:\n  uv sync --extra postgres\nOr switch to backend: sqlite in config.yaml for single-node deployment.") from None

    if backend == "sqlite":
        import os

        from sqlalchemy import event

        os.makedirs(sqlite_dir or ".", exist_ok=True)
        _engine = create_async_engine(url, echo=echo, json_serializer=_json_serializer)

        # Enable WAL on every new connection. SQLite PRAGMA settings are
        # per-connection, so we wire the listener instead of running PRAGMA
        # once at startup. WAL gives concurrent reads + writers without
        # blocking and is the standard recommendation for any production
        # SQLite deployment (TC-UPG-06 in AUTH_TEST_PLAN.md). The companion
        # ``synchronous=NORMAL`` is the safe-and-fast pairing — fsync only
        # at WAL checkpoint boundaries instead of every commit.
        # Note: we do not set PRAGMA busy_timeout here — Python's sqlite3
        # driver already defaults to a 5-second busy timeout (see the
        # ``timeout`` kwarg of ``sqlite3.connect``), and aiosqlite /
        # SQLAlchemy's aiosqlite dialect inherit that default. Setting
        # it again would be a no-op.
        @event.listens_for(_engine.sync_engine, "connect")
        def _enable_sqlite_wal(dbapi_conn, _record):  # noqa: ARG001 — SQLAlchemy contract
            cursor = dbapi_conn.cursor()
            try:
                cursor.execute("PRAGMA journal_mode=WAL;")
                cursor.execute("PRAGMA synchronous=NORMAL;")
                cursor.execute("PRAGMA foreign_keys=ON;")
            finally:
                cursor.close()
    elif backend == "postgres":
        _engine = create_async_engine(
            url,
            echo=echo,
            pool_size=pool_size,
            pool_pre_ping=True,
            json_serializer=_json_serializer,
        )
    else:
        raise ValueError(f"Unknown persistence backend: {backend!r}")

    _session_factory = async_sessionmaker(_engine, expire_on_commit=False)

    # Auto-create tables (dev convenience). Production should use Alembic.
    from deerflow.persistence.base import Base

    # Import all models so Base.metadata discovers them.
    # When no models exist yet (scaffolding phase), this is a no-op.
    try:
        import deerflow.persistence.models  # noqa: F401
    except ImportError:
        # Models package not yet available — tables won't be auto-created.
        # This is expected during initial scaffolding or minimal installs.
        logger.debug("deerflow.persistence.models not found; skipping auto-create tables")

    try:
        async with _engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
    except Exception as exc:
        if backend == "postgres" and "does not exist" in str(exc):
            # Database not yet created — attempt to auto-create it, then retry.
            await _auto_create_postgres_db(url)
            # Rebuild engine against the now-existing database
            await _engine.dispose()
            _engine = create_async_engine(url, echo=echo, pool_size=pool_size, pool_pre_ping=True, json_serializer=_json_serializer)
            _session_factory = async_sessionmaker(_engine, expire_on_commit=False)
            async with _engine.begin() as conn:
                await conn.run_sync(Base.metadata.create_all)
        else:
            raise

    logger.info("Persistence engine initialized: backend=%s", backend)


async def init_engine_from_config(config) -> None:
    """Convenience: init engine from a DatabaseConfig object."""
    if config.backend == "memory":
        await init_engine("memory")
        return
    await init_engine(
        backend=config.backend,
        url=config.app_sqlalchemy_url,
        echo=config.echo_sql,
        pool_size=config.pool_size,
        sqlite_dir=config.sqlite_dir if config.backend == "sqlite" else "",
    )


def get_session_factory() -> async_sessionmaker[AsyncSession] | None:
    """Return the async session factory, or None if backend=memory."""
    return _session_factory


def get_engine() -> AsyncEngine | None:
    """Return the async engine, or None if not initialized."""
    return _engine


async def close_engine() -> None:
    """Dispose the engine, release all connections."""
    global _engine, _session_factory
    if _engine is not None:
        await _engine.dispose()
        logger.info("Persistence engine closed")
    _engine = None
    _session_factory = None

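The intended wiring at process startup and shutdown, sketched under the assumption that the caller holds the app's DatabaseConfig (e.g. inside a server lifespan hook):

from deerflow.persistence import close_engine, get_session_factory
from deerflow.persistence.engine import init_engine_from_config

async def gateway_lifespan(database_config) -> None:
    await init_engine_from_config(database_config)
    session_factory = get_session_factory()
    if session_factory is None:
        # backend == "memory": repositories fall back to in-memory stores
        pass
    try:
        ...  # serve requests
    finally:
        await close_engine()
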
backend/packages/harness/deerflow/persistence/feedback/__init__.py
@@ -0,0 +1,6 @@
"""Feedback persistence — ORM and SQL repository."""

from deerflow.persistence.feedback.model import FeedbackRow
from deerflow.persistence.feedback.sql import FeedbackRepository

__all__ = ["FeedbackRepository", "FeedbackRow"]

backend/packages/harness/deerflow/persistence/feedback/model.py
@@ -0,0 +1,32 @@
"""ORM model for user feedback on runs."""

from __future__ import annotations

from datetime import UTC, datetime

from sqlalchemy import DateTime, String, Text, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column

from deerflow.persistence.base import Base


class FeedbackRow(Base):
    __tablename__ = "feedback"

    __table_args__ = (UniqueConstraint("thread_id", "run_id", "user_id", name="uq_feedback_thread_run_user"),)

    feedback_id: Mapped[str] = mapped_column(String(64), primary_key=True)
    run_id: Mapped[str] = mapped_column(String(64), nullable=False, index=True)
    thread_id: Mapped[str] = mapped_column(String(64), nullable=False, index=True)
    user_id: Mapped[str | None] = mapped_column(String(64), index=True)
    message_id: Mapped[str | None] = mapped_column(String(64))
    # message_id is an optional RunEventStore event identifier —
    # allows feedback to target a specific message or the entire run

    rating: Mapped[int] = mapped_column(nullable=False)
    # +1 (thumbs-up) or -1 (thumbs-down)

    comment: Mapped[str | None] = mapped_column(Text)
    # Optional text feedback from the user

    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC))

backend/packages/harness/deerflow/persistence/feedback/sql.py
@@ -0,0 +1,217 @@
"""SQLAlchemy-backed feedback storage.

Each method acquires its own short-lived session.
"""

from __future__ import annotations

import uuid
from datetime import UTC, datetime

from sqlalchemy import case, func, select
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from deerflow.persistence.feedback.model import FeedbackRow
from deerflow.runtime.user_context import AUTO, _AutoSentinel, resolve_user_id


class FeedbackRepository:
    def __init__(self, session_factory: async_sessionmaker[AsyncSession]) -> None:
        self._sf = session_factory

    @staticmethod
    def _row_to_dict(row: FeedbackRow) -> dict:
        d = row.to_dict()
        val = d.get("created_at")
        if isinstance(val, datetime):
            d["created_at"] = val.isoformat()
        return d

    async def create(
        self,
        *,
        run_id: str,
        thread_id: str,
        rating: int,
        user_id: str | None | _AutoSentinel = AUTO,
        message_id: str | None = None,
        comment: str | None = None,
    ) -> dict:
        """Create a feedback record. rating must be +1 or -1."""
        if rating not in (1, -1):
            raise ValueError(f"rating must be +1 or -1, got {rating}")
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.create")
        row = FeedbackRow(
            feedback_id=str(uuid.uuid4()),
            run_id=run_id,
            thread_id=thread_id,
            user_id=resolved_user_id,
            message_id=message_id,
            rating=rating,
            comment=comment,
            created_at=datetime.now(UTC),
        )
        async with self._sf() as session:
            session.add(row)
            await session.commit()
            await session.refresh(row)
            return self._row_to_dict(row)

    async def get(
        self,
        feedback_id: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> dict | None:
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.get")
        async with self._sf() as session:
            row = await session.get(FeedbackRow, feedback_id)
            if row is None:
                return None
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return None
            return self._row_to_dict(row)

    async def list_by_run(
        self,
        thread_id: str,
        run_id: str,
        *,
        limit: int = 100,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> list[dict]:
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.list_by_run")
        stmt = select(FeedbackRow).where(FeedbackRow.thread_id == thread_id, FeedbackRow.run_id == run_id)
        if resolved_user_id is not None:
            stmt = stmt.where(FeedbackRow.user_id == resolved_user_id)
        stmt = stmt.order_by(FeedbackRow.created_at.asc()).limit(limit)
        async with self._sf() as session:
            result = await session.execute(stmt)
            return [self._row_to_dict(r) for r in result.scalars()]

    async def list_by_thread(
        self,
        thread_id: str,
        *,
        limit: int = 100,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> list[dict]:
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.list_by_thread")
        stmt = select(FeedbackRow).where(FeedbackRow.thread_id == thread_id)
        if resolved_user_id is not None:
            stmt = stmt.where(FeedbackRow.user_id == resolved_user_id)
        stmt = stmt.order_by(FeedbackRow.created_at.asc()).limit(limit)
        async with self._sf() as session:
            result = await session.execute(stmt)
            return [self._row_to_dict(r) for r in result.scalars()]

    async def delete(
        self,
        feedback_id: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> bool:
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.delete")
        async with self._sf() as session:
            row = await session.get(FeedbackRow, feedback_id)
            if row is None:
                return False
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return False
            await session.delete(row)
            await session.commit()
            return True

    async def upsert(
        self,
        *,
        run_id: str,
        thread_id: str,
        rating: int,
        user_id: str | None | _AutoSentinel = AUTO,
        comment: str | None = None,
    ) -> dict:
        """Create or update feedback for (thread_id, run_id, user_id). rating must be +1 or -1."""
        if rating not in (1, -1):
            raise ValueError(f"rating must be +1 or -1, got {rating}")
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.upsert")
        async with self._sf() as session:
            stmt = select(FeedbackRow).where(
                FeedbackRow.thread_id == thread_id,
                FeedbackRow.run_id == run_id,
                FeedbackRow.user_id == resolved_user_id,
            )
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            if row is not None:
                row.rating = rating
                row.comment = comment
                row.created_at = datetime.now(UTC)
            else:
                row = FeedbackRow(
                    feedback_id=str(uuid.uuid4()),
                    run_id=run_id,
                    thread_id=thread_id,
                    user_id=resolved_user_id,
                    rating=rating,
                    comment=comment,
                    created_at=datetime.now(UTC),
                )
                session.add(row)
            await session.commit()
            await session.refresh(row)
            return self._row_to_dict(row)

    async def delete_by_run(
        self,
        *,
        thread_id: str,
        run_id: str,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> bool:
        """Delete the current user's feedback for a run. Returns True if a record was deleted."""
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.delete_by_run")
        async with self._sf() as session:
            stmt = select(FeedbackRow).where(
                FeedbackRow.thread_id == thread_id,
                FeedbackRow.run_id == run_id,
                FeedbackRow.user_id == resolved_user_id,
            )
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            if row is None:
                return False
            await session.delete(row)
            await session.commit()
            return True

    async def list_by_thread_grouped(
        self,
        thread_id: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> dict[str, dict]:
        """Return feedback grouped by run_id for a thread: {run_id: feedback_dict}."""
        resolved_user_id = resolve_user_id(user_id, method_name="FeedbackRepository.list_by_thread_grouped")
        stmt = select(FeedbackRow).where(FeedbackRow.thread_id == thread_id)
        if resolved_user_id is not None:
            stmt = stmt.where(FeedbackRow.user_id == resolved_user_id)
        async with self._sf() as session:
            result = await session.execute(stmt)
            return {row.run_id: self._row_to_dict(row) for row in result.scalars()}

    async def aggregate_by_run(self, thread_id: str, run_id: str) -> dict:
        """Aggregate feedback stats for a run using database-side counting."""
        stmt = select(
            func.count().label("total"),
            func.coalesce(func.sum(case((FeedbackRow.rating == 1, 1), else_=0)), 0).label("positive"),
            func.coalesce(func.sum(case((FeedbackRow.rating == -1, 1), else_=0)), 0).label("negative"),
        ).where(FeedbackRow.thread_id == thread_id, FeedbackRow.run_id == run_id)
        async with self._sf() as session:
            row = (await session.execute(stmt)).one()
            return {
                "run_id": run_id,
                "total": row.total,
                "positive": row.positive,
                "negative": row.negative,
            }

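Typical use of the repository above; IDs are arbitrary and the session factory is assumed to come from an already-initialized engine:

from deerflow.persistence import get_session_factory

async def feedback_example() -> None:
    repo = FeedbackRepository(get_session_factory())
    await repo.upsert(run_id="r1", thread_id="t1", rating=1, comment="useful")
    stats = await repo.aggregate_by_run("t1", "r1")
    # stats == {"run_id": "r1", "total": 1, "positive": 1, "negative": 0}
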
@@ -0,0 +1,38 @@
[alembic]
script_location = %(here)s
# Default URL for offline mode / autogenerate.
# Runtime uses engine from DeerFlow config.
sqlalchemy.url = sqlite+aiosqlite:///./data/deerflow.db

[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

@@ -0,0 +1,67 @@
"""Alembic environment for DeerFlow application tables.

ONLY manages DeerFlow's tables (runs, threads_meta, cron_jobs, users).
LangGraph's checkpointer tables are managed by LangGraph itself -- they
have their own schema lifecycle and must not be touched by Alembic.
"""

from __future__ import annotations

import asyncio
import logging
from logging.config import fileConfig

from alembic import context
from sqlalchemy.ext.asyncio import create_async_engine

from deerflow.persistence.base import Base

# Import all models so metadata is populated.
try:
    import deerflow.persistence.models as models  # register ORM models with Base.metadata

    _ = models
except ImportError:
    # Models not available — migration will work with existing metadata only.
    logging.getLogger(__name__).warning("Could not import deerflow.persistence.models; Alembic may not detect all tables")

config = context.config
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

target_metadata = Base.metadata


def run_migrations_offline() -> None:
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        render_as_batch=True,
    )
    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection):
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        render_as_batch=True,  # Required for SQLite ALTER TABLE support
    )
    with context.begin_transaction():
        context.run_migrations()


async def run_migrations_online() -> None:
    connectable = create_async_engine(config.get_main_option("sqlalchemy.url"))
    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)
    await connectable.dispose()


if context.is_offline_mode():
    run_migrations_offline()
else:
    asyncio.run(run_migrations_online())

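With this environment in place, migrations run through the standard Alembic entry point. A programmatic invocation sketch (the `-c` path is an assumption about where alembic.ini lives relative to the caller; the CLI equivalent is `alembic upgrade head`):

from alembic.config import main as alembic_main

alembic_main(argv=["-c", "alembic.ini", "upgrade", "head"])
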
backend/packages/harness/deerflow/persistence/models/__init__.py
@@ -0,0 +1,23 @@
"""ORM model registration entry point.

Importing this module ensures all ORM models are registered with
``Base.metadata`` so Alembic autogenerate detects every table.

The actual ORM classes have moved to entity-specific subpackages:
- ``deerflow.persistence.thread_meta``
- ``deerflow.persistence.run``
- ``deerflow.persistence.feedback``
- ``deerflow.persistence.user``

``RunEventRow`` remains in ``deerflow.persistence.models.run_event`` because
its storage implementation lives in ``deerflow.runtime.events.store.db`` and
there is no matching entity directory.
"""

from deerflow.persistence.feedback.model import FeedbackRow
from deerflow.persistence.models.run_event import RunEventRow
from deerflow.persistence.run.model import RunRow
from deerflow.persistence.thread_meta.model import ThreadMetaRow
from deerflow.persistence.user.model import UserRow

__all__ = ["FeedbackRow", "RunEventRow", "RunRow", "ThreadMetaRow", "UserRow"]

backend/packages/harness/deerflow/persistence/models/run_event.py
@@ -0,0 +1,35 @@
"""ORM model for run events."""

from __future__ import annotations

from datetime import UTC, datetime

from sqlalchemy import JSON, DateTime, Index, String, Text, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column

from deerflow.persistence.base import Base


class RunEventRow(Base):
    __tablename__ = "run_events"

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    thread_id: Mapped[str] = mapped_column(String(64), nullable=False)
    run_id: Mapped[str] = mapped_column(String(64), nullable=False)
    # Owner of the conversation this event belongs to. Nullable for data
    # created before auth was introduced; populated by auth middleware on
    # new writes and by the boot-time orphan migration on existing rows.
    user_id: Mapped[str | None] = mapped_column(String(64), nullable=True, index=True)
    event_type: Mapped[str] = mapped_column(String(32), nullable=False)
    category: Mapped[str] = mapped_column(String(16), nullable=False)
    # "message" | "trace" | "lifecycle"
    content: Mapped[str] = mapped_column(Text, default="")
    event_metadata: Mapped[dict] = mapped_column(JSON, default=dict)
    seq: Mapped[int] = mapped_column(nullable=False)
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC))

    __table_args__ = (
        UniqueConstraint("thread_id", "seq", name="uq_events_thread_seq"),
        Index("ix_events_thread_cat_seq", "thread_id", "category", "seq"),
        Index("ix_events_run", "thread_id", "run_id", "seq"),
    )

backend/packages/harness/deerflow/persistence/run/__init__.py
@@ -0,0 +1,6 @@
"""Run metadata persistence — ORM and SQL repository."""

from deerflow.persistence.run.model import RunRow
from deerflow.persistence.run.sql import RunRepository

__all__ = ["RunRepository", "RunRow"]

backend/packages/harness/deerflow/persistence/run/model.py
@@ -0,0 +1,49 @@
"""ORM model for run metadata."""

from __future__ import annotations

from datetime import UTC, datetime

from sqlalchemy import JSON, DateTime, Index, String, Text
from sqlalchemy.orm import Mapped, mapped_column

from deerflow.persistence.base import Base


class RunRow(Base):
    __tablename__ = "runs"

    run_id: Mapped[str] = mapped_column(String(64), primary_key=True)
    thread_id: Mapped[str] = mapped_column(String(64), nullable=False, index=True)
    assistant_id: Mapped[str | None] = mapped_column(String(128))
    user_id: Mapped[str | None] = mapped_column(String(64), index=True)
    status: Mapped[str] = mapped_column(String(20), default="pending")
    # "pending" | "running" | "success" | "error" | "timeout" | "interrupted"

    model_name: Mapped[str | None] = mapped_column(String(128))
    multitask_strategy: Mapped[str] = mapped_column(String(20), default="reject")
    metadata_json: Mapped[dict] = mapped_column(JSON, default=dict)
    kwargs_json: Mapped[dict] = mapped_column(JSON, default=dict)
    error: Mapped[str | None] = mapped_column(Text)

    # Convenience fields (for listing pages without querying RunEventStore)
    message_count: Mapped[int] = mapped_column(default=0)
    first_human_message: Mapped[str | None] = mapped_column(Text)
    last_ai_message: Mapped[str | None] = mapped_column(Text)

    # Token usage (accumulated in-memory by RunJournal, written on run completion)
    total_input_tokens: Mapped[int] = mapped_column(default=0)
    total_output_tokens: Mapped[int] = mapped_column(default=0)
    total_tokens: Mapped[int] = mapped_column(default=0)
    llm_call_count: Mapped[int] = mapped_column(default=0)
    lead_agent_tokens: Mapped[int] = mapped_column(default=0)
    subagent_tokens: Mapped[int] = mapped_column(default=0)
    middleware_tokens: Mapped[int] = mapped_column(default=0)

    # Follow-up association
    follow_up_to_run_id: Mapped[str | None] = mapped_column(String(64))

    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC))
    updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC), onupdate=lambda: datetime.now(UTC))

    __table_args__ = (Index("ix_runs_thread_status", "thread_id", "status"),)

backend/packages/harness/deerflow/persistence/run/sql.py
@@ -0,0 +1,255 @@
"""SQLAlchemy-backed RunStore implementation.

Each method acquires and releases its own short-lived session.
Run status updates happen from background workers that may live
minutes -- we don't hold connections across long execution.
"""

from __future__ import annotations

import json
from datetime import UTC, datetime
from typing import Any

from sqlalchemy import func, select, update
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from deerflow.persistence.run.model import RunRow
from deerflow.runtime.runs.store.base import RunStore
from deerflow.runtime.user_context import AUTO, _AutoSentinel, resolve_user_id


class RunRepository(RunStore):
    def __init__(self, session_factory: async_sessionmaker[AsyncSession]) -> None:
        self._sf = session_factory

    @staticmethod
    def _safe_json(obj: Any) -> Any:
        """Ensure obj is JSON-serializable. Falls back to model_dump() or str()."""
        if obj is None:
            return None
        if isinstance(obj, (str, int, float, bool)):
            return obj
        if isinstance(obj, dict):
            return {k: RunRepository._safe_json(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [RunRepository._safe_json(v) for v in obj]
        if hasattr(obj, "model_dump"):
            try:
                return obj.model_dump()
            except Exception:
                pass
        if hasattr(obj, "dict"):
            try:
                return obj.dict()
            except Exception:
                pass
        try:
            json.dumps(obj)
            return obj
        except (TypeError, ValueError):
            return str(obj)

    @staticmethod
    def _row_to_dict(row: RunRow) -> dict[str, Any]:
        d = row.to_dict()
        # Remap JSON columns to match RunStore interface
        d["metadata"] = d.pop("metadata_json", {})
        d["kwargs"] = d.pop("kwargs_json", {})
        # Convert datetime to ISO string for consistency with MemoryRunStore
        for key in ("created_at", "updated_at"):
            val = d.get(key)
            if isinstance(val, datetime):
                d[key] = val.isoformat()
        return d

    async def put(
        self,
        run_id,
        *,
        thread_id,
        assistant_id=None,
        user_id: str | None | _AutoSentinel = AUTO,
        status="pending",
        multitask_strategy="reject",
        metadata=None,
        kwargs=None,
        error=None,
        created_at=None,
        follow_up_to_run_id=None,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="RunRepository.put")
        now = datetime.now(UTC)
        row = RunRow(
            run_id=run_id,
            thread_id=thread_id,
            assistant_id=assistant_id,
            user_id=resolved_user_id,
            status=status,
            multitask_strategy=multitask_strategy,
            metadata_json=self._safe_json(metadata) or {},
            kwargs_json=self._safe_json(kwargs) or {},
            error=error,
            follow_up_to_run_id=follow_up_to_run_id,
            created_at=datetime.fromisoformat(created_at) if created_at else now,
            updated_at=now,
        )
        async with self._sf() as session:
            session.add(row)
            await session.commit()

    async def get(
        self,
        run_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="RunRepository.get")
        async with self._sf() as session:
            row = await session.get(RunRow, run_id)
            if row is None:
                return None
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return None
            return self._row_to_dict(row)

    async def list_by_thread(
        self,
        thread_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
        limit=100,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="RunRepository.list_by_thread")
        stmt = select(RunRow).where(RunRow.thread_id == thread_id)
        if resolved_user_id is not None:
            stmt = stmt.where(RunRow.user_id == resolved_user_id)
        stmt = stmt.order_by(RunRow.created_at.desc()).limit(limit)
        async with self._sf() as session:
            result = await session.execute(stmt)
            return [self._row_to_dict(r) for r in result.scalars()]

    async def update_status(self, run_id, status, *, error=None):
        values: dict[str, Any] = {"status": status, "updated_at": datetime.now(UTC)}
        if error is not None:
            values["error"] = error
        async with self._sf() as session:
            await session.execute(update(RunRow).where(RunRow.run_id == run_id).values(**values))
            await session.commit()

    async def delete(
        self,
        run_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="RunRepository.delete")
        async with self._sf() as session:
            row = await session.get(RunRow, run_id)
            if row is None:
                return
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return
            await session.delete(row)
            await session.commit()

    async def list_pending(self, *, before=None):
        if before is None:
            before_dt = datetime.now(UTC)
        elif isinstance(before, datetime):
            before_dt = before
        else:
            before_dt = datetime.fromisoformat(before)
        stmt = select(RunRow).where(RunRow.status == "pending", RunRow.created_at <= before_dt).order_by(RunRow.created_at.asc())
        async with self._sf() as session:
            result = await session.execute(stmt)
            return [self._row_to_dict(r) for r in result.scalars()]

    async def update_run_completion(
        self,
        run_id: str,
        *,
        status: str,
        total_input_tokens: int = 0,
        total_output_tokens: int = 0,
        total_tokens: int = 0,
        llm_call_count: int = 0,
        lead_agent_tokens: int = 0,
        subagent_tokens: int = 0,
        middleware_tokens: int = 0,
        message_count: int = 0,
        last_ai_message: str | None = None,
        first_human_message: str | None = None,
        error: str | None = None,
    ) -> None:
        """Update status + token usage + convenience fields on run completion."""
        values: dict[str, Any] = {
            "status": status,
            "total_input_tokens": total_input_tokens,
            "total_output_tokens": total_output_tokens,
            "total_tokens": total_tokens,
            "llm_call_count": llm_call_count,
            "lead_agent_tokens": lead_agent_tokens,
            "subagent_tokens": subagent_tokens,
            "middleware_tokens": middleware_tokens,
            "message_count": message_count,
            "updated_at": datetime.now(UTC),
        }
        if last_ai_message is not None:
            values["last_ai_message"] = last_ai_message[:2000]
        if first_human_message is not None:
            values["first_human_message"] = first_human_message[:2000]
        if error is not None:
            values["error"] = error
        async with self._sf() as session:
            await session.execute(update(RunRow).where(RunRow.run_id == run_id).values(**values))
            await session.commit()

    async def aggregate_tokens_by_thread(self, thread_id: str) -> dict[str, Any]:
        """Aggregate token usage via a single SQL GROUP BY query."""
|
||||
_completed = RunRow.status.in_(("success", "error"))
|
||||
_thread = RunRow.thread_id == thread_id
|
||||
|
||||
stmt = (
|
||||
select(
|
||||
func.coalesce(RunRow.model_name, "unknown").label("model"),
|
||||
func.count().label("runs"),
|
||||
func.coalesce(func.sum(RunRow.total_tokens), 0).label("total_tokens"),
|
||||
func.coalesce(func.sum(RunRow.total_input_tokens), 0).label("total_input_tokens"),
|
||||
func.coalesce(func.sum(RunRow.total_output_tokens), 0).label("total_output_tokens"),
|
||||
func.coalesce(func.sum(RunRow.lead_agent_tokens), 0).label("lead_agent"),
|
||||
func.coalesce(func.sum(RunRow.subagent_tokens), 0).label("subagent"),
|
||||
func.coalesce(func.sum(RunRow.middleware_tokens), 0).label("middleware"),
|
||||
)
|
||||
.where(_thread, _completed)
|
||||
.group_by(func.coalesce(RunRow.model_name, "unknown"))
|
||||
)
|
||||
|
||||
async with self._sf() as session:
|
||||
rows = (await session.execute(stmt)).all()
|
||||
|
||||
total_tokens = total_input = total_output = total_runs = 0
|
||||
lead_agent = subagent = middleware = 0
|
||||
by_model: dict[str, dict] = {}
|
||||
for r in rows:
|
||||
by_model[r.model] = {"tokens": r.total_tokens, "runs": r.runs}
|
||||
total_tokens += r.total_tokens
|
||||
total_input += r.total_input_tokens
|
||||
total_output += r.total_output_tokens
|
||||
total_runs += r.runs
|
||||
lead_agent += r.lead_agent
|
||||
subagent += r.subagent
|
||||
middleware += r.middleware
|
||||
|
||||
return {
|
||||
"total_tokens": total_tokens,
|
||||
"total_input_tokens": total_input,
|
||||
"total_output_tokens": total_output,
|
||||
"total_runs": total_runs,
|
||||
"by_model": by_model,
|
||||
"by_caller": {
|
||||
"lead_agent": lead_agent,
|
||||
"subagent": subagent,
|
||||
"middleware": middleware,
|
||||
},
|
||||
}
|
||||
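Usage sketch (editorial addition, not part of the diff): a minimal wiring of RunRepository against an in-memory SQLite database. The module paths for Base and RunRepository are assumptions inferred from this diff, and aiosqlite must be installed; an explicit user_id is passed so no request contextvar is needed::

    import asyncio

    from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

    from deerflow.persistence.base import Base  # path assumed from this diff
    from deerflow.persistence.run.sql import RunRepository  # module path assumed

    async def main() -> None:
        engine = create_async_engine("sqlite+aiosqlite:///:memory:")
        async with engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)  # create the harness tables
        repo = RunRepository(async_sessionmaker(engine, expire_on_commit=False))
        # An explicit user_id string is used verbatim (no contextvar lookup).
        await repo.put("run-1", thread_id="t-1", user_id="u-1")
        print(await repo.get("run-1", user_id="u-1"))

    asyncio.run(main())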
@ -0,0 +1,38 @@
"""Thread metadata persistence — ORM, abstract store, and concrete implementations."""

from __future__ import annotations

from typing import TYPE_CHECKING

from deerflow.persistence.thread_meta.base import ThreadMetaStore
from deerflow.persistence.thread_meta.memory import MemoryThreadMetaStore
from deerflow.persistence.thread_meta.model import ThreadMetaRow
from deerflow.persistence.thread_meta.sql import ThreadMetaRepository

if TYPE_CHECKING:
    from langgraph.store.base import BaseStore
    from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

__all__ = [
    "MemoryThreadMetaStore",
    "ThreadMetaRepository",
    "ThreadMetaRow",
    "ThreadMetaStore",
    "make_thread_store",
]


def make_thread_store(
    session_factory: async_sessionmaker[AsyncSession] | None,
    store: BaseStore | None = None,
) -> ThreadMetaStore:
    """Create the appropriate ThreadMetaStore based on available backends.

    Returns a SQL-backed repository when a session factory is available,
    otherwise falls back to the in-memory LangGraph Store implementation.
    """
    if session_factory is not None:
        return ThreadMetaRepository(session_factory)
    if store is None:
        raise ValueError("make_thread_store requires either a session_factory (SQL) or a store (memory)")
    return MemoryThreadMetaStore(store)
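A selection sketch (illustrative): how a caller might pick the backend, mirroring the two branches of the factory above::

    from langgraph.store.memory import InMemoryStore

    meta_store = make_thread_store(session_factory)              # SQL mode
    meta_store = make_thread_store(None, store=InMemoryStore())  # memory mode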
@ -0,0 +1,76 @@
"""Abstract interface for thread metadata storage.

Implementations:
- ThreadMetaRepository: SQL-backed (sqlite / postgres via SQLAlchemy)
- MemoryThreadMetaStore: wraps LangGraph BaseStore (memory mode)

All mutating and querying methods accept a ``user_id`` parameter with
three-state semantics (see :mod:`deerflow.runtime.user_context`):

- ``AUTO`` (default): resolve from the request-scoped contextvar.
- Explicit ``str``: use the provided value verbatim.
- Explicit ``None``: bypass owner filtering (migration/CLI only).
"""

from __future__ import annotations

import abc

from deerflow.runtime.user_context import AUTO, _AutoSentinel


class ThreadMetaStore(abc.ABC):
    @abc.abstractmethod
    async def create(
        self,
        thread_id: str,
        *,
        assistant_id: str | None = None,
        user_id: str | None | _AutoSentinel = AUTO,
        display_name: str | None = None,
        metadata: dict | None = None,
    ) -> dict:
        pass

    @abc.abstractmethod
    async def get(self, thread_id: str, *, user_id: str | None | _AutoSentinel = AUTO) -> dict | None:
        pass

    @abc.abstractmethod
    async def search(
        self,
        *,
        metadata: dict | None = None,
        status: str | None = None,
        limit: int = 100,
        offset: int = 0,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> list[dict]:
        pass

    @abc.abstractmethod
    async def update_display_name(self, thread_id: str, display_name: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        pass

    @abc.abstractmethod
    async def update_status(self, thread_id: str, status: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        pass

    @abc.abstractmethod
    async def update_metadata(self, thread_id: str, metadata: dict, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        """Merge ``metadata`` into the thread's metadata field.

        Existing keys are overwritten by the new values; keys absent from
        ``metadata`` are preserved. No-op if the thread does not exist
        or the owner check fails.
        """
        pass

    @abc.abstractmethod
    async def check_access(self, thread_id: str, user_id: str, *, require_existing: bool = False) -> bool:
        """Check if ``user_id`` has access to ``thread_id``."""
        pass

    @abc.abstractmethod
    async def delete(self, thread_id: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        pass
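Illustrative calls (not in the diff) showing the three ``user_id`` states accepted by every method; ``store`` stands for any ThreadMetaStore implementation::

    await store.get("t-1")                  # AUTO: resolve from the request contextvar
    await store.get("t-1", user_id="u-42")  # explicit string: filter by this owner
    await store.get("t-1", user_id=None)    # bypass owner filtering (migration/CLI only)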
@ -0,0 +1,149 @@
"""In-memory ThreadMetaStore backed by LangGraph BaseStore.

Used when database.backend=memory. Delegates to the LangGraph Store's
``("threads",)`` namespace — the same namespace used by the Gateway
router for thread records.
"""

from __future__ import annotations

import time
from typing import Any

from langgraph.store.base import BaseStore

from deerflow.persistence.thread_meta.base import ThreadMetaStore
from deerflow.runtime.user_context import AUTO, _AutoSentinel, resolve_user_id

THREADS_NS: tuple[str, ...] = ("threads",)


class MemoryThreadMetaStore(ThreadMetaStore):
    def __init__(self, store: BaseStore) -> None:
        self._store = store

    async def _get_owned_record(
        self,
        thread_id: str,
        user_id: str | None | _AutoSentinel,
        method_name: str,
    ) -> dict | None:
        """Fetch a record and verify ownership. Returns a mutable copy, or None."""
        resolved = resolve_user_id(user_id, method_name=method_name)
        item = await self._store.aget(THREADS_NS, thread_id)
        if item is None:
            return None
        record = dict(item.value)
        if resolved is not None and record.get("user_id") != resolved:
            return None
        return record

    async def create(
        self,
        thread_id: str,
        *,
        assistant_id: str | None = None,
        user_id: str | None | _AutoSentinel = AUTO,
        display_name: str | None = None,
        metadata: dict | None = None,
    ) -> dict:
        resolved_user_id = resolve_user_id(user_id, method_name="MemoryThreadMetaStore.create")
        now = time.time()
        record: dict[str, Any] = {
            "thread_id": thread_id,
            "assistant_id": assistant_id,
            "user_id": resolved_user_id,
            "display_name": display_name,
            "status": "idle",
            "metadata": metadata or {},
            "values": {},
            "created_at": now,
            "updated_at": now,
        }
        await self._store.aput(THREADS_NS, thread_id, record)
        return record

    async def get(self, thread_id: str, *, user_id: str | None | _AutoSentinel = AUTO) -> dict | None:
        return await self._get_owned_record(thread_id, user_id, "MemoryThreadMetaStore.get")

    async def search(
        self,
        *,
        metadata: dict | None = None,
        status: str | None = None,
        limit: int = 100,
        offset: int = 0,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> list[dict]:
        resolved_user_id = resolve_user_id(user_id, method_name="MemoryThreadMetaStore.search")
        filter_dict: dict[str, Any] = {}
        if metadata:
            filter_dict.update(metadata)
        if status:
            filter_dict["status"] = status
        if resolved_user_id is not None:
            filter_dict["user_id"] = resolved_user_id

        items = await self._store.asearch(
            THREADS_NS,
            filter=filter_dict or None,
            limit=limit,
            offset=offset,
        )
        return [self._item_to_dict(item) for item in items]

    async def check_access(self, thread_id: str, user_id: str, *, require_existing: bool = False) -> bool:
        item = await self._store.aget(THREADS_NS, thread_id)
        if item is None:
            return not require_existing
        record_user_id = item.value.get("user_id")
        if record_user_id is None:
            return True
        return record_user_id == user_id

    async def update_display_name(self, thread_id: str, display_name: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        record = await self._get_owned_record(thread_id, user_id, "MemoryThreadMetaStore.update_display_name")
        if record is None:
            return
        record["display_name"] = display_name
        record["updated_at"] = time.time()
        await self._store.aput(THREADS_NS, thread_id, record)

    async def update_status(self, thread_id: str, status: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        record = await self._get_owned_record(thread_id, user_id, "MemoryThreadMetaStore.update_status")
        if record is None:
            return
        record["status"] = status
        record["updated_at"] = time.time()
        await self._store.aput(THREADS_NS, thread_id, record)

    async def update_metadata(self, thread_id: str, metadata: dict, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        record = await self._get_owned_record(thread_id, user_id, "MemoryThreadMetaStore.update_metadata")
        if record is None:
            return
        merged = dict(record.get("metadata") or {})
        merged.update(metadata)
        record["metadata"] = merged
        record["updated_at"] = time.time()
        await self._store.aput(THREADS_NS, thread_id, record)

    async def delete(self, thread_id: str, *, user_id: str | None | _AutoSentinel = AUTO) -> None:
        record = await self._get_owned_record(thread_id, user_id, "MemoryThreadMetaStore.delete")
        if record is None:
            return
        await self._store.adelete(THREADS_NS, thread_id)

    @staticmethod
    def _item_to_dict(item) -> dict[str, Any]:
        """Convert a Store SearchItem to the dict format expected by callers."""
        val = item.value
        return {
            "thread_id": item.key,
            "assistant_id": val.get("assistant_id"),
            "user_id": val.get("user_id"),
            "display_name": val.get("display_name"),
            "status": val.get("status", "idle"),
            "metadata": val.get("metadata", {}),
            "created_at": str(val.get("created_at", "")),
            "updated_at": str(val.get("updated_at", "")),
        }
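Usage sketch (illustrative), exercising the memory store against LangGraph's InMemoryStore::

    import asyncio

    from langgraph.store.memory import InMemoryStore

    async def main() -> None:
        meta = MemoryThreadMetaStore(InMemoryStore())
        await meta.create("t-1", user_id="u-1", display_name="My thread")
        await meta.update_metadata("t-1", {"topic": "demo"}, user_id="u-1")
        assert await meta.get("t-1", user_id="u-2") is None  # owner filter hides it
        print(await meta.get("t-1", user_id="u-1"))

    asyncio.run(main())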
@ -0,0 +1,23 @@
"""ORM model for thread metadata."""

from __future__ import annotations

from datetime import UTC, datetime

from sqlalchemy import JSON, DateTime, String
from sqlalchemy.orm import Mapped, mapped_column

from deerflow.persistence.base import Base


class ThreadMetaRow(Base):
    __tablename__ = "threads_meta"

    thread_id: Mapped[str] = mapped_column(String(64), primary_key=True)
    assistant_id: Mapped[str | None] = mapped_column(String(128), index=True)
    user_id: Mapped[str | None] = mapped_column(String(64), index=True)
    display_name: Mapped[str | None] = mapped_column(String(256))
    status: Mapped[str] = mapped_column(String(20), default="idle")
    metadata_json: Mapped[dict] = mapped_column(JSON, default=dict)
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC))
    updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(UTC), onupdate=lambda: datetime.now(UTC))
backend/packages/harness/deerflow/persistence/thread_meta/sql.py (new file, 217 lines)
@ -0,0 +1,217 @@
"""SQLAlchemy-backed thread metadata repository."""

from __future__ import annotations

from datetime import UTC, datetime
from typing import Any

from sqlalchemy import select, update
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from deerflow.persistence.thread_meta.base import ThreadMetaStore
from deerflow.persistence.thread_meta.model import ThreadMetaRow
from deerflow.runtime.user_context import AUTO, _AutoSentinel, resolve_user_id


class ThreadMetaRepository(ThreadMetaStore):
    def __init__(self, session_factory: async_sessionmaker[AsyncSession]) -> None:
        self._sf = session_factory

    @staticmethod
    def _row_to_dict(row: ThreadMetaRow) -> dict[str, Any]:
        d = row.to_dict()
        d["metadata"] = d.pop("metadata_json", {})
        for key in ("created_at", "updated_at"):
            val = d.get(key)
            if isinstance(val, datetime):
                d[key] = val.isoformat()
        return d

    async def create(
        self,
        thread_id: str,
        *,
        assistant_id: str | None = None,
        user_id: str | None | _AutoSentinel = AUTO,
        display_name: str | None = None,
        metadata: dict | None = None,
    ) -> dict:
        # Auto-resolve user_id from contextvar when AUTO; explicit None
        # creates an orphan row (used by migration scripts).
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.create")
        now = datetime.now(UTC)
        row = ThreadMetaRow(
            thread_id=thread_id,
            assistant_id=assistant_id,
            user_id=resolved_user_id,
            display_name=display_name,
            metadata_json=metadata or {},
            created_at=now,
            updated_at=now,
        )
        async with self._sf() as session:
            session.add(row)
            await session.commit()
            await session.refresh(row)
            return self._row_to_dict(row)

    async def get(
        self,
        thread_id: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> dict | None:
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.get")
        async with self._sf() as session:
            row = await session.get(ThreadMetaRow, thread_id)
            if row is None:
                return None
            # Enforce owner filter unless explicitly bypassed (user_id=None).
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return None
            return self._row_to_dict(row)

    async def check_access(self, thread_id: str, user_id: str, *, require_existing: bool = False) -> bool:
        """Check if ``user_id`` has access to ``thread_id``.

        Two modes — one row, two distinct semantics depending on what
        the caller is about to do:

        - ``require_existing=False`` (default, permissive):
          Returns True for: row missing (untracked legacy thread),
          ``row.user_id`` is None (shared / pre-auth data),
          or ``row.user_id == user_id``. Use for **read-style**
          decorators where treating an untracked thread as accessible
          preserves backward-compat.

        - ``require_existing=True`` (strict):
          Returns True **only** when the row exists AND
          (``row.user_id == user_id`` OR ``row.user_id is None``).
          Use for **destructive / mutating** decorators (DELETE, PATCH,
          state-update) so a thread that has *already been deleted*
          cannot be re-targeted by any caller — closing the
          delete-idempotence cross-user gap where the row vanishing
          made every other user appear to "own" it.
        """
        async with self._sf() as session:
            row = await session.get(ThreadMetaRow, thread_id)
            if row is None:
                return not require_existing
            if row.user_id is None:
                return True
            return row.user_id == user_id

    async def search(
        self,
        *,
        metadata: dict | None = None,
        status: str | None = None,
        limit: int = 100,
        offset: int = 0,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> list[dict]:
        """Search threads with optional metadata and status filters.

        Owner filter is enforced by default: caller must be in a user
        context. Pass ``user_id=None`` to bypass (migration/CLI).
        """
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.search")
        stmt = select(ThreadMetaRow).order_by(ThreadMetaRow.updated_at.desc())
        if resolved_user_id is not None:
            stmt = stmt.where(ThreadMetaRow.user_id == resolved_user_id)
        if status:
            stmt = stmt.where(ThreadMetaRow.status == status)

        if metadata:
            # When metadata filter is active, fetch a larger window and filter
            # in Python. TODO(Phase 2): use JSON DB operators (Postgres @>,
            # SQLite json_extract) for server-side filtering.
            stmt = stmt.limit(limit * 5 + offset)
            async with self._sf() as session:
                result = await session.execute(stmt)
                rows = [self._row_to_dict(r) for r in result.scalars()]
            rows = [r for r in rows if all(r.get("metadata", {}).get(k) == v for k, v in metadata.items())]
            return rows[offset : offset + limit]
        else:
            stmt = stmt.limit(limit).offset(offset)
            async with self._sf() as session:
                result = await session.execute(stmt)
                return [self._row_to_dict(r) for r in result.scalars()]

    async def _check_ownership(self, session: AsyncSession, thread_id: str, resolved_user_id: str | None) -> bool:
        """Return True if the row exists and is owned (or filter bypassed)."""
        if resolved_user_id is None:
            return True  # explicit bypass
        row = await session.get(ThreadMetaRow, thread_id)
        return row is not None and row.user_id == resolved_user_id

    async def update_display_name(
        self,
        thread_id: str,
        display_name: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> None:
        """Update the display_name (title) for a thread."""
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.update_display_name")
        async with self._sf() as session:
            if not await self._check_ownership(session, thread_id, resolved_user_id):
                return
            await session.execute(update(ThreadMetaRow).where(ThreadMetaRow.thread_id == thread_id).values(display_name=display_name, updated_at=datetime.now(UTC)))
            await session.commit()

    async def update_status(
        self,
        thread_id: str,
        status: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> None:
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.update_status")
        async with self._sf() as session:
            if not await self._check_ownership(session, thread_id, resolved_user_id):
                return
            await session.execute(update(ThreadMetaRow).where(ThreadMetaRow.thread_id == thread_id).values(status=status, updated_at=datetime.now(UTC)))
            await session.commit()

    async def update_metadata(
        self,
        thread_id: str,
        metadata: dict,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> None:
        """Merge ``metadata`` into ``metadata_json``.

        Read-modify-write inside a single session/transaction so concurrent
        callers see consistent state. No-op if the row does not exist or
        the user_id check fails.
        """
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.update_metadata")
        async with self._sf() as session:
            row = await session.get(ThreadMetaRow, thread_id)
            if row is None:
                return
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return
            merged = dict(row.metadata_json or {})
            merged.update(metadata)
            row.metadata_json = merged
            row.updated_at = datetime.now(UTC)
            await session.commit()

    async def delete(
        self,
        thread_id: str,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ) -> None:
        resolved_user_id = resolve_user_id(user_id, method_name="ThreadMetaRepository.delete")
        async with self._sf() as session:
            row = await session.get(ThreadMetaRow, thread_id)
            if row is None:
                return
            if resolved_user_id is not None and row.user_id != resolved_user_id:
                return
            await session.delete(row)
            await session.commit()
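A behavioural sketch (illustrative) of the two ``check_access`` modes documented above, assuming thread "gone" has no row and "t-1" is owned by "u-1"::

    # Permissive (read paths): a missing row counts as accessible.
    assert await repo.check_access("gone", "anyone") is True

    # Strict (destructive paths): a missing row denies everyone, so an
    # already-deleted thread cannot be re-targeted.
    assert await repo.check_access("gone", "anyone", require_existing=True) is False

    assert await repo.check_access("t-1", "u-1", require_existing=True) is True
    assert await repo.check_access("t-1", "u-2", require_existing=True) is False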
@ -0,0 +1,12 @@
"""User storage subpackage.

Holds the ORM model for the ``users`` table. The concrete repository
implementation (``SQLiteUserRepository``) lives in the app layer
(``app.gateway.auth.repositories.sqlite``) because it converts
between the ORM row and the auth module's pydantic ``User`` class.
This keeps the harness package free of any dependency on app code.
"""

from deerflow.persistence.user.model import UserRow

__all__ = ["UserRow"]
backend/packages/harness/deerflow/persistence/user/model.py (new file, 59 lines)
@ -0,0 +1,59 @@
"""ORM model for the users table.

Lives in the harness persistence package so it is picked up by
``Base.metadata.create_all()`` alongside ``threads_meta``, ``runs``,
``run_events``, and ``feedback``. Using the shared engine means:

- One SQLite/Postgres database, one connection pool
- One schema initialisation codepath
- Consistent async sessions across auth and persistence reads
"""

from __future__ import annotations

from datetime import UTC, datetime

from sqlalchemy import Boolean, DateTime, Index, String, text
from sqlalchemy.orm import Mapped, mapped_column

from deerflow.persistence.base import Base


class UserRow(Base):
    __tablename__ = "users"

    # UUIDs are stored as 36-char strings for cross-backend portability.
    id: Mapped[str] = mapped_column(String(36), primary_key=True)

    email: Mapped[str] = mapped_column(String(320), unique=True, nullable=False, index=True)
    password_hash: Mapped[str | None] = mapped_column(String(128), nullable=True)

    # "admin" | "user" — kept as plain string to avoid ALTER TABLE pain
    # when new roles are introduced.
    system_role: Mapped[str] = mapped_column(String(16), nullable=False, default="user")

    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        default=lambda: datetime.now(UTC),
    )

    # OAuth linkage (optional). A partial unique index enforces one
    # account per (provider, oauth_id) pair, leaving NULL/NULL rows
    # unconstrained so plain password accounts can coexist.
    oauth_provider: Mapped[str | None] = mapped_column(String(32), nullable=True)
    oauth_id: Mapped[str | None] = mapped_column(String(128), nullable=True)

    # Auth lifecycle flags
    needs_setup: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
    token_version: Mapped[int] = mapped_column(nullable=False, default=0)

    __table_args__ = (
        Index(
            "idx_users_oauth_identity",
            "oauth_provider",
            "oauth_id",
            unique=True,
            sqlite_where=text("oauth_provider IS NOT NULL AND oauth_id IS NOT NULL"),
        ),
    )
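A sketch (illustrative; ``session`` is a placeholder) of what the partial unique index permits and rejects on SQLite::

    # Two local accounts with NULL oauth columns coexist.
    session.add(UserRow(id="u-1", email="a@example.com"))
    session.add(UserRow(id="u-2", email="b@example.com"))

    # A second row with the same (oauth_provider, oauth_id) raises IntegrityError.
    session.add(UserRow(id="u-3", email="c@example.com", oauth_provider="github", oauth_id="42"))
    session.add(UserRow(id="u-4", email="d@example.com", oauth_provider="github", oauth_id="42"))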
@ -5,15 +5,22 @@ Re-exports the public API of :mod:`~deerflow.runtime.runs` and
 directly from ``deerflow.runtime``.
 """
 
-from .runs import ConflictError, DisconnectMode, RunManager, RunRecord, RunStatus, UnsupportedStrategyError, run_agent
+from .checkpointer import checkpointer_context, get_checkpointer, make_checkpointer, reset_checkpointer
+from .runs import ConflictError, DisconnectMode, RunContext, RunManager, RunRecord, RunStatus, UnsupportedStrategyError, run_agent
 from .serialization import serialize, serialize_channel_values, serialize_lc_object, serialize_messages_tuple
 from .store import get_store, make_store, reset_store, store_context
 from .stream_bridge import END_SENTINEL, HEARTBEAT_SENTINEL, MemoryStreamBridge, StreamBridge, StreamEvent, make_stream_bridge
 
 __all__ = [
+    # checkpointer
+    "checkpointer_context",
+    "get_checkpointer",
+    "make_checkpointer",
+    "reset_checkpointer",
+    # runs
     "ConflictError",
     "DisconnectMode",
+    "RunContext",
     "RunManager",
     "RunRecord",
     "RunStatus",
|
||||
|
||||
Usage (e.g. FastAPI lifespan)::
|
||||
|
||||
from deerflow.agents.checkpointer.async_provider import make_checkpointer
|
||||
from deerflow.runtime.checkpointer.async_provider import make_checkpointer
|
||||
|
||||
async with make_checkpointer() as checkpointer:
|
||||
app.state.checkpointer = checkpointer # InMemorySaver if not configured
|
||||
|
||||
For sync usage see :mod:`deerflow.agents.checkpointer.provider`.
|
||||
For sync usage see :mod:`deerflow.runtime.checkpointer.provider`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@ -24,12 +24,12 @@ from collections.abc import AsyncIterator
|
||||
|
||||
from langgraph.types import Checkpointer
|
||||
|
||||
from deerflow.agents.checkpointer.provider import (
|
||||
from deerflow.config.app_config import AppConfig, get_app_config
|
||||
from deerflow.runtime.checkpointer.provider import (
|
||||
POSTGRES_CONN_REQUIRED,
|
||||
POSTGRES_INSTALL,
|
||||
SQLITE_INSTALL,
|
||||
)
|
||||
from deerflow.config.app_config import get_app_config
|
||||
from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -84,23 +84,77 @@ async def _async_checkpointer(config) -> AsyncIterator[Checkpointer]:
|
||||
|
||||
|
||||
@contextlib.asynccontextmanager
|
||||
async def make_checkpointer() -> AsyncIterator[Checkpointer]:
|
||||
"""Async context manager that yields a checkpointer for the caller's lifetime.
|
||||
Resources are opened on enter and closed on exit — no global state::
|
||||
|
||||
async with make_checkpointer() as checkpointer:
|
||||
app.state.checkpointer = checkpointer
|
||||
|
||||
Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*.
|
||||
"""
|
||||
|
||||
config = get_app_config()
|
||||
|
||||
if config.checkpointer is None:
|
||||
async def _async_checkpointer_from_database(db_config) -> AsyncIterator[Checkpointer]:
|
||||
"""Async context manager that constructs a checkpointer from unified DatabaseConfig."""
|
||||
if db_config.backend == "memory":
|
||||
from langgraph.checkpoint.memory import InMemorySaver
|
||||
|
||||
yield InMemorySaver()
|
||||
return
|
||||
|
||||
async with _async_checkpointer(config.checkpointer) as saver:
|
||||
yield saver
|
||||
if db_config.backend == "sqlite":
|
||||
try:
|
||||
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
|
||||
except ImportError as exc:
|
||||
raise ImportError(SQLITE_INSTALL) from exc
|
||||
|
||||
conn_str = db_config.checkpointer_sqlite_path
|
||||
ensure_sqlite_parent_dir(conn_str)
|
||||
async with AsyncSqliteSaver.from_conn_string(conn_str) as saver:
|
||||
await saver.setup()
|
||||
yield saver
|
||||
return
|
||||
|
||||
if db_config.backend == "postgres":
|
||||
try:
|
||||
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
|
||||
except ImportError as exc:
|
||||
raise ImportError(POSTGRES_INSTALL) from exc
|
||||
|
||||
if not db_config.postgres_url:
|
||||
raise ValueError("database.postgres_url is required for the postgres backend")
|
||||
|
||||
async with AsyncPostgresSaver.from_conn_string(db_config.postgres_url) as saver:
|
||||
await saver.setup()
|
||||
yield saver
|
||||
return
|
||||
|
||||
raise ValueError(f"Unknown database backend: {db_config.backend!r}")
|
||||
|
||||
|
||||
@contextlib.asynccontextmanager
|
||||
async def make_checkpointer(app_config: AppConfig | None = None) -> AsyncIterator[Checkpointer]:
|
||||
"""Async context manager that yields a checkpointer for the caller's lifetime.
|
||||
Resources are opened on enter and closed on exit -- no global state::
|
||||
|
||||
async with make_checkpointer(app_config) as checkpointer:
|
||||
app.state.checkpointer = checkpointer
|
||||
|
||||
Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*.
|
||||
|
||||
Priority:
|
||||
1. Legacy ``checkpointer:`` config section (backward compatible)
|
||||
2. Unified ``database:`` config section
|
||||
3. Default InMemorySaver
|
||||
"""
|
||||
|
||||
if app_config is None:
|
||||
app_config = get_app_config()
|
||||
|
||||
# Legacy: standalone checkpointer config takes precedence
|
||||
if app_config.checkpointer is not None:
|
||||
async with _async_checkpointer(app_config.checkpointer) as saver:
|
||||
yield saver
|
||||
return
|
||||
|
||||
# Unified database config
|
||||
db_config = getattr(app_config, "database", None)
|
||||
if db_config is not None and db_config.backend != "memory":
|
||||
async with _async_checkpointer_from_database(db_config) as saver:
|
||||
yield saver
|
||||
return
|
||||
|
||||
# Default: in-memory
|
||||
from langgraph.checkpoint.memory import InMemorySaver
|
||||
|
||||
yield InMemorySaver()
|
||||
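A resolution sketch (illustrative) of the three-step priority implemented above::

    # 1. Legacy `checkpointer:` section in config.yaml -> that backend wins.
    # 2. Otherwise `database.backend` is sqlite/postgres -> the checkpointer shares it.
    # 3. Otherwise -> InMemorySaver.
    async with make_checkpointer() as checkpointer:
        app.state.checkpointer = checkpointer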
@ -7,7 +7,7 @@ Supported backends: memory, sqlite, postgres.
 
 Usage::
 
-    from deerflow.agents.checkpointer.provider import get_checkpointer, checkpointer_context
+    from deerflow.runtime.checkpointer.provider import get_checkpointer, checkpointer_context
 
     # Singleton — reused across calls, closed on process exit
     cp = get_checkpointer()
backend/packages/harness/deerflow/runtime/converters.py (new file, 136 lines)
@ -0,0 +1,136 @@
"""Pure functions to convert LangChain message objects to OpenAI Chat Completions format.

Utility for translating LangChain message types to OpenAI-compatible dicts.
Not currently wired into RunJournal (which uses message.model_dump() directly),
but available for consumers that need the OpenAI wire format.
"""

from __future__ import annotations

import json
from typing import Any

_ROLE_MAP = {
    "human": "user",
    "ai": "assistant",
    "system": "system",
    "tool": "tool",
}


def langchain_to_openai_message(message: Any) -> dict:
    """Convert a single LangChain BaseMessage to an OpenAI message dict.

    Handles:
    - HumanMessage → {"role": "user", "content": "..."}
    - AIMessage (text only) → {"role": "assistant", "content": "..."}
    - AIMessage (with tool_calls) → {"role": "assistant", "content": null, "tool_calls": [...]}
    - AIMessage (text + tool_calls) → both content and tool_calls present
    - AIMessage (list content / multimodal) → content preserved as list
    - SystemMessage → {"role": "system", "content": "..."}
    - ToolMessage → {"role": "tool", "tool_call_id": "...", "content": "..."}
    """
    msg_type = getattr(message, "type", "")
    role = _ROLE_MAP.get(msg_type, msg_type)
    content = getattr(message, "content", "")

    if role == "tool":
        return {
            "role": "tool",
            "tool_call_id": getattr(message, "tool_call_id", ""),
            "content": content,
        }

    if role == "assistant":
        tool_calls = getattr(message, "tool_calls", None) or []
        result: dict = {"role": "assistant"}

        if tool_calls:
            openai_tool_calls = []
            for tc in tool_calls:
                args = tc.get("args", {})
                openai_tool_calls.append(
                    {
                        "id": tc.get("id", ""),
                        "type": "function",
                        "function": {
                            "name": tc.get("name", ""),
                            "arguments": json.dumps(args) if not isinstance(args, str) else args,
                        },
                    }
                )
            # If no text content, set content to null per OpenAI spec
            result["content"] = content if (isinstance(content, list) and content) or (isinstance(content, str) and content) else None
            result["tool_calls"] = openai_tool_calls
        else:
            result["content"] = content

        return result

    # user / system / unknown
    return {"role": role, "content": content}


def _infer_finish_reason(message: Any) -> str:
    """Infer OpenAI finish_reason from an AIMessage.

    Returns "tool_calls" if tool_calls present, else looks in
    response_metadata.finish_reason, else returns "stop".
    """
    tool_calls = getattr(message, "tool_calls", None) or []
    if tool_calls:
        return "tool_calls"
    resp_meta = getattr(message, "response_metadata", None) or {}
    if isinstance(resp_meta, dict):
        finish = resp_meta.get("finish_reason")
        if finish:
            return finish
    return "stop"


def langchain_to_openai_completion(message: Any) -> dict:
    """Convert an AIMessage and its metadata to an OpenAI completion response dict.

    Returns:
        {
            "id": message.id,
            "model": message.response_metadata.get("model_name"),
            "choices": [{"index": 0, "message": <openai_message>, "finish_reason": <inferred>}],
            "usage": {"prompt_tokens": ..., "completion_tokens": ..., "total_tokens": ...} or None,
        }
    """
    resp_meta = getattr(message, "response_metadata", None) or {}
    model_name = resp_meta.get("model_name") if isinstance(resp_meta, dict) else None

    openai_msg = langchain_to_openai_message(message)
    finish_reason = _infer_finish_reason(message)

    usage_metadata = getattr(message, "usage_metadata", None)
    if usage_metadata is not None:
        input_tokens = usage_metadata.get("input_tokens", 0) or 0
        output_tokens = usage_metadata.get("output_tokens", 0) or 0
        usage: dict | None = {
            "prompt_tokens": input_tokens,
            "completion_tokens": output_tokens,
            "total_tokens": input_tokens + output_tokens,
        }
    else:
        usage = None

    return {
        "id": getattr(message, "id", None),
        "model": model_name,
        "choices": [
            {
                "index": 0,
                "message": openai_msg,
                "finish_reason": finish_reason,
            }
        ],
        "usage": usage,
    }


def langchain_messages_to_openai(messages: list) -> list[dict]:
    """Convert a list of LangChain BaseMessages to OpenAI message dicts."""
    return [langchain_to_openai_message(m) for m in messages]
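An example (illustrative) of the assistant-with-tool-call path::

    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content="",
        tool_calls=[{"name": "search", "args": {"q": "deer"}, "id": "call_1"}],
    )
    print(langchain_to_openai_message(msg))
    # {'role': 'assistant', 'content': None,
    #  'tool_calls': [{'id': 'call_1', 'type': 'function',
    #                  'function': {'name': 'search', 'arguments': '{"q": "deer"}'}}]}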
@ -0,0 +1,4 @@
from deerflow.runtime.events.store.base import RunEventStore
from deerflow.runtime.events.store.memory import MemoryRunEventStore

__all__ = ["MemoryRunEventStore", "RunEventStore"]
@ -0,0 +1,26 @@
from deerflow.runtime.events.store.base import RunEventStore
from deerflow.runtime.events.store.memory import MemoryRunEventStore


def make_run_event_store(config=None) -> RunEventStore:
    """Create a RunEventStore based on run_events.backend configuration."""
    if config is None or config.backend == "memory":
        return MemoryRunEventStore()
    if config.backend == "db":
        from deerflow.persistence.engine import get_session_factory

        sf = get_session_factory()
        if sf is None:
            # database.backend=memory but run_events.backend=db -> fallback
            return MemoryRunEventStore()
        from deerflow.runtime.events.store.db import DbRunEventStore

        return DbRunEventStore(sf, max_trace_content=config.max_trace_content)
    if config.backend == "jsonl":
        from deerflow.runtime.events.store.jsonl import JsonlRunEventStore

        return JsonlRunEventStore()
    raise ValueError(f"Unknown run_events backend: {config.backend!r}")


__all__ = ["MemoryRunEventStore", "RunEventStore", "make_run_event_store"]
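A wiring sketch (illustrative): the factory only reads ``config.backend`` and ``config.max_trace_content``, so any object exposing those attributes works::

    from types import SimpleNamespace

    store = make_run_event_store()  # no config: in-memory
    store = make_run_event_store(SimpleNamespace(backend="db", max_trace_content=10240))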
backend/packages/harness/deerflow/runtime/events/store/base.py (new file, 109 lines)
@ -0,0 +1,109 @@
"""Abstract interface for run event storage.

RunEventStore is the unified storage interface for run event streams.
Messages (frontend display) and execution traces (debugging/audit) go
through the same interface, distinguished by the ``category`` field.

Implementations:
- MemoryRunEventStore: in-memory dict (development, tests)
- DbRunEventStore: DB-backed store (SQLAlchemy ORM)
- JsonlRunEventStore: JSONL file store
"""

from __future__ import annotations

import abc


class RunEventStore(abc.ABC):
    """Run event stream storage interface.

    All implementations must guarantee:
    1. put() events are retrievable in subsequent queries
    2. seq is strictly increasing within the same thread
    3. list_messages() only returns category="message" events
    4. list_events() returns all events for the specified run
    5. Returned dicts match the RunEvent field structure
    """

    @abc.abstractmethod
    async def put(
        self,
        *,
        thread_id: str,
        run_id: str,
        event_type: str,
        category: str,
        content: str | dict = "",
        metadata: dict | None = None,
        created_at: str | None = None,
    ) -> dict:
        """Write an event, auto-assign seq, return the complete record."""

    @abc.abstractmethod
    async def put_batch(self, events: list[dict]) -> list[dict]:
        """Batch-write events. Used by RunJournal flush buffer.

        Each dict's keys match put()'s keyword arguments.
        Returns complete records with seq assigned.
        """

    @abc.abstractmethod
    async def list_messages(
        self,
        thread_id: str,
        *,
        limit: int = 50,
        before_seq: int | None = None,
        after_seq: int | None = None,
    ) -> list[dict]:
        """Return displayable messages (category=message) for a thread, ordered by seq ascending.

        Supports bidirectional cursor pagination:
        - before_seq: return the last ``limit`` records with seq < before_seq (ascending)
        - after_seq: return the first ``limit`` records with seq > after_seq (ascending)
        - neither: return the latest ``limit`` records (ascending)
        """

    @abc.abstractmethod
    async def list_events(
        self,
        thread_id: str,
        run_id: str,
        *,
        event_types: list[str] | None = None,
        limit: int = 500,
    ) -> list[dict]:
        """Return the full event stream for a run, ordered by seq ascending.

        Optionally filter by event_types.
        """

    @abc.abstractmethod
    async def list_messages_by_run(
        self,
        thread_id: str,
        run_id: str,
        *,
        limit: int = 50,
        before_seq: int | None = None,
        after_seq: int | None = None,
    ) -> list[dict]:
        """Return displayable messages (category=message) for a specific run, ordered by seq ascending.

        Supports bidirectional cursor pagination:
        - after_seq: return the first ``limit`` records with seq > after_seq (ascending)
        - before_seq: return the last ``limit`` records with seq < before_seq (ascending)
        - neither: return the latest ``limit`` records (ascending)
        """

    @abc.abstractmethod
    async def count_messages(self, thread_id: str) -> int:
        """Count displayable messages (category=message) in a thread."""

    @abc.abstractmethod
    async def delete_by_thread(self, thread_id: str) -> int:
        """Delete all events for a thread. Return the number of deleted events."""

    @abc.abstractmethod
    async def delete_by_run(self, thread_id: str, run_id: str) -> int:
        """Delete all events for a specific run. Return the number of deleted events."""
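A pagination sketch (illustrative) of the cursor contract, assuming a thread already holds messages with seq 1..10 and ``store`` is any RunEventStore implementation::

    page = await store.list_messages("t-1", limit=3)                 # seq 8, 9, 10
    older = await store.list_messages("t-1", limit=3, before_seq=8)  # seq 5, 6, 7
    newer = await store.list_messages("t-1", limit=3, after_seq=7)   # seq 8, 9, 10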
backend/packages/harness/deerflow/runtime/events/store/db.py (new file, 286 lines)
@ -0,0 +1,286 @@
"""SQLAlchemy-backed RunEventStore implementation.

Persists events to the ``run_events`` table. Trace content is truncated
at ``max_trace_content`` bytes to avoid bloating the database.
"""

from __future__ import annotations

import json
import logging
from datetime import UTC, datetime

from sqlalchemy import delete, func, select
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from deerflow.persistence.models.run_event import RunEventRow
from deerflow.runtime.events.store.base import RunEventStore
from deerflow.runtime.user_context import AUTO, _AutoSentinel, get_current_user, resolve_user_id

logger = logging.getLogger(__name__)


class DbRunEventStore(RunEventStore):
    def __init__(self, session_factory: async_sessionmaker[AsyncSession], *, max_trace_content: int = 10240):
        self._sf = session_factory
        self._max_trace_content = max_trace_content

    @staticmethod
    def _row_to_dict(row: RunEventRow) -> dict:
        d = row.to_dict()
        d["metadata"] = d.pop("event_metadata", {})
        val = d.get("created_at")
        if isinstance(val, datetime):
            d["created_at"] = val.isoformat()
        d.pop("id", None)
        # Restore dict content that was JSON-serialized on write
        raw = d.get("content", "")
        if isinstance(raw, str) and d.get("metadata", {}).get("content_is_dict"):
            try:
                d["content"] = json.loads(raw)
            except (json.JSONDecodeError, ValueError):
                # Content looked like JSON (content_is_dict flag) but failed to parse;
                # keep the raw string as-is.
                logger.debug("Failed to deserialize content as JSON for event seq=%s", d.get("seq"))
        return d

    def _truncate_trace(self, category: str, content: str | dict, metadata: dict | None) -> tuple[str | dict, dict]:
        if category == "trace":
            text = json.dumps(content, default=str, ensure_ascii=False) if isinstance(content, dict) else content
            encoded = text.encode("utf-8")
            if len(encoded) > self._max_trace_content:
                # Truncate by bytes, then decode back (may cut a multi-byte char, so use errors="ignore")
                content = encoded[: self._max_trace_content].decode("utf-8", errors="ignore")
                metadata = {**(metadata or {}), "content_truncated": True, "original_byte_length": len(encoded)}
        return content, metadata or {}

    @staticmethod
    def _user_id_from_context() -> str | None:
        """Soft read of user_id from contextvar for write paths.

        Returns ``None`` (no filter / no stamp) if contextvar is unset,
        which is the expected case for background worker writes. HTTP
        request writes will have the contextvar set by auth middleware
        and get their user_id stamped automatically.

        Coerces ``user.id`` to ``str`` at the boundary: ``User.id`` is
        typed as ``UUID`` by the auth layer, but ``run_events.user_id``
        is ``VARCHAR(64)`` and aiosqlite cannot bind a raw UUID object
        to a VARCHAR column ("type 'UUID' is not supported") — the
        INSERT would silently roll back and the worker would hang.
        """
        user = get_current_user()
        return str(user.id) if user is not None else None

    async def put(self, *, thread_id, run_id, event_type, category, content="", metadata=None, created_at=None):  # noqa: D401
        """Write a single event — low-frequency path only.

        This opens a dedicated transaction with a FOR UPDATE lock to
        assign a monotonic *seq*. For high-throughput writes use
        :meth:`put_batch`, which acquires the lock once for the whole
        batch. Currently the only caller is ``worker.run_agent`` for
        the initial ``human_message`` event (once per run).
        """
        content, metadata = self._truncate_trace(category, content, metadata)
        if isinstance(content, dict):
            db_content = json.dumps(content, default=str, ensure_ascii=False)
            metadata = {**(metadata or {}), "content_is_dict": True}
        else:
            db_content = content
        user_id = self._user_id_from_context()
        async with self._sf() as session:
            async with session.begin():
                # Use FOR UPDATE to serialize seq assignment within a thread.
                # NOTE: with_for_update() on aggregates is a no-op on SQLite;
                # the UNIQUE(thread_id, seq) constraint catches races there.
                max_seq = await session.scalar(select(func.max(RunEventRow.seq)).where(RunEventRow.thread_id == thread_id).with_for_update())
                seq = (max_seq or 0) + 1
                row = RunEventRow(
                    thread_id=thread_id,
                    run_id=run_id,
                    user_id=user_id,
                    event_type=event_type,
                    category=category,
                    content=db_content,
                    event_metadata=metadata,
                    seq=seq,
                    created_at=datetime.fromisoformat(created_at) if created_at else datetime.now(UTC),
                )
                session.add(row)
                return self._row_to_dict(row)

    async def put_batch(self, events):
        if not events:
            return []
        user_id = self._user_id_from_context()
        async with self._sf() as session:
            async with session.begin():
                # Get max seq for the thread (assume all events in batch belong to same thread).
                # NOTE: with_for_update() on aggregates is a no-op on SQLite;
                # the UNIQUE(thread_id, seq) constraint catches races there.
                thread_id = events[0]["thread_id"]
                max_seq = await session.scalar(select(func.max(RunEventRow.seq)).where(RunEventRow.thread_id == thread_id).with_for_update())
                seq = max_seq or 0
                rows = []
                for e in events:
                    seq += 1
                    content = e.get("content", "")
                    category = e.get("category", "trace")
                    metadata = e.get("metadata")
                    content, metadata = self._truncate_trace(category, content, metadata)
                    if isinstance(content, dict):
                        db_content = json.dumps(content, default=str, ensure_ascii=False)
                        metadata = {**(metadata or {}), "content_is_dict": True}
                    else:
                        db_content = content
                    row = RunEventRow(
                        thread_id=e["thread_id"],
                        run_id=e["run_id"],
                        user_id=e.get("user_id", user_id),
                        event_type=e["event_type"],
                        category=category,
                        content=db_content,
                        event_metadata=metadata,
                        seq=seq,
                        created_at=datetime.fromisoformat(e["created_at"]) if e.get("created_at") else datetime.now(UTC),
                    )
                    session.add(row)
                    rows.append(row)
                return [self._row_to_dict(r) for r in rows]

    async def list_messages(
        self,
        thread_id,
        *,
        limit=50,
        before_seq=None,
        after_seq=None,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.list_messages")
        stmt = select(RunEventRow).where(RunEventRow.thread_id == thread_id, RunEventRow.category == "message")
        if resolved_user_id is not None:
            stmt = stmt.where(RunEventRow.user_id == resolved_user_id)
        if before_seq is not None:
            stmt = stmt.where(RunEventRow.seq < before_seq)
        if after_seq is not None:
            stmt = stmt.where(RunEventRow.seq > after_seq)

        if after_seq is not None:
            # Forward pagination: first `limit` records after cursor
            stmt = stmt.order_by(RunEventRow.seq.asc()).limit(limit)
            async with self._sf() as session:
                result = await session.execute(stmt)
                return [self._row_to_dict(r) for r in result.scalars()]
        else:
            # before_seq or default (latest): take last `limit` records, return ascending
            stmt = stmt.order_by(RunEventRow.seq.desc()).limit(limit)
            async with self._sf() as session:
                result = await session.execute(stmt)
                rows = list(result.scalars())
                return [self._row_to_dict(r) for r in reversed(rows)]

    async def list_events(
        self,
        thread_id,
        run_id,
        *,
        event_types=None,
        limit=500,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.list_events")
        stmt = select(RunEventRow).where(RunEventRow.thread_id == thread_id, RunEventRow.run_id == run_id)
        if resolved_user_id is not None:
            stmt = stmt.where(RunEventRow.user_id == resolved_user_id)
        if event_types:
            stmt = stmt.where(RunEventRow.event_type.in_(event_types))
        stmt = stmt.order_by(RunEventRow.seq.asc()).limit(limit)
        async with self._sf() as session:
            result = await session.execute(stmt)
            return [self._row_to_dict(r) for r in result.scalars()]

    async def list_messages_by_run(
        self,
        thread_id,
        run_id,
        *,
        limit=50,
        before_seq=None,
        after_seq=None,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.list_messages_by_run")
        stmt = select(RunEventRow).where(
            RunEventRow.thread_id == thread_id,
            RunEventRow.run_id == run_id,
            RunEventRow.category == "message",
        )
        if resolved_user_id is not None:
            stmt = stmt.where(RunEventRow.user_id == resolved_user_id)
        if before_seq is not None:
            stmt = stmt.where(RunEventRow.seq < before_seq)
        if after_seq is not None:
            stmt = stmt.where(RunEventRow.seq > after_seq)

        if after_seq is not None:
            stmt = stmt.order_by(RunEventRow.seq.asc()).limit(limit)
            async with self._sf() as session:
                result = await session.execute(stmt)
                return [self._row_to_dict(r) for r in result.scalars()]
        else:
            stmt = stmt.order_by(RunEventRow.seq.desc()).limit(limit)
            async with self._sf() as session:
                result = await session.execute(stmt)
                rows = list(result.scalars())
                return [self._row_to_dict(r) for r in reversed(rows)]

    async def count_messages(
        self,
        thread_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.count_messages")
        stmt = select(func.count()).select_from(RunEventRow).where(RunEventRow.thread_id == thread_id, RunEventRow.category == "message")
        if resolved_user_id is not None:
            stmt = stmt.where(RunEventRow.user_id == resolved_user_id)
        async with self._sf() as session:
            return await session.scalar(stmt) or 0

    async def delete_by_thread(
        self,
        thread_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.delete_by_thread")
        async with self._sf() as session:
            count_conditions = [RunEventRow.thread_id == thread_id]
            if resolved_user_id is not None:
                count_conditions.append(RunEventRow.user_id == resolved_user_id)
            count_stmt = select(func.count()).select_from(RunEventRow).where(*count_conditions)
            count = await session.scalar(count_stmt) or 0
            if count > 0:
                await session.execute(delete(RunEventRow).where(*count_conditions))
                await session.commit()
            return count

    async def delete_by_run(
        self,
        thread_id,
        run_id,
        *,
        user_id: str | None | _AutoSentinel = AUTO,
    ):
        resolved_user_id = resolve_user_id(user_id, method_name="DbRunEventStore.delete_by_run")
        async with self._sf() as session:
            count_conditions = [RunEventRow.thread_id == thread_id, RunEventRow.run_id == run_id]
            if resolved_user_id is not None:
                count_conditions.append(RunEventRow.user_id == resolved_user_id)
            count_stmt = select(func.count()).select_from(RunEventRow).where(*count_conditions)
            count = await session.scalar(count_stmt) or 0
            if count > 0:
                await session.execute(delete(RunEventRow).where(*count_conditions))
                await session.commit()
            return count
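A usage sketch (illustrative): batch-writing one message and one trace event into an empty thread; ``store`` is a DbRunEventStore built from the shared session factory::

    records = await store.put_batch([
        {"thread_id": "t-1", "run_id": "r-1", "event_type": "human_message",
         "category": "message", "content": "Hello"},
        {"thread_id": "t-1", "run_id": "r-1", "event_type": "llm_call",
         "category": "trace", "content": {"model": "gpt-4o", "latency_ms": 420}},
    ])
    assert [r["seq"] for r in records] == [1, 2]  # seq is per-thread monotonic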
Some files were not shown because too many files have changed in this diff.