diff --git a/Makefile b/Makefile
index 0d31b7c9f..c60d9b9b2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# DeerFlow - Unified Development Environment
-.PHONY: help config config-upgrade check install setup doctor dev dev-pro dev-daemon dev-daemon-pro start start-pro start-daemon start-daemon-pro stop up up-pro down clean docker-init docker-start docker-start-pro docker-stop docker-logs docker-logs-frontend docker-logs-gateway
+.PHONY: help config config-upgrade check install setup doctor dev dev-daemon start start-daemon stop up down clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway
BASH ?= bash
BACKEND_UV_RUN = cd backend && uv run
@@ -26,25 +26,19 @@ help:
@echo " make install - Install all dependencies (frontend + backend + pre-commit hooks)"
@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
@echo " make dev - Start all services in development mode (with hot-reloading)"
- @echo " make dev-pro - Start in dev + Gateway mode (experimental, no LangGraph server)"
@echo " make dev-daemon - Start dev services in background (daemon mode)"
- @echo " make dev-daemon-pro - Start dev daemon + Gateway mode (experimental)"
@echo " make start - Start all services in production mode (optimized, no hot-reloading)"
- @echo " make start-pro - Start in prod + Gateway mode (experimental)"
@echo " make start-daemon - Start prod services in background (daemon mode)"
- @echo " make start-daemon-pro - Start prod daemon + Gateway mode (experimental)"
@echo " make stop - Stop all running services"
@echo " make clean - Clean up processes and temporary files"
@echo ""
@echo "Docker Production Commands:"
@echo " make up - Build and start production Docker services (localhost:2026)"
- @echo " make up-pro - Build and start production Docker in Gateway mode (experimental)"
@echo " make down - Stop and remove production Docker containers"
@echo ""
@echo "Docker Development Commands:"
@echo " make docker-init - Pull the sandbox image"
@echo " make docker-start - Start Docker services (mode-aware from config.yaml, localhost:2026)"
- @echo " make docker-start-pro - Start Docker in Gateway mode (experimental, no LangGraph container)"
@echo " make docker-stop - Stop Docker development services"
@echo " make docker-logs - View Docker development logs"
@echo " make docker-logs-frontend - View Docker frontend logs"
@@ -123,41 +117,21 @@ dev:
@$(PYTHON) ./scripts/check.py
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev
-# Start all services in dev + Gateway mode (experimental: agent runtime embedded in Gateway)
-dev-pro:
- @$(PYTHON) ./scripts/check.py
- @$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway
-
# Start all services in production mode (with optimizations)
start:
@$(PYTHON) ./scripts/check.py
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod
-# Start all services in prod + Gateway mode (experimental)
-start-pro:
- @$(PYTHON) ./scripts/check.py
- @$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway
-
# Start all services in daemon mode (background)
dev-daemon:
@$(PYTHON) ./scripts/check.py
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --daemon
-# Start daemon + Gateway mode (experimental)
-dev-daemon-pro:
- @$(PYTHON) ./scripts/check.py
- @$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway --daemon
-
# Start prod services in daemon mode (background)
start-daemon:
@$(PYTHON) ./scripts/check.py
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --daemon
-# Start prod daemon + Gateway mode (experimental)
-start-daemon-pro:
- @$(PYTHON) ./scripts/check.py
- @$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway --daemon
-
# Stop all services
stop:
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --stop
@@ -182,10 +156,6 @@ docker-init:
docker-start:
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start
-# Start Docker in Gateway mode (experimental)
-docker-start-pro:
- @$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start --gateway
-
# Stop Docker development environment
docker-stop:
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh stop
@@ -208,10 +178,6 @@ docker-logs-gateway:
up:
@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh
-# Build and start production services in Gateway mode
-up-pro:
- @$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh --gateway
-
# Stop and remove production containers
down:
@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh down
diff --git a/README.md b/README.md
index 59461ee99..908f11c69 100644
--- a/README.md
+++ b/README.md
@@ -243,9 +243,6 @@ make up # Build images and start all production services
make down # Stop and remove containers
```
-> [!NOTE]
-> The LangGraph agent server currently runs via `langgraph dev` (the open-source CLI server).
-
Access: http://localhost:2026
See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed Docker development guide.
@@ -289,53 +286,31 @@ On Windows, run the local development flow from Git Bash. Native `cmd.exe` and P
#### Startup Modes
-DeerFlow supports multiple startup modes across two dimensions:
-
-- **Dev / Prod** — dev enables hot-reload; prod uses pre-built frontend
-- **Standard / Gateway** — standard uses a separate LangGraph server (4 processes); Gateway mode (experimental) embeds the agent runtime in the Gateway API (3 processes)
+DeerFlow runs the agent runtime inside the Gateway API. Development mode enables hot-reload; production mode uses a pre-built frontend.
| | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
|---|---|---|---|---|
| **Dev** | `./scripts/serve.sh --dev`
`make dev` | `./scripts/serve.sh --dev --daemon`
`make dev-daemon` | `./scripts/docker.sh start`
`make docker-start` | — |
-| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`
`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`
`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`
`make docker-start-pro` | — |
| **Prod** | `./scripts/serve.sh --prod`
`make start` | `./scripts/serve.sh --prod --daemon`
`make start-daemon` | — | `./scripts/deploy.sh`
`make up` |
-| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`
`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`
`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`
`make up-pro` |
| Action | Local | Docker Dev | Docker Prod |
|---|---|---|---|
| **Stop** | `./scripts/serve.sh --stop`
`make stop` | `./scripts/docker.sh stop`
`make docker-stop` | `./scripts/deploy.sh down`
`make down` |
| **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |
-> **Gateway mode** eliminates the LangGraph server process — the Gateway API handles agent execution directly via async tasks, managing its own concurrency.
-
-#### Why Gateway Mode?
-
-In standard mode, DeerFlow runs a dedicated [LangGraph Platform](https://langchain-ai.github.io/langgraph/) server alongside the Gateway API. This architecture works well but has trade-offs:
-
-| | Standard Mode | Gateway Mode |
-|---|---|---|
-| **Architecture** | Gateway (REST API) + LangGraph (agent runtime) | Gateway embeds agent runtime |
-| **Concurrency** | `--n-jobs-per-worker` per worker (requires license) | `--workers` × async tasks (no per-worker cap) |
-| **Containers / Processes** | 4 (frontend, gateway, langgraph, nginx) | 3 (frontend, gateway, nginx) |
-| **Resource usage** | Higher (two Python runtimes) | Lower (single Python runtime) |
-| **LangGraph Platform license** | Required for production images | Not required |
-| **Cold start** | Slower (two services to initialize) | Faster |
-
-Both modes are functionally equivalent — the same agents, tools, and skills work in either mode.
+Nginx exposes the LangGraph-compatible API at `/api/langgraph/*` and rewrites those public paths to Gateway's native `/api/*` routers.
#### Docker Production Deployment
-`deploy.sh` supports building and starting separately. Images are mode-agnostic — runtime mode is selected at start time:
+`deploy.sh` supports building and starting separately:
```bash
# One-step (build + start)
-deploy.sh # standard mode (default)
-deploy.sh --gateway # gateway mode
+deploy.sh
-# Two-step (build once, start with any mode)
+# Two-step (build once, start later)
deploy.sh build # build all images
-deploy.sh start # start in standard mode
-deploy.sh start --gateway # start in gateway mode
+deploy.sh start # start pre-built images
# Stop
deploy.sh down
@@ -375,8 +350,8 @@ DeerFlow supports receiving tasks from messaging apps. Channels auto-start when
```yaml
channels:
- # LangGraph Server URL (default: http://localhost:2024)
- langgraph_url: http://localhost:2024
+ # LangGraph-compatible Gateway API base URL (default: http://localhost:8001/api)
+ langgraph_url: http://localhost:8001/api
# Gateway API URL (default: http://localhost:8001)
gateway_url: http://localhost:8001
@@ -504,7 +479,7 @@ WECOM_BOT_SECRET=your_bot_secret
4. Make sure backend dependencies include `wecom-aibot-python-sdk`. The channel uses a WebSocket long connection and does not require a public callback URL.
5. The current integration supports inbound text, image, and file messages. Final images/files generated by the agent are also sent back to the WeCom conversation.
-When DeerFlow runs in Docker Compose, IM channels execute inside the `gateway` container. In that case, do not point `channels.langgraph_url` or `channels.gateway_url` at `localhost`; use container service names such as `http://langgraph:2024` and `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` and `DEER_FLOW_CHANNELS_GATEWAY_URL`.
+When DeerFlow runs in Docker Compose, IM channels execute inside the `gateway` container. In that case, do not point `channels.langgraph_url` or `channels.gateway_url` at `localhost`; use the container service name instead — `http://gateway:8001/api` for `langgraph_url` and `http://gateway:8001` for `gateway_url` — or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` and `DEER_FLOW_CHANNELS_GATEWAY_URL`.
**Commands**
diff --git a/backend/CLAUDE.md b/backend/CLAUDE.md
index 1fd822e9f..10b9db6c4 100644
--- a/backend/CLAUDE.md
+++ b/backend/CLAUDE.md
@@ -7,15 +7,13 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
DeerFlow is a LangGraph-based AI super agent system with a full-stack architecture. The backend provides a "super agent" with sandbox execution, persistent memory, subagent delegation, and extensible tool integration - all operating in per-thread isolated environments.
**Architecture**:
-- **LangGraph Server** (port 2024): Agent runtime and workflow execution
-- **Gateway API** (port 8001): REST API for models, MCP, skills, memory, artifacts, uploads, and local thread cleanup
+- **Gateway API** (port 8001): REST API plus embedded LangGraph-compatible agent runtime
- **Frontend** (port 3000): Next.js web interface
- **Nginx** (port 2026): Unified reverse proxy entry point
- **Provisioner** (port 8002, optional in Docker dev): Started only when sandbox is configured for provisioner/Kubernetes mode
-**Runtime Modes**:
-- **Standard mode** (`make dev`): LangGraph Server handles agent execution as a separate process. 4 processes total.
-- **Gateway mode** (`make dev-pro`, experimental): Agent runtime embedded in Gateway via `RunManager` + `run_agent()` + `StreamBridge` (`packages/harness/deerflow/runtime/`). Service manages its own concurrency via async tasks. 3 processes total, no LangGraph Server.
+**Runtime**:
+- `make dev`, Docker dev, and production all run the agent runtime in Gateway via `RunManager` + `run_agent()` + `StreamBridge` (`packages/harness/deerflow/runtime/`). Nginx exposes that runtime at `/api/langgraph/*` and rewrites it to Gateway's native `/api/*` routers.
**Project Structure**:
```
@@ -25,7 +23,7 @@ deer-flow/
├── extensions_config.json # MCP servers and skills configuration
├── backend/ # Backend application (this directory)
│ ├── Makefile # Backend-only commands (dev, gateway, lint)
-│ ├── langgraph.json # LangGraph server configuration
+│ ├── langgraph.json # LangGraph Studio graph configuration
│ ├── packages/
│ │ └── harness/ # deerflow-harness package (import: deerflow.*)
│ │ ├── pyproject.toml
@@ -83,16 +81,15 @@ When making code changes, you MUST update the relevant documentation:
```bash
make check # Check system requirements
make install # Install all dependencies (frontend + backend)
-make dev # Start all services (LangGraph + Gateway + Frontend + Nginx), with config.yaml preflight
-make dev-pro # Gateway mode (experimental): skip LangGraph, agent runtime embedded in Gateway
-make start-pro # Production + Gateway mode (experimental)
+make dev # Start all services (Gateway + Frontend + Nginx), with config.yaml preflight
+make start # Start production services locally
make stop # Stop all services
```
**Backend directory** (for backend development only):
```bash
make install # Install backend dependencies
-make dev # Run LangGraph server only (port 2024)
+make dev # Run Gateway API with reload (port 8001)
make gateway # Run Gateway API only (port 8001)
make test # Run all backend tests
make lint # Lint with ruff
@@ -315,9 +312,9 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
### IM Channels System (`app/channels/`)
-Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via the LangGraph Server.
+Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via Gateway's LangGraph-compatible API.
-**Architecture**: Channels communicate with the LangGraph Server through `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side.
+**Architecture**: Channels communicate with Gateway through the `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side.
**Components**:
- `message_bus.py` - Async pub/sub hub (`InboundMessage` → queue → dispatcher; `OutboundMessage` → callbacks → channels)
@@ -330,7 +327,7 @@ Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow a
**Message Flow**:
1. External platform -> Channel impl -> `MessageBus.publish_inbound()`
2. `ChannelManager._dispatch_loop()` consumes from queue
-3. For chat: look up/create thread on LangGraph Server
+3. For chat: look up/create thread through Gateway's LangGraph-compatible API
4. Feishu chat: `runs.stream()` → accumulate AI text → publish multiple outbound updates (`is_final=False`) → publish final outbound (`is_final=True`)
5. Slack/Telegram chat: `runs.wait()` → extract final response → publish outbound
6. Feishu channel sends one running reply card up front, then patches the same card for each outbound update (card JSON sets `config.update_multi=true` for Feishu's patch API requirement)
@@ -338,9 +335,9 @@ Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow a
8. Outbound → channel callbacks → platform reply
**Configuration** (`config.yaml` -> `channels`):
-- `langgraph_url` - LangGraph Server URL (default: `http://localhost:2024`)
+- `langgraph_url` - LangGraph-compatible Gateway API base URL (default: `http://localhost:8001/api`)
- `gateway_url` - Gateway API URL for auxiliary commands (default: `http://localhost:8001`)
-- In Docker Compose, IM channels run inside the `gateway` container, so `localhost` points back to that container. Use `http://langgraph:2024` / `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` / `DEER_FLOW_CHANNELS_GATEWAY_URL`.
+- In Docker Compose, IM channels run inside the `gateway` container, so `localhost` points back to that container. Use `http://gateway:8001/api` for `langgraph_url` and `http://gateway:8001` for `gateway_url`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` / `DEER_FLOW_CHANNELS_GATEWAY_URL`.
- Per-channel configs: `feishu` (app_id, app_secret), `slack` (bot_token, app_token), `telegram` (bot_token)
### Memory System (`packages/harness/deerflow/agents/memory/`)
@@ -410,9 +407,9 @@ Both can be modified at runtime via Gateway API endpoints or `DeerFlowClient` me
`DeerFlowClient` provides direct in-process access to all DeerFlow capabilities without HTTP services. All return types align with the Gateway API response schemas, so consumer code works identically in HTTP and embedded modes.
-**Architecture**: Imports the same `deerflow` modules that LangGraph Server and Gateway API use. Shares the same config files and data directories. No FastAPI dependency.
+**Architecture**: Imports the same `deerflow` modules that Gateway API uses. Shares the same config files and data directories. No FastAPI dependency.
-**Agent Conversation** (replaces LangGraph Server):
+**Agent Conversation**:
- `chat(message, thread_id)` — synchronous, accumulates streaming deltas per message-id and returns the final AI text
- `stream(message, thread_id)` — subscribes to LangGraph `stream_mode=["values", "messages", "custom"]` and yields `StreamEvent`:
- `"values"` — full state snapshot (title, messages, artifacts); AI text already delivered via `messages` mode is **not** re-synthesized here to avoid duplicate deliveries
@@ -475,20 +472,15 @@ This starts all services and makes the application available at `http://localhos
| | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
|---|---|---|---|---|
| **Dev** | `./scripts/serve.sh --dev`
`make dev` | `./scripts/serve.sh --dev --daemon`
`make dev-daemon` | `./scripts/docker.sh start`
`make docker-start` | — |
-| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`
`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`
`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`
`make docker-start-pro` | — |
| **Prod** | `./scripts/serve.sh --prod`
`make start` | `./scripts/serve.sh --prod --daemon`
`make start-daemon` | — | `./scripts/deploy.sh`
`make up` |
-| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`
`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`
`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`
`make up-pro` |
| Action | Local | Docker Dev | Docker Prod |
|---|---|---|---|
| **Stop** | `./scripts/serve.sh --stop`
`make stop` | `./scripts/docker.sh stop`
`make docker-stop` | `./scripts/deploy.sh down`
`make down` |
| **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |
-Gateway mode embeds the agent runtime in Gateway, no LangGraph server.
-
**Nginx routing**:
-- Standard mode: `/api/langgraph/*` → LangGraph Server (2024)
-- Gateway mode: `/api/langgraph/*` → Gateway embedded runtime (8001) (via envsubst)
+- `/api/langgraph/*` → Gateway embedded runtime (8001), rewritten to `/api/*`
- `/api/*` (other) → Gateway API (8001)
- `/` (non-API) → Frontend (3000)
@@ -497,15 +489,11 @@ Gateway mode embeds the agent runtime in Gateway, no LangGraph server.
From the **backend** directory:
```bash
-# Terminal 1: LangGraph server
-make dev
-
-# Terminal 2: Gateway API
+# Gateway API
make gateway
```
Direct access (without nginx):
-- LangGraph: `http://localhost:2024`
- Gateway: `http://localhost:8001`
### Frontend Configuration
diff --git a/backend/Makefile b/backend/Makefile
index dd06742a0..81a055684 100644
--- a/backend/Makefile
+++ b/backend/Makefile
@@ -2,7 +2,7 @@ install:
uv sync
dev:
- uv run langgraph dev --no-browser --no-reload --n-jobs-per-worker 10
+ PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 --reload
gateway:
PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001
diff --git a/backend/app/channels/__init__.py b/backend/app/channels/__init__.py
index 4a583c074..77b17335e 100644
--- a/backend/app/channels/__init__.py
+++ b/backend/app/channels/__init__.py
@@ -2,7 +2,7 @@
Provides a pluggable channel system that connects external messaging platforms
(Feishu/Lark, Slack, Telegram) to the DeerFlow agent via the ChannelManager,
-which uses ``langgraph-sdk`` to communicate with the underlying LangGraph Server.
+which uses ``langgraph-sdk`` to communicate with Gateway's LangGraph-compatible API.
"""
from app.channels.base import Channel
diff --git a/backend/app/channels/manager.py b/backend/app/channels/manager.py
index 778c8c860..5c5848bc2 100644
--- a/backend/app/channels/manager.py
+++ b/backend/app/channels/manager.py
@@ -1,4 +1,4 @@
-"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server."""
+"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via Gateway."""
from __future__ import annotations
@@ -21,7 +21,7 @@ from deerflow.runtime.user_context import get_effective_user_id
logger = logging.getLogger(__name__)
-DEFAULT_LANGGRAPH_URL = "http://localhost:2024"
+DEFAULT_LANGGRAPH_URL = "http://localhost:8001/api"
DEFAULT_GATEWAY_URL = "http://localhost:8001"
DEFAULT_ASSISTANT_ID = "lead_agent"
CUSTOM_AGENT_NAME_PATTERN = re.compile(r"^[A-Za-z0-9-]+$")
@@ -509,7 +509,7 @@ class ChannelManager:
"""Core dispatcher that bridges IM channels to the DeerFlow agent.
It reads from the MessageBus inbound queue, creates/reuses threads on
- the LangGraph Server, sends messages via ``runs.wait``, and publishes
+ Gateway's LangGraph-compatible API, sends messages via ``runs.wait``, and publishes
outbound responses back through the bus.
"""
@@ -669,7 +669,7 @@ class ChannelManager:
# -- chat handling -----------------------------------------------------
async def _create_thread(self, client, msg: InboundMessage) -> str:
- """Create a new thread on the LangGraph Server and store the mapping."""
+ """Create a new thread through Gateway and store the mapping."""
thread = await client.threads.create()
thread_id = thread["thread_id"]
self.store.set_thread_id(
@@ -679,7 +679,7 @@ class ChannelManager:
topic_id=msg.topic_id,
user_id=msg.user_id,
)
- logger.info("[Manager] new thread created on LangGraph Server: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
+ logger.info("[Manager] new thread created through Gateway: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
return thread_id
async def _handle_chat(self, msg: InboundMessage, extra_context: dict[str, Any] | None = None) -> None:
@@ -886,7 +886,7 @@ class ChannelManager:
return
if command == "new":
- # Create a new thread on the LangGraph Server
+ # Create a new thread through Gateway
client = self._get_client()
thread = await client.threads.create()
new_thread_id = thread["thread_id"]
diff --git a/backend/tests/test_channels.py b/backend/tests/test_channels.py
index bdb4584e5..b2a5573a3 100644
--- a/backend/tests/test_channels.py
+++ b/backend/tests/test_channels.py
@@ -495,7 +495,7 @@ class TestChannelManager:
await _wait_for(lambda: len(outbound_received) >= 1)
await manager.stop()
- # Thread should be created on the LangGraph Server
+ # Thread should be created through Gateway
mock_client.threads.create.assert_called_once()
# Thread ID should be stored
@@ -1987,28 +1987,28 @@ class TestChannelService:
def test_service_urls_fall_back_to_env(self, monkeypatch):
from app.channels.service import ChannelService
- monkeypatch.setenv("DEER_FLOW_CHANNELS_LANGGRAPH_URL", "http://langgraph:2024")
+ monkeypatch.setenv("DEER_FLOW_CHANNELS_LANGGRAPH_URL", "http://gateway:8001/api")
monkeypatch.setenv("DEER_FLOW_CHANNELS_GATEWAY_URL", "http://gateway:8001")
service = ChannelService(channels_config={})
- assert service.manager._langgraph_url == "http://langgraph:2024"
+ assert service.manager._langgraph_url == "http://gateway:8001/api"
assert service.manager._gateway_url == "http://gateway:8001"
def test_config_service_urls_override_env(self, monkeypatch):
from app.channels.service import ChannelService
- monkeypatch.setenv("DEER_FLOW_CHANNELS_LANGGRAPH_URL", "http://langgraph:2024")
+ monkeypatch.setenv("DEER_FLOW_CHANNELS_LANGGRAPH_URL", "http://gateway:8001/api")
monkeypatch.setenv("DEER_FLOW_CHANNELS_GATEWAY_URL", "http://gateway:8001")
service = ChannelService(
channels_config={
- "langgraph_url": "http://custom-langgraph:2024",
+ "langgraph_url": "http://custom-gateway:8001/api",
"gateway_url": "http://custom-gateway:8001",
}
)
- assert service.manager._langgraph_url == "http://custom-langgraph:2024"
+ assert service.manager._langgraph_url == "http://custom-gateway:8001/api"
assert service.manager._gateway_url == "http://custom-gateway:8001"
def test_disabled_channel_with_string_creds_emits_warning(self, caplog):
diff --git a/backend/tests/test_gateway_runtime_cleanup.py b/backend/tests/test_gateway_runtime_cleanup.py
new file mode 100644
index 000000000..2cc184215
--- /dev/null
+++ b/backend/tests/test_gateway_runtime_cleanup.py
@@ -0,0 +1,62 @@
+"""Regression coverage for the Gateway-owned LangGraph API runtime."""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+
+
+def _read(path: str) -> str:
+ return (REPO_ROOT / path).read_text(encoding="utf-8")
+
+
+def test_root_makefile_no_longer_exposes_transition_gateway_targets():
+ makefile = _read("Makefile")
+
+ assert "dev-pro" not in makefile
+ assert "start-pro" not in makefile
+ assert "dev-daemon-pro" not in makefile
+ assert "start-daemon-pro" not in makefile
+ assert "docker-start-pro" not in makefile
+ assert "up-pro" not in makefile
+ assert not re.search(r"serve\.sh .*--gateway", makefile)
+ assert "docker.sh start --gateway" not in makefile
+ assert "deploy.sh --gateway" not in makefile
+
+
+def test_service_launchers_always_use_gateway_runtime():
+ operational_files = {
+ "scripts/serve.sh": _read("scripts/serve.sh"),
+ "scripts/docker.sh": _read("scripts/docker.sh"),
+ "scripts/deploy.sh": _read("scripts/deploy.sh"),
+ "docker/docker-compose-dev.yaml": _read("docker/docker-compose-dev.yaml"),
+ "docker/docker-compose.yaml": _read("docker/docker-compose.yaml"),
+ }
+
+ for path, content in operational_files.items():
+ assert "start --gateway" not in content, path
+ assert "deploy.sh --gateway" not in content, path
+ assert "langgraph dev" not in content, path
+ assert "LANGGRAPH_UPSTREAM" not in content, path
+ assert "LANGGRAPH_REWRITE" not in content, path
+
+
+def test_nginx_routes_official_langgraph_prefix_to_gateway_api():
+ for path in ("docker/nginx/nginx.local.conf", "docker/nginx/nginx.conf"):
+ content = _read(path)
+
+ assert "/api/langgraph-compat" not in content
+ assert "proxy_pass http://langgraph" not in content
+ assert "rewrite ^/api/langgraph/(.*) /api/$1 break;" in content
+ assert "proxy_pass http://gateway" in content
+
+
+def test_frontend_rewrites_langgraph_prefix_to_gateway():
+ next_config = _read("frontend/next.config.js")
+ api_client = _read("frontend/src/core/api/api-client.ts")
+
+ assert "DEER_FLOW_INTERNAL_LANGGRAPH_BASE_URL" not in next_config
+ assert "http://127.0.0.1:2024" not in next_config
+ assert "langgraph-compat" not in api_client
diff --git a/config.example.yaml b/config.example.yaml
index cdc690f33..366e755ed 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -868,18 +868,18 @@ run_events:
# All channels use outbound connections (WebSocket or polling) — no public IP required.
# channels:
-# # LangGraph Server URL for thread/message management (default: http://localhost:2024)
+# # LangGraph-compatible Gateway API base URL for thread/message management (default: http://localhost:8001/api)
# # For Docker deployments, use the Docker service name instead of localhost:
-# # langgraph_url: http://langgraph:2024
+# # langgraph_url: http://gateway:8001/api
# # gateway_url: http://gateway:8001
-# langgraph_url: http://localhost:2024
+# langgraph_url: http://localhost:8001/api
# # Gateway API URL for auxiliary queries like /models, /memory (default: http://localhost:8001)
# gateway_url: http://localhost:8001
# #
# # Docker Compose note:
# # If channels run inside the gateway container, use container DNS names instead
# # of localhost, for example:
-# # langgraph_url: http://langgraph:2024
+# # langgraph_url: http://gateway:8001/api
# # gateway_url: http://gateway:8001
# # You can also set DEER_FLOW_CHANNELS_LANGGRAPH_URL / DEER_FLOW_CHANNELS_GATEWAY_URL.
#
diff --git a/docker/docker-compose-dev.yaml b/docker/docker-compose-dev.yaml
index 87d19abbe..8fb95124d 100644
--- a/docker/docker-compose-dev.yaml
+++ b/docker/docker-compose-dev.yaml
@@ -4,8 +4,7 @@
# Services:
# - nginx: Reverse proxy (port 2026)
# - frontend: Frontend Next.js dev server (port 3000)
-# - gateway: Backend Gateway API (port 8001)
-# - langgraph: LangGraph server (port 2024)
+# - gateway: Backend Gateway API + agent runtime (port 8001)
# - provisioner (optional): Sandbox provisioner (creates Pods in host Kubernetes)
#
# Prerequisites:
@@ -61,9 +60,7 @@ services:
start_period: 15s
# ── Reverse Proxy ──────────────────────────────────────────────────────
- # Routes API traffic to gateway/langgraph and (optionally) provisioner.
- # LANGGRAPH_UPSTREAM and LANGGRAPH_REWRITE control gateway vs standard
- # routing (processed by envsubst at container start).
+ # Routes API traffic to gateway and (optionally) provisioner.
nginx:
image: nginx:alpine
container_name: deer-flow-nginx
@@ -71,16 +68,12 @@ services:
- "2026:2026"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf.template:ro
- environment:
- - LANGGRAPH_UPSTREAM=${LANGGRAPH_UPSTREAM:-langgraph:2024}
- - LANGGRAPH_REWRITE=${LANGGRAPH_REWRITE:-/}
command:
- sh
- -c
- |
set -e
- envsubst '$$LANGGRAPH_UPSTREAM $$LANGGRAPH_REWRITE' \
- < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
+ cp /etc/nginx/nginx.conf.template /etc/nginx/nginx.conf
test -e /proc/net/if_inet6 || sed -i '/^[[:space:]]*listen[[:space:]]\+\[::\]:2026;/d' /etc/nginx/nginx.conf
exec nginx -g 'daemon off;'
depends_on:
@@ -114,7 +107,6 @@ services:
- WATCHPACK_POLLING=true
- CI=true
- DEER_FLOW_INTERNAL_GATEWAY_BASE_URL=http://gateway:8001
- - DEER_FLOW_INTERNAL_LANGGRAPH_BASE_URL=http://langgraph:2024
env_file:
- ../frontend/.env
networks:
@@ -147,7 +139,7 @@ services:
# On macOS/Docker Desktop, uv may fail to create symlinks inside shared
# host directories, which causes startup-time `uv sync` to crash.
- gateway-uv-cache:/root/.cache/uv
- # DooD: same as gateway — AioSandboxProvider runs inside LangGraph process.
+ # DooD: AioSandboxProvider runs inside the Gateway process.
- /var/run/docker.sock:/var/run/docker.sock
# CLI auth directories for auto-auth (Claude Code + Codex CLI)
- type: bind
@@ -166,7 +158,7 @@ services:
environment:
- CI=true
- DEER_FLOW_HOME=/app/backend/.deer-flow
- - DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://langgraph:2024}
+ - DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://gateway:8001/api}
- DEER_FLOW_CHANNELS_GATEWAY_URL=${DEER_FLOW_CHANNELS_GATEWAY_URL:-http://gateway:8001}
- DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_ROOT}/backend/.deer-flow
- DEER_FLOW_HOST_SKILLS_PATH=${DEER_FLOW_ROOT}/skills
@@ -180,70 +172,11 @@ services:
- deer-flow-dev
restart: unless-stopped
- # Backend - LangGraph Server
- langgraph:
- build:
- context: ../
- dockerfile: backend/Dockerfile
- target: dev
- # cache_from disabled - requires manual setup: mkdir -p /tmp/docker-cache-langgraph
- args:
- APT_MIRROR: ${APT_MIRROR:-}
- UV_IMAGE: ${UV_IMAGE:-ghcr.io/astral-sh/uv:0.7.20}
- UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
- container_name: deer-flow-langgraph
- command: sh -c "cd backend && { (uv sync || (echo '[startup] uv sync failed; recreating .venv and retrying once' && uv venv --allow-existing .venv && uv sync)) && allow_blocking='' && if [ \"\${LANGGRAPH_ALLOW_BLOCKING:-0}\" = '1' ]; then allow_blocking='--allow-blocking'; fi && uv run langgraph dev --no-browser \${allow_blocking} --host 0.0.0.0 --port 2024 --n-jobs-per-worker \${LANGGRAPH_JOBS_PER_WORKER:-10}; } > /app/logs/langgraph.log 2>&1"
- volumes:
- - ../backend/:/app/backend/
- # Preserve the .venv built during Docker image build — mounting the full backend/
- # directory above would otherwise shadow it with the (empty) host directory.
- - langgraph-venv:/app/backend/.venv
- - ../config.yaml:/app/config.yaml
- - ../extensions_config.json:/app/extensions_config.json
- - ../skills:/app/skills
- - ../logs:/app/logs
- # Use a Docker-managed uv cache volume instead of a host bind mount.
- # On macOS/Docker Desktop, uv may fail to create symlinks inside shared
- # host directories, which causes startup-time `uv sync` to crash.
- - langgraph-uv-cache:/root/.cache/uv
- # DooD: same as gateway — AioSandboxProvider runs inside LangGraph process.
- - /var/run/docker.sock:/var/run/docker.sock
- # CLI auth directories for auto-auth (Claude Code + Codex CLI)
- - type: bind
- source: ${HOME:?HOME must be set}/.claude
- target: /root/.claude
- read_only: true
- bind:
- create_host_path: true
- - type: bind
- source: ${HOME:?HOME must be set}/.codex
- target: /root/.codex
- read_only: true
- bind:
- create_host_path: true
- working_dir: /app
- environment:
- - CI=true
- - DEER_FLOW_HOME=/app/backend/.deer-flow
- - DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_ROOT}/backend/.deer-flow
- - DEER_FLOW_HOST_SKILLS_PATH=${DEER_FLOW_ROOT}/skills
- - DEER_FLOW_SANDBOX_HOST=host.docker.internal
- env_file:
- - ../.env
- extra_hosts:
- # For Linux: map host.docker.internal to host gateway
- - "host.docker.internal:host-gateway"
- networks:
- - deer-flow-dev
- restart: unless-stopped
-
volumes:
# Persist .venv across container restarts so dependencies installed during
# image build are not shadowed by the host backend/ directory mount.
gateway-venv:
- langgraph-venv:
gateway-uv-cache:
- langgraph-uv-cache:
networks:
deer-flow-dev:
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 31cb673da..82cb62425 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -4,8 +4,7 @@
# Services:
# - nginx: Reverse proxy (port 2026, configurable via PORT env var)
# - frontend: Next.js production server
-# - gateway: FastAPI Gateway API
-# - langgraph: LangGraph production server (Dockerfile generated by langgraph dockerfile)
+# - gateway: FastAPI Gateway API + agent runtime
# - provisioner: (optional) Sandbox provisioner for Kubernetes mode
#
# Key environment variables (set via environment/.env or scripts/deploy.sh):
@@ -30,12 +29,8 @@ services:
- "${PORT:-2026}:2026"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf.template:ro
- environment:
- - LANGGRAPH_UPSTREAM=${LANGGRAPH_UPSTREAM:-langgraph:2024}
- - LANGGRAPH_REWRITE=${LANGGRAPH_REWRITE:-/}
command: >
- sh -c "envsubst '$$LANGGRAPH_UPSTREAM $$LANGGRAPH_REWRITE'
- < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
+ sh -c "cp /etc/nginx/nginx.conf.template /etc/nginx/nginx.conf
&& nginx -g 'daemon off;'"
depends_on:
- frontend
@@ -57,7 +52,6 @@ services:
environment:
- BETTER_AUTH_SECRET=${BETTER_AUTH_SECRET}
- DEER_FLOW_INTERNAL_GATEWAY_BASE_URL=http://gateway:8001
- - DEER_FLOW_INTERNAL_LANGGRAPH_BASE_URL=http://langgraph:2024
env_file:
- ../frontend/.env
networks:
@@ -102,7 +96,7 @@ services:
- DEER_FLOW_HOME=/app/backend/.deer-flow
- DEER_FLOW_CONFIG_PATH=/app/backend/config.yaml
- DEER_FLOW_EXTENSIONS_CONFIG_PATH=/app/backend/extensions_config.json
- - DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://langgraph:2024}
+ - DEER_FLOW_CHANNELS_LANGGRAPH_URL=${DEER_FLOW_CHANNELS_LANGGRAPH_URL:-http://gateway:8001/api}
- DEER_FLOW_CHANNELS_GATEWAY_URL=${DEER_FLOW_CHANNELS_GATEWAY_URL:-http://gateway:8001}
# DooD path/network translation
- DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_HOME}
@@ -116,58 +110,6 @@ services:
- deer-flow
restart: unless-stopped
- # ── LangGraph Server ───────────────────────────────────────────────────────
- # TODO: switch to langchain/langgraph-api (licensed) once a license key is available.
- # For now, use `langgraph dev` (no license required) with the standard backend image.
- langgraph:
- build:
- context: ../
- dockerfile: backend/Dockerfile
- args:
- APT_MIRROR: ${APT_MIRROR:-}
- UV_IMAGE: ${UV_IMAGE:-ghcr.io/astral-sh/uv:0.7.20}
- UV_INDEX_URL: ${UV_INDEX_URL:-https://pypi.org/simple}
- UV_EXTRAS: ${UV_EXTRAS:-}
- container_name: deer-flow-langgraph
- command: sh -c 'cd /app/backend && args="--no-browser --no-reload --host 0.0.0.0 --port 2024 --n-jobs-per-worker $${LANGGRAPH_JOBS_PER_WORKER:-10}" && if [ "$${LANGGRAPH_ALLOW_BLOCKING:-0}" = "1" ]; then args="$$args --allow-blocking"; fi && uv run langgraph dev $$args'
- volumes:
- - ${DEER_FLOW_CONFIG_PATH}:/app/backend/config.yaml:ro
- - ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/backend/extensions_config.json:ro
- - ${DEER_FLOW_HOME}:/app/backend/.deer-flow
- - ../skills:/app/skills:ro
- - ../backend/.langgraph_api:/app/backend/.langgraph_api
- # DooD: same as gateway
- - ${DEER_FLOW_DOCKER_SOCKET}:/var/run/docker.sock
- # CLI auth directories for auto-auth (Claude Code + Codex CLI)
- - type: bind
- source: ${HOME:?HOME must be set}/.claude
- target: /root/.claude
- read_only: true
- bind:
- create_host_path: true
- - type: bind
- source: ${HOME:?HOME must be set}/.codex
- target: /root/.codex
- read_only: true
- bind:
- create_host_path: true
- environment:
- - CI=true
- - DEER_FLOW_HOME=/app/backend/.deer-flow
- - DEER_FLOW_CONFIG_PATH=/app/backend/config.yaml
- - DEER_FLOW_EXTENSIONS_CONFIG_PATH=/app/backend/extensions_config.json
- - DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_HOME}
- - DEER_FLOW_HOST_SKILLS_PATH=${DEER_FLOW_REPO_ROOT}/skills
- - DEER_FLOW_SANDBOX_HOST=host.docker.internal
- # LangSmith tracing: set LANGSMITH_TRACING=true and LANGSMITH_API_KEY in .env to enable.
- env_file:
- - ../.env
- extra_hosts:
- - "host.docker.internal:host-gateway"
- networks:
- - deer-flow
- restart: unless-stopped
-
# ── Sandbox Provisioner (optional, Kubernetes mode) ────────────────────────
provisioner:
build:
diff --git a/docker/nginx/nginx.conf b/docker/nginx/nginx.conf
index c9a7be32b..12096d5ba 100644
--- a/docker/nginx/nginx.conf
+++ b/docker/nginx/nginx.conf
@@ -26,10 +26,6 @@ http {
server gateway:8001;
}
- upstream langgraph {
- server ${LANGGRAPH_UPSTREAM};
- }
-
upstream frontend {
server frontend:3000;
}
@@ -56,13 +52,11 @@ http {
return 204;
}
- # LangGraph API routes
- # In standard mode: /api/langgraph/* → langgraph:2024 (rewrite to /*)
- # In gateway mode: /api/langgraph/* → gateway:8001 (rewrite to /api/*)
- # Controlled by LANGGRAPH_UPSTREAM and LANGGRAPH_REWRITE env vars.
+ # LangGraph-compatible API routes served by Gateway.
+ # Rewrites /api/langgraph/* to /api/* before proxying to Gateway.
location /api/langgraph/ {
- rewrite ^/api/langgraph/(.*) ${LANGGRAPH_REWRITE}$1 break;
- proxy_pass http://langgraph;
+ rewrite ^/api/langgraph/(.*) /api/$1 break;
+ proxy_pass http://gateway;
proxy_http_version 1.1;
# Headers
diff --git a/docker/nginx/nginx.local.conf b/docker/nginx/nginx.local.conf
index e5a2bef3d..473d3f1d1 100644
--- a/docker/nginx/nginx.local.conf
+++ b/docker/nginx/nginx.local.conf
@@ -19,10 +19,6 @@ http {
server 127.0.0.1:8001;
}
- upstream langgraph {
- server 127.0.0.1:2024;
- }
-
upstream frontend {
server 127.0.0.1:3000;
}
@@ -48,38 +44,10 @@ http {
return 204;
}
- # LangGraph API routes (served by langgraph dev)
- # Rewrites /api/langgraph/* to /* before proxying to LangGraph server
+ # LangGraph-compatible API routes served by Gateway.
+ # Rewrites /api/langgraph/* to /api/* before proxying to Gateway.
location /api/langgraph/ {
- rewrite ^/api/langgraph/(.*) /$1 break;
- proxy_pass http://langgraph;
- proxy_http_version 1.1;
-
- # Headers
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header Connection '';
-
- # SSE/Streaming support
- proxy_buffering off;
- proxy_cache off;
- proxy_set_header X-Accel-Buffering no;
-
- # Timeouts for long-running requests
- proxy_connect_timeout 600s;
- proxy_send_timeout 600s;
- proxy_read_timeout 600s;
-
- # Chunked transfer encoding
- chunked_transfer_encoding on;
- }
-
- # Experimental: Gateway-backed LangGraph-compatible API
- # Frontend can opt-in via NEXT_PUBLIC_LANGGRAPH_BASE_URL=/api/langgraph-compat
- location /api/langgraph-compat/ {
- rewrite ^/api/langgraph-compat/(.*) /api/$1 break;
+ rewrite ^/api/langgraph/(.*) /api/$1 break;
proxy_pass http://gateway;
proxy_http_version 1.1;
diff --git a/frontend/.env.example b/frontend/.env.example
index 96c1431c8..19cce7478 100644
--- a/frontend/.env.example
+++ b/frontend/.env.example
@@ -14,10 +14,3 @@
# Only set these if you need to connect to backend services directly
# NEXT_PUBLIC_BACKEND_BASE_URL="http://localhost:8001"
# NEXT_PUBLIC_LANGGRAPH_BASE_URL="http://localhost:2024"
-
-# LangGraph API base URL
-# Default: /api/langgraph (uses langgraph dev server via nginx)
-# Set to /api/langgraph-compat to use the experimental Gateway-backed runtime
-# Requires: SKIP_LANGGRAPH_SERVER=1 in serve.sh (optional, saves resources)
-# NEXT_PUBLIC_LANGGRAPH_BASE_URL=/api/langgraph-compat
-
diff --git a/frontend/next.config.js b/frontend/next.config.js
index 0f22635c2..5b20aad5f 100644
--- a/frontend/next.config.js
+++ b/frontend/next.config.js
@@ -23,10 +23,6 @@ const config = {
devIndicators: false,
async rewrites() {
const rewrites = [];
- const langgraphURL = getInternalServiceURL(
- "DEER_FLOW_INTERNAL_LANGGRAPH_BASE_URL",
- "http://127.0.0.1:2024",
- );
const gatewayURL = getInternalServiceURL(
"DEER_FLOW_INTERNAL_GATEWAY_BASE_URL",
"http://127.0.0.1:8001",
@@ -35,11 +31,11 @@ const config = {
if (!process.env.NEXT_PUBLIC_LANGGRAPH_BASE_URL) {
rewrites.push({
source: "/api/langgraph",
- destination: langgraphURL,
+ destination: `${gatewayURL}/api`,
});
rewrites.push({
source: "/api/langgraph/:path*",
- destination: `${langgraphURL}/:path*`,
+ destination: `${gatewayURL}/api/:path*`,
});
}
@@ -66,8 +62,8 @@ const config = {
// their own NEXT_PUBLIC_* env var toggle.
//
// NOTE: this must come AFTER the /api/langgraph rewrite above so that
- // LangGraph routes are matched first when NEXT_PUBLIC_LANGGRAPH_BASE_URL
- // is unset.
+ // LangGraph-compatible routes keep their public prefix while Gateway
+ // receives its native /api/* paths.
rewrites.push({
source: "/api/:path*",
destination: `${gatewayURL}/api/:path*`,
diff --git a/frontend/src/core/api/api-client.ts b/frontend/src/core/api/api-client.ts
index 5e71730e7..0b4532ca9 100644
--- a/frontend/src/core/api/api-client.ts
+++ b/frontend/src/core/api/api-client.ts
@@ -13,8 +13,8 @@ import { sanitizeRunStreamOptions } from "./stream-mode";
*
* Reading the cookie per-request (rather than baking it into the SDK's
* ``defaultHeaders`` at construction) handles login / logout / password
- * change cookie rotation transparently. Both the ``/langgraph-compat/*``
- * SDK path and the direct REST endpoints in ``fetcher.ts:fetchWithAuth``
+ * change cookie rotation transparently. Both the ``/api/langgraph/*`` SDK
+ * path and the direct REST endpoints in ``fetcher.ts:fetchWithAuth``
* share :func:`readCsrfCookie` and :const:`STATE_CHANGING_METHODS` so
* the contract stays in lockstep.
*/
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
index 3a8b047bf..b4b030d4b 100755
--- a/scripts/deploy.sh
+++ b/scripts/deploy.sh
@@ -3,52 +3,38 @@
# deploy.sh - Build, start, or stop DeerFlow production services
#
# Commands:
-# deploy.sh [--MODE] — build + start (default: --standard)
+# deploy.sh — build + start
# deploy.sh build — build all images (mode-agnostic)
-# deploy.sh start [--MODE] — start from pre-built images (default: --standard)
+# deploy.sh start — start from pre-built images
# deploy.sh down — stop and remove containers
#
-# Runtime modes:
-# --standard (default) All services including LangGraph server.
-# --gateway No LangGraph container; nginx routes /api/langgraph/*
-# to the Gateway compat API instead.
-#
# Sandbox mode (local / aio / provisioner) is auto-detected from config.yaml.
#
# Examples:
-# deploy.sh # build + start in standard mode
-# deploy.sh --gateway # build + start in gateway mode
+# deploy.sh # build + start
# deploy.sh build # build all images
-# deploy.sh start --gateway # start pre-built images in gateway mode
+# deploy.sh start # start pre-built images
# deploy.sh down # stop and remove containers
#
# Must be run from the repo root directory.
set -e
-RUNTIME_MODE="standard"
-
case "${1:-}" in
build|start|down)
CMD="$1"
if [ -n "${2:-}" ]; then
- case "$2" in
- --standard) RUNTIME_MODE="standard" ;;
- --gateway) RUNTIME_MODE="gateway" ;;
- *) echo "Unknown mode: $2"; echo "Usage: deploy.sh [build|start|down] [--standard|--gateway]"; exit 1 ;;
- esac
+ echo "Unknown argument: $2"
+ echo "Usage: deploy.sh [build|start|down]"
+ exit 1
fi
;;
- --standard|--gateway)
- CMD=""
- RUNTIME_MODE="${1#--}"
- ;;
"")
CMD=""
;;
*)
echo "Unknown argument: $1"
- echo "Usage: deploy.sh [build|start|down] [--standard|--gateway]"
+ echo "Usage: deploy.sh [build|start|down]"
exit 1
;;
esac
@@ -212,7 +198,7 @@ if [ "$CMD" = "build" ]; then
echo " ✓ Images built successfully"
echo "=========================================="
echo ""
- echo " Next: deploy.sh start [--gateway]"
+ echo " Next: deploy.sh start"
echo ""
exit 0
fi
@@ -225,23 +211,14 @@ echo "=========================================="
echo ""
# ── Detect runtime configuration ────────────────────────────────────────────
-# Only needed for start / up — determines which containers to launch.
+# Only needed for start / up — determines whether provisioner is launched.
sandbox_mode="$(detect_sandbox_mode)"
echo -e "${BLUE}Sandbox mode: $sandbox_mode${NC}"
-echo -e "${BLUE}Runtime mode: $RUNTIME_MODE${NC}"
+echo -e "${BLUE}Runtime: Gateway embedded agent runtime${NC}"
-case "$RUNTIME_MODE" in
- gateway)
- export LANGGRAPH_UPSTREAM=gateway:8001
- export LANGGRAPH_REWRITE=/api/
- services="frontend gateway nginx"
- ;;
- standard)
- services="frontend gateway langgraph nginx"
- ;;
-esac
+services="frontend gateway nginx"
if [ "$sandbox_mode" = "provisioner" ]; then
services="$services provisioner"
@@ -282,17 +259,13 @@ fi
echo ""
echo "=========================================="
-echo " DeerFlow is running! ($RUNTIME_MODE mode)"
+echo " DeerFlow is running!"
echo "=========================================="
echo ""
echo " 🌐 Application: http://localhost:${PORT:-2026}"
echo " 📡 API Gateway: http://localhost:${PORT:-2026}/api/*"
-if [ "$RUNTIME_MODE" = "gateway" ]; then
- echo " 🤖 Runtime: Gateway embedded"
- echo " API: /api/langgraph/* → Gateway (compat)"
-else
- echo " 🤖 LangGraph: http://localhost:${PORT:-2026}/api/langgraph/*"
-fi
+echo " 🤖 Runtime: Gateway embedded"
+echo " API: /api/langgraph/* → Gateway"
echo ""
echo " Manage:"
echo " make down — stop and remove containers"
diff --git a/scripts/docker.sh b/scripts/docker.sh
index b50df15c5..6b37b6b51 100755
--- a/scripts/docker.sh
+++ b/scripts/docker.sh
@@ -148,18 +148,15 @@ init() {
}
# Start Docker development environment
-# Usage: start [--gateway]
start() {
local sandbox_mode
local services
- local gateway_mode=false
- # Check for --gateway flag
- for arg in "$@"; do
- if [ "$arg" = "--gateway" ]; then
- gateway_mode=true
- fi
- done
+ if [ "$#" -gt 0 ]; then
+ echo -e "${YELLOW}Unknown option for start: $1${NC}"
+ echo "Usage: $0 start"
+ exit 1
+ fi
echo "=========================================="
echo " Starting DeerFlow Docker Development"
@@ -168,21 +165,12 @@ start() {
sandbox_mode="$(detect_sandbox_mode)"
- if $gateway_mode; then
- services="frontend gateway nginx"
- if [ "$sandbox_mode" = "provisioner" ]; then
- services="frontend gateway provisioner nginx"
- fi
- else
- services="frontend gateway langgraph nginx"
- if [ "$sandbox_mode" = "provisioner" ]; then
- services="frontend gateway langgraph provisioner nginx"
- fi
+ services="frontend gateway nginx"
+ if [ "$sandbox_mode" = "provisioner" ]; then
+ services="frontend gateway provisioner nginx"
fi
- if $gateway_mode; then
- echo -e "${BLUE}Runtime: Gateway mode (experimental) — no LangGraph container${NC}"
- fi
+ echo -e "${BLUE}Runtime: Gateway embedded agent runtime${NC}"
echo -e "${BLUE}Detected sandbox mode: $sandbox_mode${NC}"
if [ "$sandbox_mode" = "provisioner" ]; then
echo -e "${BLUE}Provisioner enabled (Kubernetes mode).${NC}"
@@ -232,12 +220,6 @@ start() {
fi
fi
- # Set nginx routing for gateway mode (envsubst in nginx container)
- if $gateway_mode; then
- export LANGGRAPH_UPSTREAM=gateway:8001
- export LANGGRAPH_REWRITE=/api/
- fi
-
echo "Building and starting containers..."
cd "$DOCKER_DIR" && $COMPOSE_CMD up --build -d --remove-orphans $services
echo ""
@@ -247,12 +229,8 @@ start() {
echo ""
echo " 🌐 Application: http://localhost:2026"
echo " 📡 API Gateway: http://localhost:2026/api/*"
- if $gateway_mode; then
- echo " 🤖 Runtime: Gateway embedded"
- echo " API: /api/langgraph/* → Gateway (compat)"
- else
- echo " 🤖 LangGraph: http://localhost:2026/api/langgraph/*"
- fi
+ echo " 🤖 Runtime: Gateway embedded"
+ echo " API: /api/langgraph/* → Gateway"
echo ""
echo " 📋 View logs: make docker-logs"
echo " 🛑 Stop: make docker-stop"
@@ -332,7 +310,6 @@ help() {
echo "Commands:"
echo " init - Pull the sandbox image (speeds up first Pod startup)"
echo " start - Start Docker services (auto-detects sandbox mode from config.yaml)"
- echo " start --gateway - Start without LangGraph container (Gateway mode, experimental)"
echo " restart - Restart all running Docker services"
echo " logs [option] - View Docker development logs"
echo " --frontend View frontend logs only"
diff --git a/scripts/serve.sh b/scripts/serve.sh
index 0d40ebe76..17d46eede 100755
--- a/scripts/serve.sh
+++ b/scripts/serve.sh
@@ -3,13 +3,11 @@
# serve.sh — Unified DeerFlow service launcher
#
# Usage:
-# ./scripts/serve.sh [--dev|--prod] [--gateway] [--daemon] [--stop|--restart]
+# ./scripts/serve.sh [--dev|--prod] [--daemon] [--stop|--restart]
#
# Modes:
# --dev Development mode with hot-reload (default)
# --prod Production mode, pre-built frontend, no hot-reload
-# --gateway Gateway mode (experimental): skip LangGraph server,
-# agent runtime embedded in Gateway API
# --daemon Run all services in background (nohup), exit after startup
#
# Actions:
@@ -18,13 +16,11 @@
# --restart Stop all services, then start with the given mode flags
#
# Examples:
-# ./scripts/serve.sh --dev # Standard dev (4 processes)
-# ./scripts/serve.sh --dev --gateway # Gateway dev (3 processes)
-# ./scripts/serve.sh --prod --gateway # Gateway prod (3 processes)
-# ./scripts/serve.sh --dev --daemon # Standard dev, background
-# ./scripts/serve.sh --dev --gateway --daemon # Gateway dev, background
+# ./scripts/serve.sh --dev # Gateway dev, hot reload
+# ./scripts/serve.sh --prod # Gateway prod
+# ./scripts/serve.sh --dev --daemon # Gateway dev, background
# ./scripts/serve.sh --stop # Stop all services
-# ./scripts/serve.sh --restart --dev --gateway # Restart in gateway mode
+# ./scripts/serve.sh --restart --dev # Restart dev services
#
# Must be run from the repo root directory.
@@ -44,7 +40,6 @@ fi
# ── Argument parsing ─────────────────────────────────────────────────────────
DEV_MODE=true
-GATEWAY_MODE=false
DAEMON_MODE=false
SKIP_INSTALL=false
ACTION="start" # start | stop | restart
@@ -53,14 +48,13 @@ for arg in "$@"; do
case "$arg" in
--dev) DEV_MODE=true ;;
--prod) DEV_MODE=false ;;
- --gateway) GATEWAY_MODE=true ;;
--daemon) DAEMON_MODE=true ;;
--skip-install) SKIP_INSTALL=true ;;
--stop) ACTION="stop" ;;
--restart) ACTION="restart" ;;
*)
echo "Unknown argument: $arg"
- echo "Usage: $0 [--dev|--prod] [--gateway] [--daemon] [--skip-install] [--stop|--restart]"
+ echo "Usage: $0 [--dev|--prod] [--daemon] [--skip-install] [--stop|--restart]"
exit 1
;;
esac
@@ -79,7 +73,6 @@ _kill_port() {
stop_all() {
echo "Stopping all services..."
- pkill -f "langgraph dev" 2>/dev/null || true
pkill -f "uvicorn app.gateway.app:app" 2>/dev/null || true
pkill -f "next dev" 2>/dev/null || true
pkill -f "next start" 2>/dev/null || true
@@ -88,7 +81,6 @@ stop_all() {
sleep 1
pkill -9 nginx 2>/dev/null || true
# Force-kill any survivors still holding the service ports
- _kill_port 2024
_kill_port 8001
_kill_port 3000
./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
@@ -109,21 +101,11 @@ if [ "$ACTION" = "restart" ]; then
ALREADY_STOPPED=true
fi
-# ── Derive runtime flags ────────────────────────────────────────────────────
-
-if $GATEWAY_MODE; then
- export SKIP_LANGGRAPH_SERVER=1
-fi
-
# Mode label for banner
-if $DEV_MODE && $GATEWAY_MODE; then
- MODE_LABEL="DEV + GATEWAY (experimental)"
-elif $DEV_MODE; then
- MODE_LABEL="DEV (hot-reload enabled)"
-elif $GATEWAY_MODE; then
- MODE_LABEL="PROD + GATEWAY (experimental)"
+if $DEV_MODE; then
+ MODE_LABEL="DEV (Gateway runtime, hot-reload enabled)"
else
- MODE_LABEL="PROD (optimized)"
+ MODE_LABEL="PROD (Gateway runtime, optimized)"
fi
if $DAEMON_MODE; then
@@ -145,8 +127,7 @@ else
FRONTEND_CMD="env BETTER_AUTH_SECRET=$($PYTHON_BIN -c 'import secrets; print(secrets.token_hex(16))') pnpm run preview"
fi
-# Extra flags for uvicorn/langgraph
-LANGGRAPH_EXTRA_FLAGS="--no-reload"
+# Extra flags for uvicorn
if $DEV_MODE && ! $DAEMON_MODE; then
GATEWAY_EXTRA_FLAGS="--reload --reload-include='*.yaml' --reload-include='.env' --reload-exclude='*.pyc' --reload-exclude='__pycache__' --reload-exclude='sandbox/' --reload-exclude='.deer-flow/'"
else
@@ -185,32 +166,6 @@ else
echo "⏩ Skipping dependency install (--skip-install)"
fi
-# ── Sync frontend .env.local ─────────────────────────────────────────────────
-# Next.js .env.local takes precedence over process env vars.
-# The script manages the NEXT_PUBLIC_LANGGRAPH_BASE_URL line to ensure
-# the frontend routes match the active backend mode.
-
-FRONTEND_ENV_LOCAL="$REPO_ROOT/frontend/.env.local"
-ENV_KEY="NEXT_PUBLIC_LANGGRAPH_BASE_URL"
-
-sync_frontend_env() {
- if $GATEWAY_MODE; then
- # Point frontend to Gateway's compat API
- if [ -f "$FRONTEND_ENV_LOCAL" ] && grep -q "^${ENV_KEY}=" "$FRONTEND_ENV_LOCAL"; then
- sed -i.bak "s|^${ENV_KEY}=.*|${ENV_KEY}=/api/langgraph-compat|" "$FRONTEND_ENV_LOCAL" && rm -f "${FRONTEND_ENV_LOCAL}.bak"
- else
- echo "${ENV_KEY}=/api/langgraph-compat" >> "$FRONTEND_ENV_LOCAL"
- fi
- else
- # Remove override — frontend falls back to /api/langgraph (standard)
- if [ -f "$FRONTEND_ENV_LOCAL" ] && grep -q "^${ENV_KEY}=" "$FRONTEND_ENV_LOCAL"; then
- sed -i.bak "/^${ENV_KEY}=/d" "$FRONTEND_ENV_LOCAL" && rm -f "${FRONTEND_ENV_LOCAL}.bak"
- fi
- fi
-}
-
-sync_frontend_env
-
# ── Banner ───────────────────────────────────────────────────────────────────
echo ""
@@ -221,10 +176,7 @@ echo ""
echo " Mode: $MODE_LABEL"
echo ""
echo " Services:"
-if ! $GATEWAY_MODE; then
- echo " LangGraph → localhost:2024 (agent runtime)"
-fi
-echo " Gateway → localhost:8001 (REST API$(if $GATEWAY_MODE; then echo " + agent runtime"; fi))"
+echo " Gateway → localhost:8001 (REST API + agent runtime)"
echo " Frontend → localhost:3000 (Next.js)"
echo " Nginx → localhost:2026 (reverse proxy)"
echo ""
@@ -268,34 +220,17 @@ run_service() {
mkdir -p logs
mkdir -p temp/client_body_temp temp/proxy_temp temp/fastcgi_temp temp/uwsgi_temp temp/scgi_temp
-# 1. LangGraph (skip in gateway mode)
-if ! $GATEWAY_MODE; then
- CONFIG_LOG_LEVEL=$(grep -m1 '^log_level:' config.yaml 2>/dev/null | awk '{print $2}' | tr -d ' ')
- LANGGRAPH_LOG_LEVEL="${LANGGRAPH_LOG_LEVEL:-${CONFIG_LOG_LEVEL:-info}}"
- LANGGRAPH_JOBS_PER_WORKER="${LANGGRAPH_JOBS_PER_WORKER:-10}"
- LANGGRAPH_ALLOW_BLOCKING="${LANGGRAPH_ALLOW_BLOCKING:-0}"
- LANGGRAPH_ALLOW_BLOCKING_FLAG=""
- if [ "$LANGGRAPH_ALLOW_BLOCKING" = "1" ]; then
- LANGGRAPH_ALLOW_BLOCKING_FLAG="--allow-blocking"
- fi
- run_service "LangGraph" \
- "cd backend && NO_COLOR=1 CLICOLOR=0 CLICOLOR_FORCE=0 PY_COLORS=0 TERM=dumb uv run langgraph dev --no-browser $LANGGRAPH_ALLOW_BLOCKING_FLAG --n-jobs-per-worker $LANGGRAPH_JOBS_PER_WORKER --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS 2>&1 | LC_ALL=C LC_CTYPE=C LANG=C perl -pe 's/\e\[[0-9;]*[[:alpha:]]//g' > ../logs/langgraph.log" \
- 2024 60
-else
- echo "⏩ Skipping LangGraph (Gateway mode — runtime embedded in Gateway)"
-fi
-
-# 2. Gateway API
+# 1. Gateway API
run_service "Gateway" \
"cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 $GATEWAY_EXTRA_FLAGS > ../logs/gateway.log 2>&1" \
8001 30
-# 3. Frontend
+# 2. Frontend
run_service "Frontend" \
"cd frontend && $FRONTEND_CMD > ../logs/frontend.log 2>&1" \
3000 120
-# 4. Nginx
+# 3. Nginx
run_service "Nginx" \
"nginx -g 'daemon off;' -c '$REPO_ROOT/docker/nginx/nginx.local.conf' -p '$REPO_ROOT' > logs/nginx.log 2>&1" \
2026 10
@@ -309,16 +244,11 @@ echo "=========================================="
echo ""
echo " 🌐 http://localhost:2026"
echo ""
-if $GATEWAY_MODE; then
- echo " Routing: Frontend → Nginx → Gateway (embedded runtime)"
- echo " API: /api/langgraph-compat/* → Gateway agent runtime"
-else
- echo " Routing: Frontend → Nginx → LangGraph + Gateway"
- echo " API: /api/langgraph/* → LangGraph server (2024)"
-fi
+echo " Routing: Frontend → Nginx → Gateway"
+echo " API: /api/langgraph/* → Gateway agent runtime"
echo " /api/* → Gateway REST API (8001)"
echo ""
-echo " 📋 Logs: logs/{langgraph,gateway,frontend,nginx}.log"
+echo " 📋 Logs: logs/{gateway,frontend,nginx}.log"
echo ""
if $DAEMON_MODE; then