greatmengqi 70323e052a refactor(config): migrate gateway routers and channels to Depends(get_config)
Phase 2 Task P2-2 (Category G): replace AppConfig.current() with the
typed Depends(get_config) FastAPI dependency in every gateway router.

- routers/models.py: list_models / get_model take config via Depends
- routers/mcp.py: get_mcp_configuration / update_mcp_configuration via Depends;
  reload path now swaps app.state.config alongside AppConfig.init() so both
  the new primitive and legacy current() callers see the fresh config
- routers/memory.py: get_memory_config_endpoint / get_memory_status via Depends
- routers/skills.py: update_skill via Depends; reload swaps app.state.config
- deps.py: get_run_context and langgraph_runtime read from app.state.config
  instead of calling AppConfig.current()
- auth/reset_admin.py: CLI constructs AppConfig.from_file() explicitly at the
  top (it is a standalone entry point, not a request handler)
- channels/service.py: from_app_config accepts optional AppConfig parameter;
  legacy fallback to AppConfig.current() preserved until P2-10

Test fix: test_update_skill_refreshes_prompt_cache_before_return now sets
app.state.config on the test FastAPI instance so Depends(get_config) resolves.

All 2379+ tests pass (one pre-existing flaky test_client_e2e unrelated).
2026-04-16 22:34:27 +08:00

116 lines
3.6 KiB
Python

from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field
from app.gateway.deps import get_config
from deerflow.config.app_config import AppConfig
# All endpoints in this module are mounted under the /api prefix and grouped
# under the "models" tag in the generated OpenAPI documentation.
router = APIRouter(prefix="/api", tags=["models"])
class ModelResponse(BaseModel):
    """Public-facing metadata for a single configured model.

    Only display-safe fields are exposed here; provider credentials and
    other internal configuration are deliberately excluded from the schema.
    """

    name: str = Field(..., description="Unique identifier for the model")
    model: str = Field(..., description="Actual provider model identifier")
    display_name: str | None = Field(default=None, description="Human-readable name")
    description: str | None = Field(default=None, description="Model description")
    supports_thinking: bool = Field(default=False, description="Whether model supports thinking mode")
    supports_reasoning_effort: bool = Field(default=False, description="Whether model supports reasoning effort")
class ModelsListResponse(BaseModel):
    """Envelope wrapping the full collection of model entries returned by the list endpoint."""

    models: list[ModelResponse]
@router.get(
    "/models",
    response_model=ModelsListResponse,
    summary="List All Models",
    description="Retrieve a list of all available AI models configured in the system.",
)
async def list_models(config: AppConfig = Depends(get_config)) -> ModelsListResponse:
    """Return display metadata for every model in the active configuration.

    The response is limited to frontend-safe fields; API keys and other
    internal provider settings are never serialized.

    Args:
        config: Application configuration resolved per-request via
            ``Depends(get_config)``.

    Returns:
        ModelsListResponse: One entry per configured model.
    """
    entries: list[ModelResponse] = []
    for cfg in config.models:
        entries.append(
            ModelResponse(
                name=cfg.name,
                model=cfg.model,
                display_name=cfg.display_name,
                description=cfg.description,
                supports_thinking=cfg.supports_thinking,
                supports_reasoning_effort=cfg.supports_reasoning_effort,
            )
        )
    return ModelsListResponse(models=entries)
@router.get(
    "/models/{model_name}",
    response_model=ModelResponse,
    summary="Get Model Details",
    description="Retrieve detailed information about a specific AI model by its name.",
)
async def get_model(model_name: str, config: AppConfig = Depends(get_config)) -> ModelResponse:
    """Look up a single model by its unique name.

    Args:
        model_name: The unique name of the model to retrieve.
        config: Application configuration resolved per-request via
            ``Depends(get_config)``.

    Returns:
        ModelResponse: Display-safe metadata for the matching model.

    Raises:
        HTTPException: 404 when no model with ``model_name`` is configured.
    """
    # Guard clause: fail fast with 404 before building the response.
    if (found := config.get_model_config(model_name)) is None:
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
    return ModelResponse(
        name=found.name,
        model=found.model,
        display_name=found.display_name,
        description=found.description,
        supports_thinking=found.supports_thinking,
        supports_reasoning_effort=found.supports_reasoning_effort,
    )