Compare commits
219 Commits
@@ -0,0 +1,181 @@
---
name: smoke-test
description: End-to-end smoke test skill for DeerFlow. Guides through: 1) Pulling latest code, 2) Docker OR Local installation and deployment (user preference, default to Local if Docker network issues), 3) Service availability verification, 4) Health check, 5) Final test report. Use when the user says "run smoke test", "smoke test deployment", "verify installation", "test service availability", "end-to-end test", or similar.
---

# DeerFlow Smoke Test Skill

This skill guides the Agent through DeerFlow's full end-to-end smoke test workflow, including code updates, deployment (supporting both Docker and local installation modes), service availability verification, and health checks.

## Deployment Mode Selection

This skill supports two deployment modes:

- **Local installation mode** (recommended, especially when network issues occur) - Run all services directly on the local machine
- **Docker mode** - Run all services inside Docker containers

**Selection strategy**:

- If the user explicitly asks for Docker mode, use Docker
- If network issues occur (such as slow image pulls), automatically switch to local mode
- Default to local mode whenever possible

## Structure

```
smoke-test/
├── SKILL.md                          ← You are here - core workflow and logic
├── scripts/
│   ├── check_docker.sh               ← Check the Docker environment
│   ├── check_local_env.sh            ← Check local environment dependencies
│   ├── frontend_check.sh             ← Frontend page smoke check
│   ├── pull_code.sh                  ← Pull the latest code
│   ├── deploy_docker.sh              ← Docker deployment
│   ├── deploy_local.sh               ← Local deployment
│   └── health_check.sh               ← Service health check
├── references/
│   ├── SOP.md                        ← Standard operating procedure
│   └── troubleshooting.md            ← Troubleshooting guide
└── templates/
    ├── report.local.template.md      ← Local mode smoke test report template
    └── report.docker.template.md     ← Docker mode smoke test report template
```

## Standard Operating Procedure (SOP)

### Phase 1: Code Update Check

1. **Confirm current directory** - Verify that the current working directory is the DeerFlow project root
2. **Check Git status** - See whether there are uncommitted changes
3. **Pull the latest code** - Use `git pull origin main` to get the latest updates
4. **Confirm code update** - Verify that the latest code was pulled successfully

### Phase 2: Deployment Mode Selection and Environment Check

**Choose deployment mode**:

- Ask for user preference, or choose automatically based on network conditions
- Default to local installation mode

**Local mode environment check**:

1. **Check Node.js version** - Requires 22+
2. **Check pnpm** - Package manager
3. **Check uv** - Python package manager
4. **Check nginx** - Reverse proxy
5. **Check required ports** - Confirm that ports 2026, 3000, 8001, and 2024 are not occupied

**Docker mode environment check** (if Docker is selected):

1. **Check whether Docker is installed** - Run `docker --version`
2. **Check Docker daemon status** - Run `docker info`
3. **Check Docker Compose availability** - Run `docker compose version`
4. **Check required ports** - Confirm that port 2026 is not occupied

### Phase 3: Configuration Preparation

1. **Check whether config.yaml exists**
   - If it does not exist, run `make config` to generate it
   - If it already exists, check whether it needs an upgrade with `make config-upgrade`
2. **Check the .env file**
   - Verify that required environment variables are configured
   - Especially model API keys such as `OPENAI_API_KEY`

### Phase 4: Deployment Execution

**Local mode deployment**:

1. **Check dependencies** - Run `make check`
2. **Install dependencies** - Run `make install`
3. **(Optional) Pre-pull the sandbox image** - If needed, run `make setup-sandbox`
4. **Start services** - Run `make dev-daemon` (background mode, recommended) or `make dev` (foreground mode)
5. **Wait for startup** - Give all services enough time to start completely (90-120 seconds recommended)

**Docker mode deployment** (if Docker is selected):

1. **Initialize Docker environment** - Run `make docker-init`
2. **Start Docker services** - Run `make docker-start`
3. **Wait for startup** - Give all containers enough time to start completely (60 seconds recommended)

### Phase 5: Service Health Check

**Local mode health check**:

1. **Check process status** - Confirm that LangGraph, Gateway, Frontend, and Nginx processes are all running
2. **Check frontend service** - Visit `http://localhost:2026` and verify that the page loads
3. **Check API Gateway** - Verify the `http://localhost:2026/health` endpoint
4. **Check LangGraph service** - Verify the availability of relevant endpoints
5. **Frontend route smoke check** - Run `bash .agent/skills/smoke-test/scripts/frontend_check.sh` to verify key routes under `/workspace`
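A minimal sketch of these checks, assuming the ports and paths listed above (Nginx on 2026, the gateway health endpoint at `/health`):

```bash
# Local-mode health check sketch (ports and paths as listed above)
curl -fsI http://localhost:2026 >/dev/null && echo "✓ Frontend reachable"
curl -fs http://localhost:2026/health && echo "✓ Gateway healthy"
bash .agent/skills/smoke-test/scripts/frontend_check.sh
```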

**Docker mode health check** (when using Docker):

1. **Check container status** - Run `docker ps` and confirm that all containers are running
2. **Check frontend service** - Visit `http://localhost:2026` and verify that the page loads
3. **Check API Gateway** - Verify the `http://localhost:2026/health` endpoint
4. **Check LangGraph service** - Verify the availability of relevant endpoints
5. **Frontend route smoke check** - Run `bash .agent/skills/smoke-test/scripts/frontend_check.sh` to verify key routes under `/workspace`

### Optional Functional Verification

1. **List available models** - Verify that model configuration loads correctly
2. **List available skills** - Verify that the skill directory is mounted correctly
3. **Simple chat test** - Send a simple message to verify the end-to-end flow

### Phase 6: Generate Test Report

1. **Collect all test results** - Summarize execution status for each phase
2. **Record encountered issues** - If anything fails, record the error details
3. **Generate the final report** - Use the template that matches the selected deployment mode to create the complete test report, including overall conclusion, detailed key test cases, and explicit frontend page / route results
4. **Provide follow-up recommendations** - Offer suggestions based on the test results

## Execution Rules

- **Follow the sequence** - Execute strictly in the order described above
- **Idempotency** - Every step should be safe to repeat
- **Error handling** - If a step fails, stop and report the issue, then provide troubleshooting suggestions
- **Detailed logging** - Record the execution result and status of each step
- **User confirmation** - Ask for confirmation before potentially risky operations such as overwriting config
- **Mode preference** - Prefer local mode to avoid network-related issues
- **Template requirement** - The final report must use the matching template under `templates/`; do not output a free-form summary instead of the template-based report
- **Report clarity** - The execution summary must include the overall pass/fail conclusion plus per-case result explanations, and frontend smoke check results must be listed explicitly in the report
- **Optional phase handling** - If functional verification is not executed, do not present it as a separate skipped phase in the final report

## Known Acceptable Warnings

The following warnings can appear during smoke testing and do not block a successful result:

- Feishu/Lark SSL errors in Gateway logs (certificate verification failure) can be ignored if that channel is not enabled
- Warnings in LangGraph logs about missing methods in the custom checkpointer, such as `adelete_for_runs` or `aprune`, do not affect the core functionality

## Key Tools

Use the following tools during execution:

1. **bash** - Run shell commands
2. **present_file** - Show generated reports and important files
3. **task_tool** - Organize complex steps with subtasks when needed

## Success Criteria

Smoke test pass criteria (local mode):

- [x] Latest code is pulled successfully
- [x] Local environment check passes (Node.js 22+, pnpm, uv, nginx)
- [x] Configuration files are set up correctly
- [x] `make check` passes
- [x] `make install` completes successfully
- [x] `make dev` starts successfully
- [x] All service processes run normally
- [x] Frontend page is accessible
- [x] Frontend route smoke check passes (`/workspace` key routes)
- [x] API Gateway health check passes
- [x] Test report is generated completely

Smoke test pass criteria (Docker mode):

- [x] Latest code is pulled successfully
- [x] Docker environment check passes
- [x] Configuration files are set up correctly
- [x] `make docker-init` completes successfully
- [x] `make docker-start` completes successfully
- [x] All Docker containers run normally
- [x] Frontend page is accessible
- [x] Frontend route smoke check passes (`/workspace` key routes)
- [x] API Gateway health check passes
- [x] Test report is generated completely

## Read Reference Files

Before starting execution, read the following reference files:

1. `references/SOP.md` - Detailed step-by-step operating instructions
2. `references/troubleshooting.md` - Common issues and solutions
3. `templates/report.local.template.md` - Local mode test report template
4. `templates/report.docker.template.md` - Docker mode test report template
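For reference, a minimal end-to-end local-mode run that chains the bundled scripts, assuming it is launched from the repository root:

```bash
# End-to-end local-mode smoke test using the bundled scripts
bash .agent/skills/smoke-test/scripts/pull_code.sh
bash .agent/skills/smoke-test/scripts/check_local_env.sh
bash .agent/skills/smoke-test/scripts/deploy_local.sh
sleep 120   # allow 90-120 seconds for all services to start
bash .agent/skills/smoke-test/scripts/health_check.sh
bash .agent/skills/smoke-test/scripts/frontend_check.sh
```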
@@ -0,0 +1,452 @@
|
||||
# DeerFlow Smoke Test Standard Operating Procedure (SOP)
|
||||
|
||||
This document describes the detailed operating steps for each phase of the DeerFlow smoke test.
|
||||
|
||||
## Phase 1: Code Update Check
|
||||
|
||||
### 1.1 Confirm Current Directory
|
||||
|
||||
**Objective**: Verify that the current working directory is the DeerFlow project root.
|
||||
|
||||
**Steps**:
|
||||
1. Run `pwd` to view the current working directory
|
||||
2. Check whether the directory contains the following files/directories:
|
||||
- `Makefile`
|
||||
- `backend/`
|
||||
- `frontend/`
|
||||
- `config.example.yaml`
|
||||
|
||||
**Success Criteria**: The current directory contains all of the files/directories listed above.
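A minimal sketch of this check:

```bash
# Verify that the current directory looks like the DeerFlow project root
for item in Makefile backend frontend config.example.yaml; do
  [ -e "$item" ] || { echo "✗ Missing $item - not the project root?"; exit 1; }
done
echo "✓ Current directory appears to be the DeerFlow project root: $(pwd)"
```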
|
||||
|
||||
---
|
||||
|
||||
### 1.2 Check Git Status
|
||||
|
||||
**Objective**: Check whether there are uncommitted changes.
|
||||
|
||||
**Steps**:
|
||||
1. Run `git status`
|
||||
2. Check whether the output includes "Changes not staged for commit" or "Untracked files"
|
||||
|
||||
**Notes**:
|
||||
- If there are uncommitted changes, recommend that the user commit or stash them first to avoid conflicts while pulling
|
||||
- If the user confirms that they want to continue, this step can be skipped
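A minimal sketch of the check (the same test `scripts/pull_code.sh` performs):

```bash
# Detect uncommitted or untracked changes before pulling
if git status --porcelain | grep -q .; then
  echo "⚠ Uncommitted changes detected - commit or stash them before pulling"
  git status --short
fi
```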
|
||||
|
||||
---
|
||||
|
||||
### 1.3 Pull the Latest Code
|
||||
|
||||
**Objective**: Fetch the latest code updates.
|
||||
|
||||
**Steps**:
|
||||
1. Run `git fetch origin main`
|
||||
2. Run `git pull origin main`
|
||||
|
||||
**Success Criteria**:
|
||||
- The commands succeed without errors
|
||||
- The output shows "Already up to date" or indicates that new commits were pulled successfully
|
||||
|
||||
---
|
||||
|
||||
### 1.4 Confirm Code Update
|
||||
|
||||
**Objective**: Verify that the latest code was pulled successfully.
|
||||
|
||||
**Steps**:
|
||||
1. Run `git log -1 --oneline` to view the latest commit
|
||||
2. Record the commit hash and message
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Deployment Mode Selection and Environment Check
|
||||
|
||||
### 2.1 Choose Deployment Mode
|
||||
|
||||
**Objective**: Decide whether to use local mode or Docker mode.
|
||||
|
||||
**Decision Flow**:
|
||||
1. Prefer local mode first to avoid network-related issues
|
||||
2. If the user explicitly requests Docker, use Docker
|
||||
3. If Docker network issues occur, switch to local mode automatically
|
||||
|
||||
---
|
||||
|
||||
### 2.2 Local Mode Environment Check
|
||||
|
||||
**Objective**: Verify that local development environment dependencies are satisfied.
|
||||
|
||||
#### 2.2.1 Check Node.js Version
|
||||
|
||||
**Steps**:
|
||||
1. If nvm is used, run `nvm use 22` to switch to Node 22+
|
||||
2. Run `node --version`
|
||||
|
||||
**Success Criteria**: Version >= 22.x
|
||||
|
||||
**Failure Handling**:
|
||||
- If the version is too low, ask the user to install/switch Node.js with nvm:
|
||||
```bash
|
||||
nvm install 22
|
||||
nvm use 22
|
||||
```
|
||||
- Or install it from the official website: https://nodejs.org/
|
||||
|
||||
---
|
||||
|
||||
#### 2.2.2 Check pnpm
|
||||
|
||||
**Steps**:
|
||||
1. Run `pnpm --version`
|
||||
|
||||
**Success Criteria**: The command returns pnpm version information.
|
||||
|
||||
**Failure Handling**:
|
||||
- If pnpm is not installed, ask the user to install it with `npm install -g pnpm`
|
||||
|
||||
---
|
||||
|
||||
#### 2.2.3 Check uv
|
||||
|
||||
**Steps**:
|
||||
1. Run `uv --version`
|
||||
|
||||
**Success Criteria**: The command returns uv version information.
|
||||
|
||||
**Failure Handling**:
|
||||
- If uv is not installed, ask the user to install uv
|
||||
|
||||
---
|
||||
|
||||
#### 2.2.4 Check nginx
|
||||
|
||||
**Steps**:
|
||||
1. Run `nginx -v`
|
||||
|
||||
**Success Criteria**: The command returns nginx version information.
|
||||
|
||||
**Failure Handling**:
|
||||
- macOS: install with Homebrew using `brew install nginx`
|
||||
- Linux: install using the system package manager
|
||||
|
||||
---
|
||||
|
||||
#### 2.2.5 Check Required Ports
|
||||
|
||||
**Steps**:
|
||||
1. Run the following commands to check ports:
|
||||
```bash
|
||||
lsof -i :2026 # Main port
|
||||
lsof -i :3000 # Frontend
|
||||
lsof -i :8001 # Gateway
|
||||
lsof -i :2024 # LangGraph
|
||||
```
|
||||
|
||||
**Success Criteria**: All ports are free, or they are occupied only by DeerFlow-related processes.
|
||||
|
||||
**Failure Handling**:
|
||||
- If a port is occupied, ask the user to stop the related process
|
||||
|
||||
---
|
||||
|
||||
### 2.3 Docker Mode Environment Check (If Docker Is Selected)
|
||||
|
||||
#### 2.3.1 Check Whether Docker Is Installed
|
||||
|
||||
**Steps**:
|
||||
1. Run `docker --version`
|
||||
|
||||
**Success Criteria**: The command returns Docker version information, such as "Docker version 24.x.x".
|
||||
|
||||
---
|
||||
|
||||
#### 2.3.2 Check Docker Daemon Status
|
||||
|
||||
**Steps**:
|
||||
1. Run `docker info`
|
||||
|
||||
**Success Criteria**: The command runs successfully and shows Docker system information.
|
||||
|
||||
**Failure Handling**:
|
||||
- If it fails, ask the user to start Docker Desktop or the Docker service
|
||||
|
||||
---
|
||||
|
||||
#### 2.3.3 Check Docker Compose Availability
|
||||
|
||||
**Steps**:
|
||||
1. Run `docker compose version`
|
||||
|
||||
**Success Criteria**: The command returns Docker Compose version information.
|
||||
|
||||
---
|
||||
|
||||
#### 2.3.4 Check Required Ports
|
||||
|
||||
**Steps**:
|
||||
1. Run `lsof -i :2026` (macOS/Linux) or `netstat -ano | findstr :2026` (Windows)
|
||||
|
||||
**Success Criteria**: Port 2026 is free, or it is occupied only by a DeerFlow-related process.
|
||||
|
||||
**Failure Handling**:
|
||||
- If the port is occupied by another process, ask the user to stop that process or change the configuration
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Configuration Preparation
|
||||
|
||||
### 3.1 Check config.yaml
|
||||
|
||||
**Steps**:
|
||||
1. Check whether `config.yaml` exists
|
||||
2. If it does not exist, run `make config`
|
||||
3. If it already exists, consider running `make config-upgrade` to merge new fields
|
||||
|
||||
**Validation**:
|
||||
- Check whether at least one model is configured in config.yaml
|
||||
- Check whether the model configuration references the correct environment variables
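A minimal sketch of the validation; the final grep assumes that API keys are referenced in config.yaml through `${VAR}`-style placeholders, which may not match your configuration format:

```bash
# Ensure config.yaml exists; generate or upgrade it with the Makefile targets
if [ ! -f config.yaml ]; then
  make config
else
  make config-upgrade   # optional: merge newly added fields
fi

# Assumption: model API keys are referenced in config.yaml as ${VAR} placeholders
grep -E '\$\{[A-Z_]+\}' config.yaml \
  || echo "⚠ No environment variable references found in config.yaml"
```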
|
||||
|
||||
---
|
||||
|
||||
### 3.2 Check the .env File
|
||||
|
||||
**Steps**:
|
||||
1. Check whether the `.env` file exists
|
||||
2. If it does not exist, copy it from `.env.example`
|
||||
3. Check whether the following environment variables are configured:
|
||||
- `OPENAI_API_KEY` (or other model API keys)
|
||||
- Other required settings
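A minimal sketch of these checks:

```bash
# Create .env from the example if it is missing, then verify the key variable
[ -f .env ] || cp .env.example .env

if grep -Eq '^OPENAI_API_KEY=.+' .env; then
  echo "✓ OPENAI_API_KEY is set in .env"
else
  echo "⚠ OPENAI_API_KEY is missing or empty in .env"
fi
```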
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Deployment Execution
|
||||
|
||||
### 4.1 Local Mode Deployment
|
||||
|
||||
#### 4.1.1 Check Dependencies
|
||||
|
||||
**Steps**:
|
||||
1. Run `make check`
|
||||
|
||||
**Description**: This command validates all required tools (Node.js 22+, pnpm, uv, nginx).
|
||||
|
||||
---
|
||||
|
||||
#### 4.1.2 Install Dependencies
|
||||
|
||||
**Steps**:
|
||||
1. Run `make install`
|
||||
|
||||
**Description**: This command installs both backend and frontend dependencies.
|
||||
|
||||
**Notes**:
|
||||
- This step may take some time
|
||||
- If network issues cause failures, try using a closer or mirrored package registry
|
||||
|
||||
---
|
||||
|
||||
#### 4.1.3 (Optional) Pre-pull the Sandbox Image
|
||||
|
||||
**Steps**:
|
||||
1. If Docker / Container sandbox is used, run `make setup-sandbox`
|
||||
|
||||
**Description**: This step is optional and not needed for local sandbox mode.
|
||||
|
||||
---
|
||||
|
||||
#### 4.1.4 Start Services
|
||||
|
||||
**Steps**:
|
||||
1. Run `make dev-daemon` (background mode)
|
||||
|
||||
**Description**: This command starts all services (LangGraph, Gateway, Frontend, Nginx).
|
||||
|
||||
**Notes**:
|
||||
- `make dev` runs in the foreground and stops with Ctrl+C
|
||||
- `make dev-daemon` runs in the background
|
||||
- Use `make stop` to stop services
|
||||
|
||||
---
|
||||
|
||||
#### 4.1.5 Wait for Services to Start
|
||||
|
||||
**Steps**:
|
||||
1. Wait 90-120 seconds for all services to start completely
|
||||
2. You can monitor startup progress by checking these log files:
|
||||
- `logs/langgraph.log`
|
||||
- `logs/gateway.log`
|
||||
- `logs/frontend.log`
|
||||
- `logs/nginx.log`
|
||||
|
||||
---
|
||||
|
||||
### 4.2 Docker Mode Deployment (If Docker Is Selected)
|
||||
|
||||
#### 4.2.1 Initialize the Docker Environment
|
||||
|
||||
**Steps**:
|
||||
1. Run `make docker-init`
|
||||
|
||||
**Description**: This command pulls the sandbox image if needed.
|
||||
|
||||
---
|
||||
|
||||
#### 4.2.2 Start Docker Services
|
||||
|
||||
**Steps**:
|
||||
1. Run `make docker-start`
|
||||
|
||||
**Description**: This command builds and starts all required Docker containers.
|
||||
|
||||
---
|
||||
|
||||
#### 4.2.3 Wait for Services to Start
|
||||
|
||||
**Steps**:
|
||||
1. Wait 60-90 seconds for all services to start completely
|
||||
2. You can run `make docker-logs` to monitor startup progress
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Service Health Check
|
||||
|
||||
### 5.1 Local Mode Health Check
|
||||
|
||||
#### 5.1.1 Check Process Status
|
||||
|
||||
**Steps**:
|
||||
1. Run the following command to check processes:
|
||||
```bash
|
||||
ps aux | grep -E "(langgraph|uvicorn|next|nginx)" | grep -v grep
|
||||
```
|
||||
|
||||
**Success Criteria**: Confirm that the following processes are running:
|
||||
- LangGraph (`langgraph dev`)
|
||||
- Gateway (`uvicorn app.gateway.app:app`)
|
||||
- Frontend (`next dev` or `next start`)
|
||||
- Nginx (`nginx`)
|
||||
|
||||
---
|
||||
|
||||
#### 5.1.2 Check Frontend Service
|
||||
|
||||
**Steps**:
|
||||
1. Use curl or a browser to visit `http://localhost:2026`
|
||||
2. Verify that the page loads normally
|
||||
|
||||
**Example curl command**:
|
||||
```bash
|
||||
curl -I http://localhost:2026
|
||||
```
|
||||
|
||||
**Success Criteria**: Returns an HTTP 200 status code.
|
||||
|
||||
---
|
||||
|
||||
#### 5.1.3 Check API Gateway
|
||||
|
||||
**Steps**:
|
||||
1. Visit `http://localhost:2026/health`
|
||||
|
||||
**Example curl command**:
|
||||
```bash
|
||||
curl http://localhost:2026/health
|
||||
```
|
||||
|
||||
**Success Criteria**: Returns health status JSON.
|
||||
|
||||
---
|
||||
|
||||
#### 5.1.4 Check LangGraph Service
|
||||
|
||||
**Steps**:
|
||||
1. Visit relevant LangGraph endpoints to verify availability
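A minimal sketch; `scripts/health_check.sh` probes the LangGraph port (2024) the same way and accepts any HTTP response, including 404 on `/`, as evidence that the service is up:

```bash
# Any HTTP status (even 404 on "/") means the LangGraph server is responding
status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:2024/)
if [ "$status" != "000" ]; then
  echo "✓ LangGraph service is responding (HTTP $status)"
else
  echo "✗ LangGraph service is not reachable on port 2024"
fi
```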
|
||||
|
||||
---
|
||||
|
||||
### 5.2 Docker Mode Health Check (When Using Docker)
|
||||
|
||||
#### 5.2.1 Check Container Status
|
||||
|
||||
**Steps**:
|
||||
1. Run `docker ps`
|
||||
2. Confirm that the following containers are running:
|
||||
- `deer-flow-nginx`
|
||||
- `deer-flow-frontend`
|
||||
- `deer-flow-gateway`
|
||||
- `deer-flow-langgraph` (if not in gateway mode)
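A minimal sketch using the same format string as `scripts/health_check.sh`:

```bash
# List DeerFlow containers and their status
docker ps --format '{{.Names}} ({{.Status}})' | grep deer-flow \
  || echo "✗ No deer-flow containers are running"
```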
|
||||
|
||||
---
|
||||
|
||||
#### 5.2.2 Check Frontend Service
|
||||
|
||||
**Steps**:
|
||||
1. Use curl or a browser to visit `http://localhost:2026`
|
||||
2. Verify that the page loads normally
|
||||
|
||||
**Example curl command**:
|
||||
```bash
|
||||
curl -I http://localhost:2026
|
||||
```
|
||||
|
||||
**Success Criteria**: Returns an HTTP 200 status code.
|
||||
|
||||
---
|
||||
|
||||
#### 5.2.3 Check API Gateway
|
||||
|
||||
**Steps**:
|
||||
1. Visit `http://localhost:2026/health`
|
||||
|
||||
**Example curl command**:
|
||||
```bash
|
||||
curl http://localhost:2026/health
|
||||
```
|
||||
|
||||
**Success Criteria**: Returns health status JSON.
|
||||
|
||||
---
|
||||
|
||||
#### 5.2.4 Check LangGraph Service
|
||||
|
||||
**Steps**:
|
||||
1. Visit relevant LangGraph endpoints to verify availability
|
||||
|
||||
---
|
||||
|
||||
## Optional Functional Verification

### List Available Models

**Steps**: Verify the model list through the API or UI.

---

### List Available Skills

**Steps**: Verify the skill list through the API or UI.

---

### Simple Chat Test

**Steps**: Send a simple message to test the complete workflow.
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Generate the Test Report
|
||||
|
||||
### 6.1 Collect Test Results
|
||||
|
||||
Summarize the execution status of each phase and record successful and failed items.
|
||||
|
||||
### 6.2 Record Issues
|
||||
|
||||
If anything fails, record detailed error information.
|
||||
|
||||
### 6.3 Generate the Report
|
||||
|
||||
Use the template to create a complete test report.
|
||||
|
||||
### 6.4 Provide Recommendations
|
||||
|
||||
Provide follow-up recommendations based on the test results.
|
||||
@@ -0,0 +1,612 @@
|
||||
# Troubleshooting Guide
|
||||
|
||||
This document lists common issues encountered during DeerFlow smoke testing and how to resolve them.
|
||||
|
||||
## Code Update Issues
|
||||
|
||||
### Issue: `git pull` Fails with a Merge Conflict Warning
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
error: Your local changes to the following files would be overwritten by merge
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Option A: Commit local changes first
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "Save local changes"
|
||||
git pull origin main
|
||||
```
|
||||
|
||||
2. Option B: Stash local changes
|
||||
```bash
|
||||
git stash
|
||||
git pull origin main
|
||||
git stash pop # Restore changes later if needed
|
||||
```
|
||||
|
||||
3. Option C: Discard local changes (use with caution)
|
||||
```bash
|
||||
git reset --hard HEAD
|
||||
git pull origin main
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Local Mode Environment Issues
|
||||
|
||||
### Issue: Node.js Version Is Too Old
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
Node.js version is too old. Requires 22+, got x.x.x
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Install or upgrade Node.js with nvm:
|
||||
```bash
|
||||
nvm install 22
|
||||
nvm use 22
|
||||
```
|
||||
|
||||
2. Or download and install it from the official website: https://nodejs.org/
|
||||
|
||||
3. Verify the version:
|
||||
```bash
|
||||
node --version
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: pnpm Is Not Installed
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
command not found: pnpm
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Install pnpm with npm:
|
||||
```bash
|
||||
npm install -g pnpm
|
||||
```
|
||||
|
||||
2. Or use the official installation script:
|
||||
```bash
|
||||
curl -fsSL https://get.pnpm.io/install.sh | sh -
|
||||
```
|
||||
|
||||
3. Verify the installation:
|
||||
```bash
|
||||
pnpm --version
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: uv Is Not Installed
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
command not found: uv
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Use the official installation script:
|
||||
```bash
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
```
|
||||
|
||||
2. macOS users can also install it with Homebrew:
|
||||
```bash
|
||||
brew install uv
|
||||
```
|
||||
|
||||
3. Verify the installation:
|
||||
```bash
|
||||
uv --version
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: nginx Is Not Installed
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
command not found: nginx
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. macOS (Homebrew):
|
||||
```bash
|
||||
brew install nginx
|
||||
```
|
||||
|
||||
2. Ubuntu/Debian:
|
||||
```bash
|
||||
sudo apt update
|
||||
sudo apt install nginx
|
||||
```
|
||||
|
||||
3. CentOS/RHEL:
|
||||
```bash
|
||||
sudo yum install nginx
|
||||
```
|
||||
|
||||
4. Verify the installation:
|
||||
```bash
|
||||
nginx -v
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Port Is Already in Use
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
Error: listen EADDRINUSE: address already in use :::2026
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Find the process using the port:
|
||||
```bash
|
||||
lsof -i :2026 # macOS/Linux
|
||||
netstat -ano | findstr :2026 # Windows
|
||||
```
|
||||
|
||||
2. Stop that process:
|
||||
```bash
|
||||
kill -9 <PID> # macOS/Linux
|
||||
taskkill /PID <PID> /F # Windows
|
||||
```
|
||||
|
||||
3. Or stop DeerFlow services first:
|
||||
```bash
|
||||
make stop
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Local Mode Dependency Installation Issues
|
||||
|
||||
### Issue: `make install` Fails Due to Network Timeout
|
||||
|
||||
**Symptoms**:
|
||||
Network timeouts or connection failures occur during dependency installation.
|
||||
|
||||
**Solutions**:
|
||||
1. Configure pnpm to use a mirror registry:
|
||||
```bash
|
||||
pnpm config set registry https://registry.npmmirror.com
|
||||
```
|
||||
|
||||
2. Configure uv to use a mirror index by setting its index environment variable:
```bash
export UV_DEFAULT_INDEX="https://pypi.tuna.tsinghua.edu.cn/simple"
# Older uv releases read UV_INDEX_URL instead
```
|
||||
|
||||
3. Retry the installation:
|
||||
```bash
|
||||
make install
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Python Dependency Installation Fails
|
||||
|
||||
**Symptoms**:
|
||||
Errors occur during `uv sync`.
|
||||
|
||||
**Solutions**:
|
||||
1. Clean the uv cache:
|
||||
```bash
|
||||
cd backend
|
||||
uv cache clean
|
||||
```
|
||||
|
||||
2. Resync dependencies:
|
||||
```bash
|
||||
cd backend
|
||||
uv sync
|
||||
```
|
||||
|
||||
3. View detailed error logs:
|
||||
```bash
|
||||
cd backend
|
||||
uv sync --verbose
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Frontend Dependency Installation Fails
|
||||
|
||||
**Symptoms**:
|
||||
Errors occur during `pnpm install`.
|
||||
|
||||
**Solutions**:
|
||||
1. Clean the pnpm cache:
|
||||
```bash
|
||||
cd frontend
|
||||
pnpm store prune
|
||||
```
|
||||
|
||||
2. Remove node_modules and the lock file:
|
||||
```bash
|
||||
cd frontend
|
||||
rm -rf node_modules pnpm-lock.yaml
|
||||
```
|
||||
|
||||
3. Reinstall:
|
||||
```bash
|
||||
cd frontend
|
||||
pnpm install
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Local Mode Service Startup Issues
|
||||
|
||||
### Issue: Services Exit Immediately After Startup
|
||||
|
||||
**Symptoms**:
|
||||
Processes exit quickly after running `make dev-daemon`.
|
||||
|
||||
**Solutions**:
|
||||
1. Check log files:
|
||||
```bash
|
||||
tail -f logs/langgraph.log
|
||||
tail -f logs/gateway.log
|
||||
tail -f logs/frontend.log
|
||||
tail -f logs/nginx.log
|
||||
```
|
||||
|
||||
2. Check whether config.yaml is configured correctly
|
||||
3. Check environment variables in the .env file
|
||||
4. Confirm that required ports are not occupied
|
||||
5. Stop all services and restart:
|
||||
```bash
|
||||
make stop
|
||||
make dev-daemon
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Nginx Fails to Start Because Temp Directories Do Not Exist
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
nginx: [emerg] mkdir() "/opt/homebrew/var/run/nginx/client_body_temp" failed (2: No such file or directory)
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
Add local temp directory configuration to `docker/nginx/nginx.local.conf` so nginx uses the repository's temp directory.
|
||||
|
||||
Add the following at the beginning of the `http` block:
|
||||
```nginx
|
||||
client_body_temp_path temp/client_body_temp;
|
||||
proxy_temp_path temp/proxy_temp;
|
||||
fastcgi_temp_path temp/fastcgi_temp;
|
||||
uwsgi_temp_path temp/uwsgi_temp;
|
||||
scgi_temp_path temp/scgi_temp;
|
||||
```
|
||||
|
||||
Note: The `temp/` directory under the repository root is created automatically by `make dev` or `make dev-daemon`.
|
||||
|
||||
---
|
||||
|
||||
### Issue: Nginx Fails to Start (General)
|
||||
|
||||
**Symptoms**:
|
||||
The nginx process fails to start or reports an error.
|
||||
|
||||
**Solutions**:
|
||||
1. Check the nginx configuration:
|
||||
```bash
|
||||
nginx -t -c docker/nginx/nginx.local.conf -p .
|
||||
```
|
||||
|
||||
2. Check nginx logs:
|
||||
```bash
|
||||
tail -f logs/nginx.log
|
||||
```
|
||||
|
||||
3. Ensure no other nginx process is running:
|
||||
```bash
|
||||
ps aux | grep nginx
|
||||
```
|
||||
|
||||
4. If needed, stop existing nginx processes:
|
||||
```bash
|
||||
pkill -9 nginx
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Frontend Compilation Fails
|
||||
|
||||
**Symptoms**:
|
||||
Compilation errors appear in `frontend.log`.
|
||||
|
||||
**Solutions**:
|
||||
1. Check frontend logs:
|
||||
```bash
|
||||
tail -f logs/frontend.log
|
||||
```
|
||||
|
||||
2. Check whether Node.js version is 22+
|
||||
3. Reinstall frontend dependencies:
|
||||
```bash
|
||||
cd frontend
|
||||
rm -rf node_modules .next
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. Restart services:
|
||||
```bash
|
||||
make stop
|
||||
make dev-daemon
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Issue: Gateway Fails to Start
|
||||
|
||||
**Symptoms**:
|
||||
Errors appear in `gateway.log`.
|
||||
|
||||
**Solutions**:
|
||||
1. Check gateway logs:
|
||||
```bash
|
||||
tail -f logs/gateway.log
|
||||
```
|
||||
|
||||
2. Check whether config.yaml exists and has valid formatting
|
||||
3. Check whether Python dependencies are complete:
|
||||
```bash
|
||||
cd backend
|
||||
uv sync
|
||||
```
|
||||
|
||||
4. Confirm that the LangGraph service is running normally (if not in gateway mode)
|
||||
|
||||
---
|
||||
|
||||
### Issue: LangGraph Fails to Start
|
||||
|
||||
**Symptoms**:
|
||||
Errors appear in `langgraph.log`.
|
||||
|
||||
**Solutions**:
|
||||
1. Check LangGraph logs:
|
||||
```bash
|
||||
tail -f logs/langgraph.log
|
||||
```
|
||||
|
||||
2. Check config.yaml
|
||||
3. Check whether Python dependencies are complete
|
||||
4. Confirm that port 2024 is not occupied
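For example:

```bash
# Show whatever is listening on the LangGraph port
lsof -nP -iTCP:2024 -sTCP:LISTEN || echo "Port 2024 is free"
```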
|
||||
|
||||
---
|
||||
|
||||
## Docker-Related Issues
|
||||
|
||||
### Issue: Docker Commands Cannot Run
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
Cannot connect to the Docker daemon
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Confirm that Docker Desktop is running
|
||||
2. macOS: check whether the Docker icon appears in the top menu bar
|
||||
3. Linux: run `sudo systemctl start docker`
|
||||
4. Run `docker info` again to verify
|
||||
|
||||
---
|
||||
|
||||
### Issue: `make docker-init` Fails to Pull the Image
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
Error pulling image: connection refused
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Check network connectivity
|
||||
2. Configure a Docker image mirror if needed
|
||||
3. Check whether a proxy is required
|
||||
4. Switch to local installation mode if necessary (recommended)
|
||||
|
||||
---
|
||||
|
||||
## Configuration File Issues
|
||||
|
||||
### Issue: config.yaml Is Missing or Invalid
|
||||
|
||||
**Symptoms**:
|
||||
```
|
||||
Error: could not read config.yaml
|
||||
```
|
||||
|
||||
**Solutions**:
|
||||
1. Regenerate the configuration file:
|
||||
```bash
|
||||
make config
|
||||
```
|
||||
|
||||
2. Check YAML syntax:
|
||||
- Make sure indentation is correct (use 2 spaces)
|
||||
- Make sure there are no tab characters
|
||||
- Check that there is a space after each colon
|
||||
|
||||
3. Use a YAML validation tool to check the format
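For example, a quick syntax check with Python (this assumes PyYAML is importable in your environment):

```bash
# Quick YAML syntax check; prints a traceback with the parse error, if any
python3 -c "import yaml; yaml.safe_load(open('config.yaml'))" \
  && echo "✓ config.yaml parses as valid YAML"
```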
|
||||
|
||||
---
|
||||
|
||||
### Issue: Model API Key Is Not Configured
|
||||
|
||||
**Symptoms**:
|
||||
After services start, API requests fail with authentication errors.
|
||||
|
||||
**Solutions**:
|
||||
1. Edit the .env file and add the API key:
|
||||
```bash
|
||||
OPENAI_API_KEY=your-actual-api-key-here
|
||||
```
|
||||
|
||||
2. Restart services (local mode):
|
||||
```bash
|
||||
make stop
|
||||
make dev-daemon
|
||||
```
|
||||
|
||||
3. Restart services (Docker mode):
|
||||
```bash
|
||||
make docker-stop
|
||||
make docker-start
|
||||
```
|
||||
|
||||
4. Confirm that the model configuration in config.yaml references the environment variable correctly
|
||||
|
||||
---
|
||||
|
||||
## Service Health Check Issues
|
||||
|
||||
### Issue: Frontend Page Is Not Accessible
|
||||
|
||||
**Symptoms**:
|
||||
The browser shows a connection failure when visiting http://localhost:2026.
|
||||
|
||||
**Solutions** (local mode):
|
||||
1. Confirm that the nginx process is running:
|
||||
```bash
|
||||
ps aux | grep nginx
|
||||
```
|
||||
|
||||
2. Check nginx logs:
|
||||
```bash
|
||||
tail -f logs/nginx.log
|
||||
```
|
||||
|
||||
3. Check firewall settings
|
||||
|
||||
**Solutions** (Docker mode):
|
||||
1. Confirm that the nginx container is running:
|
||||
```bash
|
||||
docker ps | grep nginx
|
||||
```
|
||||
|
||||
2. Check nginx logs:
|
||||
```bash
|
||||
cd docker && docker compose -p deer-flow-dev -f docker-compose-dev.yaml logs nginx
|
||||
```
|
||||
|
||||
3. Check firewall settings
|
||||
|
||||
---
|
||||
|
||||
### Issue: API Gateway Health Check Fails
|
||||
|
||||
**Symptoms**:
|
||||
Accessing `/health` returns an error or times out.
|
||||
|
||||
**Solutions** (local mode):
|
||||
1. Check gateway logs:
|
||||
```bash
|
||||
tail -f logs/gateway.log
|
||||
```
|
||||
|
||||
2. Confirm that config.yaml exists and has valid formatting
|
||||
3. Check whether Python dependencies are complete
|
||||
4. Confirm that the LangGraph service is running normally
|
||||
|
||||
**Solutions** (Docker mode):
|
||||
1. Check gateway container logs:
|
||||
```bash
|
||||
make docker-logs-gateway
|
||||
```
|
||||
|
||||
2. Confirm that config.yaml is mounted correctly
|
||||
3. Check whether Python dependencies are complete
|
||||
4. Confirm that the LangGraph service is running normally
|
||||
|
||||
---
|
||||
|
||||
## Common Diagnostic Commands
|
||||
|
||||
### Local Mode Diagnostics
|
||||
|
||||
#### View All Service Processes
|
||||
```bash
|
||||
ps aux | grep -E "(langgraph|uvicorn|next|nginx)" | grep -v grep
|
||||
```
|
||||
|
||||
#### View Service Logs
|
||||
```bash
|
||||
# View all logs
|
||||
tail -f logs/*.log
|
||||
|
||||
# View specific service logs
|
||||
tail -f logs/langgraph.log
|
||||
tail -f logs/gateway.log
|
||||
tail -f logs/frontend.log
|
||||
tail -f logs/nginx.log
|
||||
```
|
||||
|
||||
#### Stop All Services
|
||||
```bash
|
||||
make stop
|
||||
```
|
||||
|
||||
#### Fully Reset the Local Environment
|
||||
```bash
|
||||
make stop
|
||||
make clean
|
||||
make config
|
||||
make install
|
||||
make dev-daemon
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Docker Mode Diagnostics
|
||||
|
||||
#### View All Container Status
|
||||
```bash
|
||||
docker ps -a
|
||||
```
|
||||
|
||||
#### View Container Resource Usage
|
||||
```bash
|
||||
docker stats
|
||||
```
|
||||
|
||||
#### Enter a Container for Debugging
|
||||
```bash
|
||||
docker exec -it deer-flow-gateway sh
|
||||
```
|
||||
|
||||
#### Clean Up All DeerFlow-Related Containers and Images
|
||||
```bash
|
||||
make docker-stop
|
||||
cd docker && docker compose -p deer-flow-dev -f docker-compose-dev.yaml down -v
|
||||
```
|
||||
|
||||
#### Fully Reset the Docker Environment
|
||||
```bash
|
||||
make docker-stop
|
||||
make clean
|
||||
make config
|
||||
make docker-init
|
||||
make docker-start
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Get More Help
|
||||
|
||||
If the solutions above do not resolve the issue:
|
||||
1. Check the GitHub issues for the project: https://github.com/bytedance/deer-flow/issues
|
||||
2. Review the project documentation: README.md and the `backend/docs/` directory
|
||||
3. Open a new issue and include detailed error logs
|
||||
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Checking Docker Environment"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
# Check whether Docker is installed
|
||||
if command -v docker >/dev/null 2>&1; then
|
||||
echo "✓ Docker is installed"
|
||||
docker --version
|
||||
else
|
||||
echo "✗ Docker is not installed"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check the Docker daemon
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "✓ Docker daemon is running normally"
|
||||
else
|
||||
echo "✗ Docker daemon is not running"
|
||||
echo " Please start Docker Desktop or the Docker service"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check Docker Compose
|
||||
if docker compose version >/dev/null 2>&1; then
|
||||
echo "✓ Docker Compose is available"
|
||||
docker compose version
|
||||
else
|
||||
echo "✗ Docker Compose is not available"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check port 2026
|
||||
if ! command -v lsof >/dev/null 2>&1; then
|
||||
echo "✗ lsof is required to check whether port 2026 is available"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
port_2026_usage="$(lsof -nP -iTCP:2026 -sTCP:LISTEN 2>/dev/null || true)"
|
||||
if [ -n "$port_2026_usage" ]; then
|
||||
echo "⚠ Port 2026 is already in use"
|
||||
echo " Occupying process:"
|
||||
echo "$port_2026_usage"
|
||||
|
||||
deerflow_process_found=0
|
||||
while IFS= read -r pid; do
|
||||
if [ -z "$pid" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
process_command="$(ps -p "$pid" -o command= 2>/dev/null || true)"
|
||||
case "$process_command" in
|
||||
*[Dd]eer[Ff]low*|*[Dd]eerflow*|*[Nn]ginx*deerflow*|*deerflow/*[Nn]ginx*)
|
||||
deerflow_process_found=1
|
||||
;;
|
||||
esac
|
||||
done <<EOF
|
||||
$(printf '%s\n' "$port_2026_usage" | awk 'NR > 1 {print $2}')
|
||||
EOF
|
||||
|
||||
if [ "$deerflow_process_found" -eq 1 ]; then
|
||||
echo "✓ Port 2026 is occupied by DeerFlow"
|
||||
else
|
||||
echo "✗ Port 2026 must be free before starting DeerFlow"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "✓ Port 2026 is available"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Docker Environment Check Complete"
|
||||
echo "=========================================="
|
||||
@@ -0,0 +1,93 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Checking Local Development Environment"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
all_passed=true
|
||||
|
||||
# Check Node.js
|
||||
echo "1. Checking Node.js..."
|
||||
if command -v node >/dev/null 2>&1; then
|
||||
NODE_VERSION=$(node --version | sed 's/v//')
|
||||
NODE_MAJOR=$(echo "$NODE_VERSION" | cut -d. -f1)
|
||||
if [ "$NODE_MAJOR" -ge 22 ]; then
|
||||
echo "✓ Node.js is installed (version: $NODE_VERSION)"
|
||||
else
|
||||
echo "✗ Node.js version is too old (current: $NODE_VERSION, required: 22+)"
|
||||
all_passed=false
|
||||
fi
|
||||
else
|
||||
echo "✗ Node.js is not installed"
|
||||
all_passed=false
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check pnpm
|
||||
echo "2. Checking pnpm..."
|
||||
if command -v pnpm >/dev/null 2>&1; then
|
||||
echo "✓ pnpm is installed (version: $(pnpm --version))"
|
||||
else
|
||||
echo "✗ pnpm is not installed"
|
||||
echo " Install command: npm install -g pnpm"
|
||||
all_passed=false
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check uv
|
||||
echo "3. Checking uv..."
|
||||
if command -v uv >/dev/null 2>&1; then
|
||||
echo "✓ uv is installed (version: $(uv --version))"
|
||||
else
|
||||
echo "✗ uv is not installed"
|
||||
all_passed=false
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check nginx
|
||||
echo "4. Checking nginx..."
|
||||
if command -v nginx >/dev/null 2>&1; then
|
||||
echo "✓ nginx is installed (version: $(nginx -v 2>&1))"
|
||||
else
|
||||
echo "✗ nginx is not installed"
|
||||
echo " macOS: brew install nginx"
|
||||
echo " Linux: install it with the system package manager"
|
||||
all_passed=false
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check ports
|
||||
echo "5. Checking ports..."
|
||||
if ! command -v lsof >/dev/null 2>&1; then
|
||||
echo "✗ lsof is not installed, so port availability cannot be verified"
|
||||
echo " Install lsof and rerun this check"
|
||||
all_passed=false
|
||||
else
|
||||
for port in 2026 3000 8001 2024; do
|
||||
if lsof -i :$port >/dev/null 2>&1; then
|
||||
echo "⚠ Port $port is already in use:"
|
||||
lsof -i :$port | head -2
|
||||
all_passed=false
|
||||
else
|
||||
echo "✓ Port $port is available"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Summary
|
||||
echo "=========================================="
|
||||
echo " Environment Check Summary"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
if [ "$all_passed" = true ]; then
|
||||
echo "✅ All environment checks passed!"
|
||||
echo ""
|
||||
echo "Next step: run make install to install dependencies"
|
||||
exit 0
|
||||
else
|
||||
echo "❌ Some checks failed. Please fix the issues above first"
|
||||
exit 1
|
||||
fi
|
||||
@@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Docker Deployment"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
# Check config.yaml
|
||||
if [ ! -f "config.yaml" ]; then
|
||||
echo "config.yaml does not exist. Generating it..."
|
||||
make config
|
||||
echo ""
|
||||
echo "⚠ Please edit config.yaml to configure your models and API keys"
|
||||
echo " Then run this script again"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ config.yaml exists"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check the .env file
|
||||
if [ ! -f ".env" ]; then
|
||||
echo ".env does not exist. Copying it from the example..."
|
||||
if [ -f ".env.example" ]; then
|
||||
cp .env.example .env
|
||||
echo "✓ Created the .env file"
|
||||
else
|
||||
echo "⚠ .env.example does not exist. Please create the .env file manually"
|
||||
fi
|
||||
else
|
||||
echo "✓ .env file exists"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check the frontend .env file
|
||||
if [ ! -f "frontend/.env" ]; then
|
||||
echo "frontend/.env does not exist. Copying it from the example..."
|
||||
if [ -f "frontend/.env.example" ]; then
|
||||
cp frontend/.env.example frontend/.env
|
||||
echo "✓ Created the frontend/.env file"
|
||||
else
|
||||
echo "⚠ frontend/.env.example does not exist. Please create frontend/.env manually"
|
||||
fi
|
||||
else
|
||||
echo "✓ frontend/.env file exists"
|
||||
fi
|
||||
echo ""
|
||||
# Initialize the Docker environment
|
||||
echo "Initializing the Docker environment..."
|
||||
make docker-init
|
||||
echo ""
|
||||
|
||||
# Start Docker services
|
||||
echo "Starting Docker services..."
|
||||
make docker-start
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Deployment Complete"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "🌐 Access URL: http://localhost:2026"
|
||||
echo "📋 View logs: make docker-logs"
|
||||
echo "🛑 Stop services: make docker-stop"
|
||||
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Local Mode Deployment"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
# Check config.yaml
|
||||
if [ ! -f "config.yaml" ]; then
|
||||
echo "config.yaml does not exist. Generating it..."
|
||||
make config
|
||||
echo ""
|
||||
echo "⚠ Please edit config.yaml to configure your models and API keys"
|
||||
echo " Then run this script again"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ config.yaml exists"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check the .env file
|
||||
if [ ! -f ".env" ]; then
|
||||
echo ".env does not exist. Copying it from the example..."
|
||||
if [ -f ".env.example" ]; then
|
||||
cp .env.example .env
|
||||
echo "✓ Created the .env file"
|
||||
else
|
||||
echo "⚠ .env.example does not exist. Please create the .env file manually"
|
||||
fi
|
||||
else
|
||||
echo "✓ .env file exists"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check dependencies
|
||||
echo "Checking dependencies..."
|
||||
make check
|
||||
echo ""
|
||||
|
||||
# Install dependencies
|
||||
echo "Installing dependencies..."
|
||||
make install
|
||||
echo ""
|
||||
|
||||
# Start services
|
||||
echo "Starting services (background mode)..."
|
||||
make dev-daemon
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Deployment Complete"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
echo "🌐 Access URL: http://localhost:2026"
|
||||
echo "📋 View logs:"
|
||||
echo " - logs/langgraph.log"
|
||||
echo " - logs/gateway.log"
|
||||
echo " - logs/frontend.log"
|
||||
echo " - logs/nginx.log"
|
||||
echo "🛑 Stop services: make stop"
|
||||
echo ""
|
||||
echo "Please wait 90-120 seconds for all services to start completely, then run the health check"
|
||||
@@ -0,0 +1,70 @@
|
||||
#!/usr/bin/env bash
|
||||
set +e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Frontend Page Smoke Check"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
BASE_URL="${BASE_URL:-http://localhost:2026}"
|
||||
DOC_PATH="${DOC_PATH:-/en/docs}"
|
||||
|
||||
all_passed=true
|
||||
|
||||
check_status() {
|
||||
local name="$1"
|
||||
local url="$2"
|
||||
local expected_re="$3"
|
||||
|
||||
local status
|
||||
status="$(curl -s -o /dev/null -w "%{http_code}" -L "$url")"
|
||||
if echo "$status" | grep -Eq "$expected_re"; then
|
||||
echo "✓ $name ($url) -> $status"
|
||||
else
|
||||
echo "✗ $name ($url) -> $status (expected: $expected_re)"
|
||||
all_passed=false
|
||||
fi
|
||||
}
|
||||
|
||||
check_final_url() {
|
||||
local name="$1"
|
||||
local url="$2"
|
||||
local expected_path_re="$3"
|
||||
|
||||
local effective
|
||||
effective="$(curl -s -o /dev/null -w "%{url_effective}" -L "$url")"
|
||||
if echo "$effective" | grep -Eq "$expected_path_re"; then
|
||||
echo "✓ $name redirect target -> $effective"
|
||||
else
|
||||
echo "✗ $name redirect target -> $effective (expected path: $expected_path_re)"
|
||||
all_passed=false
|
||||
fi
|
||||
}
|
||||
|
||||
echo "1. Checking entry pages..."
|
||||
check_status "Landing page" "${BASE_URL}/" "200"
|
||||
check_status "Workspace redirect" "${BASE_URL}/workspace" "200|301|302|307|308"
|
||||
check_final_url "Workspace redirect" "${BASE_URL}/workspace" "/workspace/chats/"
|
||||
echo ""
|
||||
|
||||
echo "2. Checking key workspace routes..."
|
||||
check_status "New chat page" "${BASE_URL}/workspace/chats/new" "200"
|
||||
check_status "Chats list page" "${BASE_URL}/workspace/chats" "200"
|
||||
check_status "Agents gallery page" "${BASE_URL}/workspace/agents" "200"
|
||||
echo ""
|
||||
|
||||
echo "3. Checking docs route (optional)..."
|
||||
check_status "Docs page" "${BASE_URL}${DOC_PATH}" "200|404"
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Frontend Smoke Check Summary"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
if [ "$all_passed" = true ]; then
|
||||
echo "✅ Frontend smoke checks passed!"
|
||||
exit 0
|
||||
else
|
||||
echo "❌ Frontend smoke checks failed"
|
||||
exit 1
|
||||
fi
|
||||
@@ -0,0 +1,125 @@
|
||||
#!/usr/bin/env bash
|
||||
set +e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Service Health Check"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
all_passed=true
|
||||
mode="${SMOKE_TEST_MODE:-auto}"
|
||||
summary_hint="make logs"
|
||||
|
||||
print_step() {
|
||||
echo "$1"
|
||||
}
|
||||
|
||||
check_http_status() {
|
||||
local name="$1"
|
||||
local url="$2"
|
||||
local expected_re="$3"
|
||||
local status
|
||||
|
||||
status="$(curl -s -o /dev/null -w "%{http_code}" "$url" 2>/dev/null)"
|
||||
if echo "$status" | grep -Eq "$expected_re"; then
|
||||
echo "✓ $name is accessible ($url -> $status)"
|
||||
else
|
||||
echo "✗ $name is not accessible ($url -> ${status:-000})"
|
||||
all_passed=false
|
||||
fi
|
||||
}
|
||||
|
||||
check_listen_port() {
|
||||
local name="$1"
|
||||
local port="$2"
|
||||
|
||||
if lsof -nP -iTCP:"$port" -sTCP:LISTEN >/dev/null 2>&1; then
|
||||
echo "✓ $name is listening on port $port"
|
||||
else
|
||||
echo "✗ $name is not listening on port $port"
|
||||
all_passed=false
|
||||
fi
|
||||
}
|
||||
|
||||
docker_available() {
|
||||
command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1
|
||||
}
|
||||
|
||||
detect_mode() {
|
||||
case "$mode" in
|
||||
local|docker)
|
||||
echo "$mode"
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
if docker_available && docker ps --format "{{.Names}}" | grep -q "deer-flow"; then
|
||||
echo "docker"
|
||||
else
|
||||
echo "local"
|
||||
fi
|
||||
}
|
||||
|
||||
mode="$(detect_mode)"
|
||||
|
||||
echo "Deployment mode: $mode"
|
||||
echo ""
|
||||
|
||||
if [ "$mode" = "docker" ]; then
|
||||
summary_hint="make docker-logs"
|
||||
print_step "1. Checking container status..."
|
||||
if docker ps --format "{{.Names}}" | grep -q "deer-flow"; then
|
||||
echo "✓ Containers are running:"
|
||||
docker ps --format " - {{.Names}} ({{.Status}})"
|
||||
else
|
||||
echo "✗ No DeerFlow-related containers are running"
|
||||
all_passed=false
|
||||
fi
|
||||
else
|
||||
summary_hint="logs/{langgraph,gateway,frontend,nginx}.log"
|
||||
print_step "1. Checking local service ports..."
|
||||
check_listen_port "Nginx" 2026
|
||||
check_listen_port "Frontend" 3000
|
||||
check_listen_port "Gateway" 8001
|
||||
check_listen_port "LangGraph" 2024
|
||||
fi
|
||||
echo ""
|
||||
|
||||
echo "2. Waiting for services to fully start (30 seconds)..."
|
||||
sleep 30
|
||||
echo ""
|
||||
|
||||
echo "3. Checking frontend service..."
|
||||
check_http_status "Frontend service" "http://localhost:2026" "200|301|302|307|308"
|
||||
echo ""
|
||||
|
||||
echo "4. Checking API Gateway..."
|
||||
health_response=$(curl -s http://localhost:2026/health 2>/dev/null)
|
||||
if [ $? -eq 0 ] && [ -n "$health_response" ]; then
|
||||
echo "✓ API Gateway health check passed"
|
||||
echo " Response: $health_response"
|
||||
else
|
||||
echo "✗ API Gateway health check failed"
|
||||
all_passed=false
|
||||
fi
|
||||
echo ""
|
||||
|
||||
echo "5. Checking LangGraph service..."
|
||||
check_http_status "LangGraph service" "http://localhost:2024/" "200|301|302|307|308|404"
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Health Check Summary"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
if [ "$all_passed" = true ]; then
|
||||
echo "✅ All checks passed!"
|
||||
echo ""
|
||||
echo "🌐 Application URL: http://localhost:2026"
|
||||
exit 0
|
||||
else
|
||||
echo "❌ Some checks failed"
|
||||
echo ""
|
||||
echo "Please review: $summary_hint"
|
||||
exit 1
|
||||
fi
|
||||
+49
@@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
echo "=========================================="
|
||||
echo " Pulling the Latest Code"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
# Check whether the current directory is a Git repository
|
||||
if [ ! -d ".git" ]; then
|
||||
echo "✗ The current directory is not a Git repository"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check Git status
|
||||
echo "Checking Git status..."
|
||||
if git status --porcelain | grep -q .; then
|
||||
echo "⚠ Uncommitted changes detected:"
|
||||
git status --short
|
||||
echo ""
|
||||
echo "Please commit or stash your changes before continuing"
|
||||
echo "Options:"
|
||||
echo " 1. git add . && git commit -m 'Save changes'"
|
||||
echo " 2. git stash (stash changes and restore them later)"
|
||||
echo " 3. git reset --hard HEAD (discard local changes - use with caution)"
|
||||
exit 1
|
||||
else
|
||||
echo "✓ Working tree is clean"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Fetch remote updates
|
||||
echo "Fetching remote updates..."
|
||||
git fetch origin main
|
||||
echo ""
|
||||
|
||||
# Pull the latest code
|
||||
echo "Pulling the latest code..."
|
||||
git pull origin main
|
||||
echo ""
|
||||
|
||||
# Show the latest commit
|
||||
echo "Latest commit:"
|
||||
git log -1 --oneline
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo " Code Update Complete"
|
||||
echo "=========================================="
|
||||
@@ -0,0 +1,180 @@
|
||||
# DeerFlow Smoke Test Report
|
||||
|
||||
**Test Date**: {{test_date}}
|
||||
**Test Environment**: {{test_environment}}
|
||||
**Deployment Mode**: Docker
|
||||
**Test Version**: {{git_commit}}
|
||||
|
||||
---
|
||||
|
||||
## Execution Summary
|
||||
|
||||
| Metric | Status |
|
||||
|------|------|
|
||||
| Total Test Phases | 6 |
|
||||
| Passed Phases | {{passed_stages}} |
|
||||
| Failed Phases | {{failed_stages}} |
|
||||
| Overall Conclusion | **{{overall_status}}** |
|
||||
|
||||
### Key Test Cases
|
||||
|
||||
| Case | Result | Details |
|
||||
|------|--------|---------|
|
||||
| Code update check | {{case_code_update}} | {{case_code_update_details}} |
|
||||
| Environment check | {{case_env_check}} | {{case_env_check_details}} |
|
||||
| Configuration preparation | {{case_config_prep}} | {{case_config_prep_details}} |
|
||||
| Deployment | {{case_deploy}} | {{case_deploy_details}} |
|
||||
| Health check | {{case_health_check}} | {{case_health_check_details}} |
|
||||
| Frontend routes | {{case_frontend_routes_overall}} | {{case_frontend_routes_details}} |
|
||||
|
||||
---
|
||||
|
||||
## Detailed Test Results
|
||||
|
||||
### Phase 1: Code Update Check
|
||||
|
||||
- [x] Confirm current directory - {{status_dir_check}}
|
||||
- [x] Check Git status - {{status_git_status}}
|
||||
- [x] Pull latest code - {{status_git_pull}}
|
||||
- [x] Confirm code update - {{status_git_verify}}
|
||||
|
||||
**Phase Status**: {{stage1_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Docker Environment Check
|
||||
|
||||
- [x] Docker version - {{status_docker_version}}
|
||||
- [x] Docker daemon - {{status_docker_daemon}}
|
||||
- [x] Docker Compose - {{status_docker_compose}}
|
||||
- [x] Port check - {{status_port_check}}
|
||||
|
||||
**Phase Status**: {{stage2_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Configuration Preparation
|
||||
|
||||
- [x] config.yaml - {{status_config_yaml}}
|
||||
- [x] .env file - {{status_env_file}}
|
||||
- [x] Model configuration - {{status_model_config}}
|
||||
|
||||
**Phase Status**: {{stage3_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Docker Deployment
|
||||
|
||||
- [x] docker-init - {{status_docker_init}}
|
||||
- [x] docker-start - {{status_docker_start}}
|
||||
- [x] Service startup wait - {{status_wait_startup}}
|
||||
|
||||
**Phase Status**: {{stage4_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 5: Service Health Check
|
||||
|
||||
- [x] Container status - {{status_containers}}
|
||||
- [x] Frontend service - {{status_frontend}}
|
||||
- [x] API Gateway - {{status_api_gateway}}
|
||||
- [x] LangGraph service - {{status_langgraph}}
|
||||
|
||||
**Phase Status**: {{stage5_status}}
|
||||
|
||||
---
|
||||
|
||||
### Frontend Routes Smoke Results
|
||||
|
||||
| Route | Status | Details |
|
||||
|-------|--------|---------|
|
||||
| Landing `/` | {{landing_status}} | {{landing_details}} |
|
||||
| Workspace redirect `/workspace` | {{workspace_redirect_status}} | target {{workspace_redirect_target}} |
|
||||
| New chat `/workspace/chats/new` | {{new_chat_status}} | {{new_chat_details}} |
|
||||
| Chats list `/workspace/chats` | {{chats_list_status}} | {{chats_list_details}} |
|
||||
| Agents gallery `/workspace/agents` | {{agents_gallery_status}} | {{agents_gallery_details}} |
|
||||
| Docs `{{docs_path}}` | {{docs_status}} | {{docs_details}} |
|
||||
|
||||
**Summary**: {{frontend_routes_summary}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 6: Test Report Generation
|
||||
|
||||
- [x] Result summary - {{status_summary}}
|
||||
- [x] Issue log - {{status_issues}}
|
||||
- [x] Report generation - {{status_report}}
|
||||
|
||||
**Phase Status**: {{stage6_status}}
|
||||
|
||||
---
|
||||
|
||||
## Issue Log
|
||||
|
||||
### Issue 1
|
||||
**Description**: {{issue1_description}}
|
||||
**Severity**: {{issue1_severity}}
|
||||
**Solution**: {{issue1_solution}}
|
||||
|
||||
---
|
||||
|
||||
## Environment Information
|
||||
|
||||
### Docker Version
|
||||
```text
|
||||
{{docker_version_output}}
|
||||
```
|
||||
|
||||
### Git Information
|
||||
```text
|
||||
Repository: {{git_repo}}
|
||||
Branch: {{git_branch}}
|
||||
Commit: {{git_commit}}
|
||||
Commit Message: {{git_commit_message}}
|
||||
```
|
||||
|
||||
### Configuration Summary
|
||||
- config.yaml exists: {{config_exists}}
|
||||
- .env file exists: {{env_exists}}
|
||||
- Number of configured models: {{model_count}}
|
||||
|
||||
---
|
||||
|
||||
## Container Status
|
||||
|
||||
| Container Name | Status | Uptime |
|
||||
|----------|------|----------|
|
||||
| deer-flow-nginx | {{nginx_status}} | {{nginx_uptime}} |
|
||||
| deer-flow-frontend | {{frontend_status}} | {{frontend_uptime}} |
|
||||
| deer-flow-gateway | {{gateway_status}} | {{gateway_uptime}} |
|
||||
| deer-flow-langgraph | {{langgraph_status}} | {{langgraph_uptime}} |
|
||||
|
||||
---
|
||||
|
||||
## Recommendations and Next Steps
|
||||
|
||||
### If the Test Passes
|
||||
1. [ ] Visit http://localhost:2026 to start using DeerFlow
|
||||
2. [ ] Configure your preferred model if it is not configured yet
|
||||
3. [ ] Explore available skills
|
||||
4. [ ] Refer to the documentation to learn more features
|
||||
|
||||
### If the Test Fails
|
||||
1. [ ] Review references/troubleshooting.md for common solutions
|
||||
2. [ ] Check Docker logs: `make docker-logs`
|
||||
3. [ ] Verify configuration file format and content
|
||||
4. [ ] If needed, fully reset the environment: `make clean && make config && make docker-init && make docker-start`
|
||||
|
||||
---
|
||||
|
||||
## Appendix
|
||||
|
||||
### Full Logs
|
||||
{{full_logs}}
|
||||
|
||||
### Tester
|
||||
{{tester_name}}
|
||||
|
||||
---
|
||||
|
||||
*Report generated at: {{report_time}}*
|
||||
@@ -0,0 +1,185 @@
|
||||
# DeerFlow Smoke Test Report
|
||||
|
||||
**Test Date**: {{test_date}}
|
||||
**Test Environment**: {{test_environment}}
|
||||
**Deployment Mode**: Local
|
||||
**Test Version**: {{git_commit}}
|
||||
|
||||
---
|
||||
|
||||
## Execution Summary
|
||||
|
||||
| Metric | Status |
|
||||
|------|------|
|
||||
| Total Test Phases | 6 |
|
||||
| Passed Phases | {{passed_stages}} |
|
||||
| Failed Phases | {{failed_stages}} |
|
||||
| Overall Conclusion | **{{overall_status}}** |
|
||||
|
||||
### Key Test Cases
|
||||
|
||||
| Case | Result | Details |
|
||||
|------|--------|---------|
|
||||
| Code update check | {{case_code_update}} | {{case_code_update_details}} |
|
||||
| Environment check | {{case_env_check}} | {{case_env_check_details}} |
|
||||
| Configuration preparation | {{case_config_prep}} | {{case_config_prep_details}} |
|
||||
| Deployment | {{case_deploy}} | {{case_deploy_details}} |
|
||||
| Health check | {{case_health_check}} | {{case_health_check_details}} |
|
||||
| Frontend routes | {{case_frontend_routes_overall}} | {{case_frontend_routes_details}} |
|
||||
|
||||
---
|
||||
|
||||
## Detailed Test Results
|
||||
|
||||
### Phase 1: Code Update Check
|
||||
|
||||
- [x] Confirm current directory - {{status_dir_check}}
|
||||
- [x] Check Git status - {{status_git_status}}
|
||||
- [x] Pull latest code - {{status_git_pull}}
|
||||
- [x] Confirm code update - {{status_git_verify}}
|
||||
|
||||
**Phase Status**: {{stage1_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Local Environment Check
|
||||
|
||||
- [x] Node.js version - {{status_node_version}}
|
||||
- [x] pnpm - {{status_pnpm}}
|
||||
- [x] uv - {{status_uv}}
|
||||
- [x] nginx - {{status_nginx}}
|
||||
- [x] Port check - {{status_port_check}}
|
||||
|
||||
**Phase Status**: {{stage2_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Configuration Preparation
|
||||
|
||||
- [x] config.yaml - {{status_config_yaml}}
|
||||
- [x] .env file - {{status_env_file}}
|
||||
- [x] Model configuration - {{status_model_config}}
|
||||
|
||||
**Phase Status**: {{stage3_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Local Deployment
|
||||
|
||||
- [x] make check - {{status_make_check}}
|
||||
- [x] make install - {{status_make_install}}
|
||||
- [x] make dev-daemon / make dev - {{status_local_start}}
|
||||
- [x] Service startup wait - {{status_wait_startup}}
|
||||
|
||||
**Phase Status**: {{stage4_status}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 5: Service Health Check
|
||||
|
||||
- [x] Process status - {{status_processes}}
|
||||
- [x] Frontend service - {{status_frontend}}
|
||||
- [x] API Gateway - {{status_api_gateway}}
|
||||
- [x] LangGraph service - {{status_langgraph}}
|
||||
|
||||
**Phase Status**: {{stage5_status}}
|
||||
|
||||
---
|
||||
|
||||
### Frontend Routes Smoke Results
|
||||
|
||||
| Route | Status | Details |
|
||||
|-------|--------|---------|
|
||||
| Landing `/` | {{landing_status}} | {{landing_details}} |
|
||||
| Workspace redirect `/workspace` | {{workspace_redirect_status}} | target {{workspace_redirect_target}} |
|
||||
| New chat `/workspace/chats/new` | {{new_chat_status}} | {{new_chat_details}} |
|
||||
| Chats list `/workspace/chats` | {{chats_list_status}} | {{chats_list_details}} |
|
||||
| Agents gallery `/workspace/agents` | {{agents_gallery_status}} | {{agents_gallery_details}} |
|
||||
| Docs `{{docs_path}}` | {{docs_status}} | {{docs_details}} |
|
||||
|
||||
**Summary**: {{frontend_routes_summary}}
|
||||
|
||||
---
|
||||
|
||||
### Phase 6: Test Report Generation
|
||||
|
||||
- [x] Result summary - {{status_summary}}
|
||||
- [x] Issue log - {{status_issues}}
|
||||
- [x] Report generation - {{status_report}}
|
||||
|
||||
**Phase Status**: {{stage6_status}}
|
||||
|
||||
---
|
||||
|
||||
## Issue Log
|
||||
|
||||
### Issue 1
|
||||
**Description**: {{issue1_description}}
|
||||
**Severity**: {{issue1_severity}}
|
||||
**Solution**: {{issue1_solution}}
|
||||
|
||||
---
|
||||
|
||||
## Environment Information
|
||||
|
||||
### Local Dependency Versions
|
||||
```text
|
||||
Node.js: {{node_version_output}}
|
||||
pnpm: {{pnpm_version_output}}
|
||||
uv: {{uv_version_output}}
|
||||
nginx: {{nginx_version_output}}
|
||||
```
|
||||
|
||||
### Git Information
|
||||
```text
|
||||
Repository: {{git_repo}}
|
||||
Branch: {{git_branch}}
|
||||
Commit: {{git_commit}}
|
||||
Commit Message: {{git_commit_message}}
|
||||
```
|
||||
|
||||
### Configuration Summary
|
||||
- config.yaml exists: {{config_exists}}
|
||||
- .env file exists: {{env_exists}}
|
||||
- Number of configured models: {{model_count}}
|
||||
|
||||
---
|
||||
|
||||
## Local Service Status
|
||||
|
||||
| Service | Status | Endpoint |
|
||||
|---------|--------|----------|
|
||||
| Nginx | {{nginx_status}} | {{nginx_endpoint}} |
|
||||
| Frontend | {{frontend_status}} | {{frontend_endpoint}} |
|
||||
| Gateway | {{gateway_status}} | {{gateway_endpoint}} |
|
||||
| LangGraph | {{langgraph_status}} | {{langgraph_endpoint}} |
|
||||
|
||||
---
|
||||
|
||||
## Recommendations and Next Steps
|
||||
|
||||
### If the Test Passes
|
||||
1. [ ] Visit http://localhost:2026 to start using DeerFlow
|
||||
2. [ ] Configure your preferred model if it is not configured yet
|
||||
3. [ ] Explore available skills
|
||||
4. [ ] Refer to the documentation to learn more features
|
||||
|
||||
### If the Test Fails
|
||||
1. [ ] Review references/troubleshooting.md for common solutions
|
||||
2. [ ] Check local logs: `logs/{langgraph,gateway,frontend,nginx}.log`
|
||||
3. [ ] Verify configuration file format and content
|
||||
4. [ ] If needed, fully reset the environment: `make stop && make clean && make install && make dev-daemon`
|
||||
|
||||
---
|
||||
|
||||
## Appendix
|
||||
|
||||
### Full Logs
|
||||
{{full_logs}}
|
||||
|
||||
### Tester
|
||||
{{tester_name}}
|
||||
|
||||
---
|
||||
|
||||
*Report generated at: {{report_time}}*
|
||||
@@ -17,12 +17,14 @@ INFOQUEST_API_KEY=your-infoquest-api-key
|
||||
# DEEPSEEK_API_KEY=your-deepseek-api-key
|
||||
# NOVITA_API_KEY=your-novita-api-key # OpenAI-compatible, see https://novita.ai
|
||||
# MINIMAX_API_KEY=your-minimax-api-key # OpenAI-compatible, see https://platform.minimax.io
|
||||
# VLLM_API_KEY=your-vllm-api-key # OpenAI-compatible
|
||||
# FEISHU_APP_ID=your-feishu-app-id
|
||||
# FEISHU_APP_SECRET=your-feishu-app-secret
|
||||
|
||||
# SLACK_BOT_TOKEN=your-slack-bot-token
|
||||
# SLACK_APP_TOKEN=your-slack-app-token
|
||||
# TELEGRAM_BOT_TOKEN=your-telegram-bot-token
|
||||
# DISCORD_BOT_TOKEN=your-discord-bot-token
|
||||
|
||||
# Enable LangSmith to monitor and debug your LLM calls, agent runs, and tool executions.
|
||||
# LANGSMITH_TRACING=true
|
||||
@@ -32,3 +34,9 @@ INFOQUEST_API_KEY=your-infoquest-api-key
|
||||
|
||||
# GitHub API Token
|
||||
# GITHUB_TOKEN=your-github-token
|
||||
|
||||
# Database (only needed when config.yaml has database.backend: postgres)
|
||||
# DATABASE_URL=postgresql://deerflow:password@localhost:5432/deerflow
|
||||
#
|
||||
# WECOM_BOT_ID=your-wecom-bot-id
|
||||
# WECOM_BOT_SECRET=your-wecom-bot-secret
|
||||
|
||||
@@ -0,0 +1,63 @@
|
||||
name: E2E Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ 'main' ]
|
||||
paths:
|
||||
- 'frontend/**'
|
||||
- '.github/workflows/e2e-tests.yml'
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
paths:
|
||||
- 'frontend/**'
|
||||
- '.github/workflows/e2e-tests.yml'
|
||||
|
||||
concurrency:
|
||||
group: e2e-tests-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
e2e-tests:
|
||||
if: ${{ github.event_name != 'pull_request' || github.event.pull_request.draft == false }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22'
|
||||
|
||||
- name: Enable Corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Use pinned pnpm version
|
||||
run: corepack prepare pnpm@10.26.2 --activate
|
||||
|
||||
- name: Install frontend dependencies
|
||||
working-directory: frontend
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Install Playwright Chromium
|
||||
working-directory: frontend
|
||||
run: npx playwright install chromium --with-deps
|
||||
|
||||
- name: Run E2E tests
|
||||
working-directory: frontend
|
||||
run: pnpm exec playwright test
|
||||
env:
|
||||
SKIP_ENV_VALIDATION: '1'
|
||||
|
||||
- name: Upload Playwright report
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: playwright-report
|
||||
path: frontend/playwright-report/
|
||||
retention-days: 7
|
||||
@@ -0,0 +1,43 @@
|
||||
name: Frontend Unit Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ 'main' ]
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
|
||||
concurrency:
|
||||
group: frontend-unit-tests-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
frontend-unit-tests:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22'
|
||||
|
||||
- name: Enable Corepack
|
||||
run: corepack enable
|
||||
|
||||
- name: Use pinned pnpm version
|
||||
run: corepack prepare pnpm@10.26.2 --activate
|
||||
|
||||
- name: Install frontend dependencies
|
||||
working-directory: frontend
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run frontend unit tests
|
||||
working-directory: frontend
|
||||
run: make test
|
||||
@@ -40,6 +40,7 @@ coverage/
|
||||
skills/custom/*
|
||||
logs/
|
||||
log/
|
||||
debug.log
|
||||
|
||||
# Local git hooks (keep only on this machine, do not push)
|
||||
.githooks/
|
||||
@@ -54,3 +55,8 @@ web/
|
||||
# Deployment artifacts
|
||||
backend/Dockerfile.langgraph
|
||||
config.yaml.bak
|
||||
.playwright-mcp
|
||||
/frontend/test-results/
|
||||
/frontend/playwright-report/
|
||||
.gstack/
|
||||
.worktrees
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
repos:
|
||||
# Backend: ruff lint + format via uv (uses the same ruff version as backend deps)
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: ruff
|
||||
name: ruff lint
|
||||
entry: bash -c 'cd backend && uv run ruff check --fix "${@/#backend\//}"' --
|
||||
language: system
|
||||
types_or: [python]
|
||||
files: ^backend/
|
||||
- id: ruff-format
|
||||
name: ruff format
|
||||
entry: bash -c 'cd backend && uv run ruff format "${@/#backend\//}"' --
|
||||
language: system
|
||||
types_or: [python]
|
||||
files: ^backend/
|
||||
|
||||
# Frontend: eslint + prettier (must run from frontend/ for node_modules resolution)
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: frontend-eslint
|
||||
name: eslint (frontend)
|
||||
entry: bash -c 'cd frontend && npx eslint --fix "${@/#frontend\//}"' --
|
||||
language: system
|
||||
types_or: [javascript, tsx, ts]
|
||||
files: ^frontend/
|
||||
|
||||
- id: frontend-prettier
|
||||
name: prettier (frontend)
|
||||
entry: bash -c 'cd frontend && npx prettier --write "${@/#frontend\//}"' --
|
||||
language: system
|
||||
files: ^frontend/
|
||||
types_or: [javascript, tsx, ts, json, css]
|
||||
@@ -0,0 +1,128 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
willem.jiang@gmail.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
+25
-8
@@ -77,6 +77,18 @@ export UV_INDEX_URL=https://pypi.org/simple
|
||||
export NPM_REGISTRY=https://registry.npmjs.org
|
||||
```
|
||||
|
||||
#### Recommended host resources
|
||||
|
||||
Use these as practical starting points for development and review environments:
|
||||
|
||||
| Scenario | Starting point | Recommended | Notes |
|
||||
|---------|-----------|------------|-------|
|
||||
| `make dev` on one machine | 4 vCPU, 8 GB RAM | 8 vCPU, 16 GB RAM | Best when DeerFlow uses hosted model APIs. |
|
||||
| `make docker-start` review environment | 4 vCPU, 8 GB RAM | 8 vCPU, 16 GB RAM | Docker image builds and sandbox containers need extra headroom. |
|
||||
| Shared Linux test server | 8 vCPU, 16 GB RAM | 16 vCPU, 32 GB RAM | Prefer this for heavier multi-agent runs or multiple reviewers. |
|
||||
|
||||
`2 vCPU / 4 GB` environments often fail to start reliably or become unresponsive under normal DeerFlow workloads.
|
||||
|
||||
#### Linux: Docker daemon permission denied
|
||||
|
||||
If `make docker-init`, `make docker-start`, or `make docker-stop` fails on Linux with an error like below, your current user likely does not have permission to access the Docker daemon socket:
|
||||
@@ -154,7 +166,7 @@ Required tools:
|
||||
|
||||
1. **Configure the application** (same as Docker setup above)
|
||||
|
||||
2. **Install dependencies**:
|
||||
2. **Install dependencies** (this also sets up pre-commit hooks):
|
||||
```bash
|
||||
make install
|
||||
```
|
||||
@@ -286,19 +298,24 @@ Nginx (port 2026) ← Unified entry point
|
||||
```bash
|
||||
# Backend tests
|
||||
cd backend
|
||||
uv run pytest
|
||||
make test
|
||||
|
||||
# Frontend checks
|
||||
# Frontend unit tests
|
||||
cd frontend
|
||||
pnpm check
|
||||
make test
|
||||
|
||||
# Frontend E2E tests (requires Chromium; builds and auto-starts the Next.js production server)
|
||||
cd frontend
|
||||
make test-e2e
|
||||
```
|
||||
|
||||
### PR Regression Checks
|
||||
|
||||
Every pull request runs the backend regression workflow at [.github/workflows/backend-unit-tests.yml](.github/workflows/backend-unit-tests.yml), including:
|
||||
Every pull request triggers the following CI workflows:
|
||||
|
||||
- `tests/test_provisioner_kubeconfig.py`
|
||||
- `tests/test_docker_sandbox_mode_detection.py`
|
||||
- **Backend unit tests** — [.github/workflows/backend-unit-tests.yml](.github/workflows/backend-unit-tests.yml)
|
||||
- **Frontend unit tests** — [.github/workflows/frontend-unit-tests.yml](.github/workflows/frontend-unit-tests.yml)
|
||||
- **Frontend E2E tests** — [.github/workflows/e2e-tests.yml](.github/workflows/e2e-tests.yml) (triggered only when `frontend/` files change)
|
||||
|
||||
## Code Style
|
||||
|
||||
@@ -310,7 +327,7 @@ Every pull request runs the backend regression workflow at [.github/workflows/ba
|
||||
|
||||
- [Configuration Guide](backend/docs/CONFIGURATION.md) - Setup and configuration
|
||||
- [Architecture Overview](backend/CLAUDE.md) - Technical architecture
|
||||
- [MCP Setup Guide](MCP_SETUP.md) - Model Context Protocol configuration
|
||||
- [MCP Setup Guide](backend/docs/MCP_SERVER.md) - Model Context Protocol configuration
|
||||
|
||||
## Need Help?
|
||||
|
||||
|
||||
@@ -1,45 +1,67 @@
|
||||
# DeerFlow - Unified Development Environment
|
||||
|
||||
.PHONY: help config config-upgrade check install dev dev-daemon start stop up down clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway
|
||||
.PHONY: help config config-upgrade check install setup doctor dev dev-pro dev-daemon dev-daemon-pro start start-pro start-daemon start-daemon-pro stop up up-pro down clean docker-init docker-start docker-start-pro docker-stop docker-logs docker-logs-frontend docker-logs-gateway
|
||||
|
||||
PYTHON ?= python
|
||||
BASH ?= bash
|
||||
BACKEND_UV_RUN = cd backend && uv run
|
||||
|
||||
# Detect OS for Windows compatibility
|
||||
ifeq ($(OS),Windows_NT)
|
||||
SHELL := cmd.exe
|
||||
PYTHON ?= python
|
||||
# Run repo shell scripts through Git Bash when Make is launched from cmd.exe / PowerShell.
|
||||
RUN_WITH_GIT_BASH = call scripts\run-with-git-bash.cmd
|
||||
else
|
||||
PYTHON ?= python3
|
||||
RUN_WITH_GIT_BASH =
|
||||
endif
|
||||
|
||||
help:
|
||||
@echo "DeerFlow Development Commands:"
|
||||
@echo " make setup - Interactive setup wizard (recommended for new users)"
|
||||
@echo " make doctor - Check configuration and system requirements"
|
||||
@echo " make config - Generate local config files (aborts if config already exists)"
|
||||
@echo " make config-upgrade - Merge new fields from config.example.yaml into config.yaml"
|
||||
@echo " make check - Check if all required tools are installed"
|
||||
@echo " make install - Install all dependencies (frontend + backend)"
|
||||
@echo " make install - Install all dependencies (frontend + backend + pre-commit hooks)"
|
||||
@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
|
||||
@echo " make dev - Start all services in development mode (with hot-reloading)"
|
||||
@echo " make dev-daemon - Start all services in background (daemon mode)"
|
||||
@echo " make dev-pro - Start in dev + Gateway mode (experimental, no LangGraph server)"
|
||||
@echo " make dev-daemon - Start dev services in background (daemon mode)"
|
||||
@echo " make dev-daemon-pro - Start dev daemon + Gateway mode (experimental)"
|
||||
@echo " make start - Start all services in production mode (optimized, no hot-reloading)"
|
||||
@echo " make start-pro - Start in prod + Gateway mode (experimental)"
|
||||
@echo " make start-daemon - Start prod services in background (daemon mode)"
|
||||
@echo " make start-daemon-pro - Start prod daemon + Gateway mode (experimental)"
|
||||
@echo " make stop - Stop all running services"
|
||||
@echo " make clean - Clean up processes and temporary files"
|
||||
@echo ""
|
||||
@echo "Docker Production Commands:"
|
||||
@echo " make up - Build and start production Docker services (localhost:2026)"
|
||||
@echo " make up-pro - Build and start production Docker in Gateway mode (experimental)"
|
||||
@echo " make down - Stop and remove production Docker containers"
|
||||
@echo ""
|
||||
@echo "Docker Development Commands:"
|
||||
@echo " make docker-init - Pull the sandbox image"
|
||||
@echo " make docker-start - Start Docker services (mode-aware from config.yaml, localhost:2026)"
|
||||
@echo " make docker-start-pro - Start Docker in Gateway mode (experimental, no LangGraph container)"
|
||||
@echo " make docker-stop - Stop Docker development services"
|
||||
@echo " make docker-logs - View Docker development logs"
|
||||
@echo " make docker-logs-frontend - View Docker frontend logs"
|
||||
@echo " make docker-logs-gateway - View Docker gateway logs"
|
||||
|
||||
## Setup & Diagnosis
|
||||
setup:
|
||||
@$(BACKEND_UV_RUN) python ../scripts/setup_wizard.py
|
||||
|
||||
doctor:
|
||||
@$(BACKEND_UV_RUN) python ../scripts/doctor.py
|
||||
|
||||
config:
|
||||
@$(PYTHON) ./scripts/configure.py
|
||||
|
||||
config-upgrade:
|
||||
@./scripts/config-upgrade.sh
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/config-upgrade.sh
|
||||
|
||||
# Check required tools
|
||||
check:
|
||||
@@ -51,6 +73,8 @@ install:
|
||||
@cd backend && uv sync
|
||||
@echo "Installing frontend dependencies..."
|
||||
@cd frontend && pnpm install
|
||||
@echo "Installing pre-commit hooks..."
|
||||
@$(BACKEND_UV_RUN) --with pre-commit pre-commit install
|
||||
@echo "✓ All dependencies installed"
|
||||
@echo ""
|
||||
@echo "=========================================="
|
||||
@@ -77,7 +101,7 @@ setup-sandbox:
|
||||
echo ""; \
|
||||
if command -v container >/dev/null 2>&1 && [ "$$(uname)" = "Darwin" ]; then \
|
||||
echo "Detected Apple Container on macOS, pulling image..."; \
|
||||
container pull "$$IMAGE" || echo "⚠ Apple Container pull failed, will try Docker"; \
|
||||
container image pull "$$IMAGE" || echo "⚠ Apple Container pull failed, will try Docker"; \
|
||||
fi; \
|
||||
if command -v docker >/dev/null 2>&1; then \
|
||||
echo "Pulling image using Docker..."; \
|
||||
@@ -96,39 +120,47 @@ setup-sandbox:
|
||||
|
||||
# Start all services in development mode (with hot-reloading)
|
||||
dev:
|
||||
ifeq ($(OS),Windows_NT)
|
||||
@call scripts\run-with-git-bash.cmd ./scripts/serve.sh --dev
|
||||
else
|
||||
@./scripts/serve.sh --dev
|
||||
endif
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev
|
||||
|
||||
# Start all services in dev + Gateway mode (experimental: agent runtime embedded in Gateway)
|
||||
dev-pro:
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway
|
||||
|
||||
# Start all services in production mode (with optimizations)
|
||||
start:
|
||||
ifeq ($(OS),Windows_NT)
|
||||
@call scripts\run-with-git-bash.cmd ./scripts/serve.sh --prod
|
||||
else
|
||||
@./scripts/serve.sh --prod
|
||||
endif
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod
|
||||
|
||||
# Start all services in prod + Gateway mode (experimental)
|
||||
start-pro:
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway
|
||||
|
||||
# Start all services in daemon mode (background)
|
||||
dev-daemon:
|
||||
@./scripts/start-daemon.sh
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --daemon
|
||||
|
||||
# Start daemon + Gateway mode (experimental)
|
||||
dev-daemon-pro:
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --dev --gateway --daemon
|
||||
|
||||
# Start prod services in daemon mode (background)
|
||||
start-daemon:
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --daemon
|
||||
|
||||
# Start prod daemon + Gateway mode (experimental)
|
||||
start-daemon-pro:
|
||||
@$(PYTHON) ./scripts/check.py
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --prod --gateway --daemon
|
||||
|
||||
# Stop all services
|
||||
stop:
|
||||
@echo "Stopping all services..."
|
||||
@-pkill -f "langgraph dev" 2>/dev/null || true
|
||||
@-pkill -f "uvicorn app.gateway.app:app" 2>/dev/null || true
|
||||
@-pkill -f "next dev" 2>/dev/null || true
|
||||
@-pkill -f "next start" 2>/dev/null || true
|
||||
@-pkill -f "next-server" 2>/dev/null || true
|
||||
@-pkill -f "next-server" 2>/dev/null || true
|
||||
@-nginx -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) -s quit 2>/dev/null || true
|
||||
@sleep 1
|
||||
@-pkill -9 nginx 2>/dev/null || true
|
||||
@echo "Cleaning up sandbox containers..."
|
||||
@-./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
|
||||
@echo "✓ All services stopped"
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/serve.sh --stop
|
||||
|
||||
# Clean up
|
||||
clean: stop
|
||||
@@ -144,25 +176,29 @@ clean: stop
|
||||
|
||||
# Initialize Docker containers and install dependencies
|
||||
docker-init:
|
||||
@./scripts/docker.sh init
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh init
|
||||
|
||||
# Start Docker development environment
|
||||
docker-start:
|
||||
@./scripts/docker.sh start
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start
|
||||
|
||||
# Start Docker in Gateway mode (experimental)
|
||||
docker-start-pro:
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh start --gateway
|
||||
|
||||
# Stop Docker development environment
|
||||
docker-stop:
|
||||
@./scripts/docker.sh stop
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh stop
|
||||
|
||||
# View Docker development logs
|
||||
docker-logs:
|
||||
@./scripts/docker.sh logs
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh logs
|
||||
|
||||
# View Docker development logs
|
||||
docker-logs-frontend:
|
||||
@./scripts/docker.sh logs --frontend
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh logs --frontend
|
||||
docker-logs-gateway:
|
||||
@./scripts/docker.sh logs --gateway
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/docker.sh logs --gateway
|
||||
|
||||
# ==========================================
|
||||
# Production Docker Commands
|
||||
@@ -170,8 +206,12 @@ docker-logs-gateway:
|
||||
|
||||
# Build and start production services
|
||||
up:
|
||||
@./scripts/deploy.sh
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh
|
||||
|
||||
# Build and start production services in Gateway mode
|
||||
up-pro:
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh --gateway
|
||||
|
||||
# Stop and remove production containers
|
||||
down:
|
||||
@./scripts/deploy.sh down
|
||||
@$(RUN_WITH_GIT_BASH) ./scripts/deploy.sh down
|
||||
|
||||
@@ -46,12 +46,14 @@ DeerFlow has newly integrated the intelligent search and crawling toolset indepe
|
||||
|
||||
- [🦌 DeerFlow - 2.0](#-deerflow---20)
|
||||
- [Official Website](#official-website)
|
||||
- [Coding Plan from ByteDance Volcengine](#coding-plan-from-bytedance-volcengine)
|
||||
- [InfoQuest](#infoquest)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [One-Line Agent Setup](#one-line-agent-setup)
|
||||
- [Quick Start](#quick-start)
|
||||
- [Configuration](#configuration)
|
||||
- [Running the Application](#running-the-application)
|
||||
- [Deployment Sizing](#deployment-sizing)
|
||||
- [Option 1: Docker (Recommended)](#option-1-docker-recommended)
|
||||
- [Option 2: Local Development](#option-2-local-development)
|
||||
- [Advanced](#advanced)
|
||||
@@ -59,6 +61,8 @@ DeerFlow has newly integrated the intelligent search and crawling toolset indepe
|
||||
- [MCP Server](#mcp-server)
|
||||
- [IM Channels](#im-channels)
|
||||
- [LangSmith Tracing](#langsmith-tracing)
|
||||
- [Langfuse Tracing](#langfuse-tracing)
|
||||
- [Using Both Providers](#using-both-providers)
|
||||
- [From Deep Research to Super Agent Harness](#from-deep-research-to-super-agent-harness)
|
||||
- [Core Features](#core-features)
|
||||
- [Skills \& Tools](#skills--tools)
|
||||
@@ -71,6 +75,8 @@ DeerFlow has newly integrated the intelligent search and crawling toolset indepe
|
||||
- [Embedded Python Client](#embedded-python-client)
|
||||
- [Documentation](#documentation)
|
||||
- [⚠️ Security Notice](#️-security-notice)
|
||||
- [Improper Deployment May Introduce Security Risks](#improper-deployment-may-introduce-security-risks)
|
||||
- [Security Recommendations](#security-recommendations)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
- [Acknowledgments](#acknowledgments)
|
||||
@@ -98,35 +104,38 @@ That prompt is intended for coding agents. It tells the agent to clone the repo
|
||||
cd deer-flow
|
||||
```
|
||||
|
||||
2. **Generate local configuration files**
|
||||
2. **Run the setup wizard**
|
||||
|
||||
From the project root directory (`deer-flow/`), run:
|
||||
|
||||
```bash
|
||||
make config
|
||||
make setup
|
||||
```
|
||||
|
||||
This command creates local configuration files based on the provided example templates.
|
||||
This launches an interactive wizard that guides you through choosing an LLM provider, optional web search, and execution/safety preferences such as sandbox mode, bash access, and file-write tools. It generates a minimal `config.yaml` and writes your keys to `.env`. The whole flow takes about 2 minutes.
|
||||
|
||||
3. **Configure your preferred model(s)**
|
||||
The wizard also lets you configure an optional web search provider, or skip it for now.
|
||||
|
||||
Edit `config.yaml` and define at least one model:
|
||||
Run `make doctor` at any time to verify your setup and get actionable fix hints.
|
||||
|
||||
> **Advanced / manual configuration**: If you prefer to edit `config.yaml` directly, run `make config` instead to copy the full template. See `config.example.yaml` for the complete reference including CLI-backed providers (Codex CLI, Claude Code OAuth), OpenRouter, Responses API, and more.
|
||||
|
||||
<details>
|
||||
<summary>Manual model configuration examples</summary>
|
||||
|
||||
```yaml
|
||||
models:
|
||||
- name: gpt-4 # Internal identifier
|
||||
display_name: GPT-4 # Human-readable name
|
||||
use: langchain_openai:ChatOpenAI # LangChain class path
|
||||
model: gpt-4 # Model identifier for API
|
||||
api_key: $OPENAI_API_KEY # API key (recommended: use env var)
|
||||
max_tokens: 4096 # Maximum tokens per request
|
||||
temperature: 0.7 # Sampling temperature
|
||||
- name: gpt-4o
|
||||
display_name: GPT-4o
|
||||
use: langchain_openai:ChatOpenAI
|
||||
model: gpt-4o
|
||||
api_key: $OPENAI_API_KEY
|
||||
|
||||
- name: openrouter-gemini-2.5-flash
|
||||
display_name: Gemini 2.5 Flash (OpenRouter)
|
||||
use: langchain_openai:ChatOpenAI
|
||||
model: google/gemini-2.5-flash-preview
|
||||
api_key: $OPENAI_API_KEY # OpenRouter still uses the OpenAI-compatible field name here
|
||||
api_key: $OPENROUTER_API_KEY
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
|
||||
- name: gpt-5-responses
|
||||
@@ -136,12 +145,26 @@ That prompt is intended for coding agents. It tells the agent to clone the repo
|
||||
api_key: $OPENAI_API_KEY
|
||||
use_responses_api: true
|
||||
output_version: responses/v1
|
||||
|
||||
- name: qwen3-32b-vllm
|
||||
display_name: Qwen3 32B (vLLM)
|
||||
use: deerflow.models.vllm_provider:VllmChatModel
|
||||
model: Qwen/Qwen3-32B
|
||||
api_key: $VLLM_API_KEY
|
||||
base_url: http://localhost:8000/v1
|
||||
supports_thinking: true
|
||||
when_thinking_enabled:
|
||||
extra_body:
|
||||
chat_template_kwargs:
|
||||
enable_thinking: true
|
||||
```
|
||||
|
||||
OpenRouter and similar OpenAI-compatible gateways should be configured with `langchain_openai:ChatOpenAI` plus `base_url`. If you prefer a provider-specific environment variable name, point `api_key` at that variable explicitly (for example `api_key: $OPENROUTER_API_KEY`).
|
||||
|
||||
To route OpenAI models through `/v1/responses`, keep using `langchain_openai:ChatOpenAI` and set `use_responses_api: true` with `output_version: responses/v1`.
|
||||
|
||||
For vLLM 0.19.0, use `deerflow.models.vllm_provider:VllmChatModel`. For Qwen-style reasoning models, DeerFlow toggles reasoning with `extra_body.chat_template_kwargs.enable_thinking` and preserves vLLM's non-standard `reasoning` field across multi-turn tool-call conversations. Legacy `thinking` configs are normalized automatically for backward compatibility. Reasoning models may also require the server to be started with `--reasoning-parser ...`. If your local vLLM deployment accepts any non-empty API key, you can still set `VLLM_API_KEY` to a placeholder value.
|
||||
|
||||
CLI-backed provider examples:
|
||||
|
||||
```yaml
|
||||
@@ -162,50 +185,39 @@ That prompt is intended for coding agents. It tells the agent to clone the repo
|
||||
```
|
||||
|
||||
- Codex CLI reads `~/.codex/auth.json`
|
||||
- The Codex Responses endpoint currently rejects `max_tokens` and `max_output_tokens`, so `CodexChatModel` does not expose a request-level token cap
|
||||
- Claude Code accepts `CLAUDE_CODE_OAUTH_TOKEN`, `ANTHROPIC_AUTH_TOKEN`, `CLAUDE_CODE_OAUTH_TOKEN_FILE_DESCRIPTOR`, `CLAUDE_CODE_CREDENTIALS_PATH`, or plaintext `~/.claude/.credentials.json`
|
||||
- ACP agent entries are separate from model providers. If you configure `acp_agents.codex`, point it at a Codex ACP adapter such as `npx -y @zed-industries/codex-acp`; the standard `codex` CLI binary is not ACP-compatible by itself
|
||||
- On macOS, DeerFlow does not probe Keychain automatically. Export Claude Code auth explicitly if needed:
|
||||
- Claude Code accepts `CLAUDE_CODE_OAUTH_TOKEN`, `ANTHROPIC_AUTH_TOKEN`, `CLAUDE_CODE_CREDENTIALS_PATH`, or `~/.claude/.credentials.json`
|
||||
- ACP agent entries are separate from model providers — if you configure `acp_agents.codex`, point it at a Codex ACP adapter such as `npx -y @zed-industries/codex-acp`
|
||||
- On macOS, export Claude Code auth explicitly if needed:
|
||||
|
||||
```bash
|
||||
eval "$(python3 scripts/export_claude_code_oauth.py --print-export)"
|
||||
```
|
||||
|
||||
4. **Set API keys for your configured model(s)**
|
||||
|
||||
Choose one of the following methods:
|
||||
|
||||
- Option A: Edit the `.env` file in the project root (Recommended)
|
||||
|
||||
API keys can also be set manually in `.env` (recommended) or exported in your shell:
|
||||
|
||||
```bash
|
||||
TAVILY_API_KEY=your-tavily-api-key
|
||||
OPENAI_API_KEY=your-openai-api-key
|
||||
# OpenRouter also uses OPENAI_API_KEY when your config uses langchain_openai:ChatOpenAI + base_url.
|
||||
# Add other provider keys as needed
|
||||
INFOQUEST_API_KEY=your-infoquest-api-key
|
||||
TAVILY_API_KEY=your-tavily-api-key
|
||||
```
|
||||
|
||||
- Option B: Export environment variables in your shell
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY=your-openai-api-key
|
||||
```
|
||||
|
||||
For CLI-backed providers:
|
||||
- Codex CLI: `~/.codex/auth.json`
|
||||
- Claude Code OAuth: explicit env/file handoff or `~/.claude/.credentials.json`
|
||||
|
||||
- Option C: Edit `config.yaml` directly (Not recommended for production)
|
||||
|
||||
```yaml
|
||||
models:
|
||||
- name: gpt-4
|
||||
api_key: your-actual-api-key-here # Replace placeholder
|
||||
```
|
||||
</details>
|
||||
|
||||
### Running the Application
|
||||
|
||||
#### Deployment Sizing
|
||||
|
||||
Use the table below as a practical starting point when choosing how to run DeerFlow:
|
||||
|
||||
| Deployment target | Starting point | Recommended | Notes |
|
||||
|---------|-----------|------------|-------|
|
||||
| Local evaluation / `make dev` | 4 vCPU, 8 GB RAM, 20 GB free SSD | 8 vCPU, 16 GB RAM | Good for one developer or one light session with hosted model APIs. `2 vCPU / 4 GB` is usually not enough. |
|
||||
| Docker development / `make docker-start` | 4 vCPU, 8 GB RAM, 25 GB free SSD | 8 vCPU, 16 GB RAM | Image builds, bind mounts, and sandbox containers need more headroom than pure local dev. |
|
||||
| Long-running server / `make up` | 8 vCPU, 16 GB RAM, 40 GB free SSD | 16 vCPU, 32 GB RAM | Preferred for shared use, multi-agent runs, report generation, or heavier sandbox workloads. |
|
||||
|
||||
- These numbers cover DeerFlow itself. If you also host a local LLM, size that service separately.
|
||||
- Linux plus Docker is the recommended deployment target for a persistent server. macOS and Windows are best treated as development or evaluation environments.
|
||||
- If CPU or memory usage stays pinned, reduce concurrent runs first, then move to the next sizing tier.
|
||||
|
||||
#### Option 1: Docker (Recommended)
|
||||
|
||||
**Development** (hot-reload, source mounts):
|
||||
@@ -242,7 +254,8 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed Docker development guide.
|
||||
|
||||
If you prefer running services locally:
|
||||
|
||||
Prerequisite: complete the "Configuration" steps above first (`make config` and model API keys). `make dev` requires a valid configuration file (defaults to `config.yaml` in the project root; can be overridden via `DEER_FLOW_CONFIG_PATH`).
|
||||
Prerequisite: complete the "Configuration" steps above first (`make setup`). `make dev` requires a valid `config.yaml` in the project root (can be overridden via `DEER_FLOW_CONFIG_PATH`). Run `make doctor` to verify your setup before starting.
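
For example, assuming an alternate configuration file of your own naming (`config.staging.yaml` below is only a placeholder), the override looks like this:

```bash
# Point make dev at a non-default configuration file (path is illustrative)
DEER_FLOW_CONFIG_PATH=./config.staging.yaml make dev
```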
|
||||
On Windows, run the local development flow from Git Bash. Native `cmd.exe` and PowerShell shells are not supported for the bash-based service scripts, and WSL is not guaranteed to work because some scripts rely on Git for Windows utilities such as `cygpath`.
|
||||
|
||||
1. **Check prerequisites**:
|
||||
```bash
|
||||
@@ -251,7 +264,7 @@ Prerequisite: complete the "Configuration" steps above first (`make config` and
|
||||
|
||||
2. **Install dependencies**:
|
||||
```bash
|
||||
make install # Install backend + frontend dependencies
|
||||
make install # Install backend + frontend dependencies + pre-commit hooks
|
||||
```
|
||||
|
||||
3. **(Optional) Pre-pull sandbox image**:
|
||||
@@ -274,6 +287,60 @@ Prerequisite: complete the "Configuration" steps above first (`make config` and
|
||||
|
||||
6. **Access**: http://localhost:2026
|
||||
|
||||
#### Startup Modes
|
||||
|
||||
DeerFlow supports multiple startup modes across two dimensions:
|
||||
|
||||
- **Dev / Prod** — dev enables hot-reload; prod uses pre-built frontend
|
||||
- **Standard / Gateway** — standard uses a separate LangGraph server (4 processes); Gateway mode (experimental) embeds the agent runtime in the Gateway API (3 processes)
|
||||
|
||||
| | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
|
||||
|---|---|---|---|---|
|
||||
| **Dev** | `./scripts/serve.sh --dev`<br/>`make dev` | `./scripts/serve.sh --dev --daemon`<br/>`make dev-daemon` | `./scripts/docker.sh start`<br/>`make docker-start` | — |
|
||||
| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`<br/>`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`<br/>`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`<br/>`make docker-start-pro` | — |
|
||||
| **Prod** | `./scripts/serve.sh --prod`<br/>`make start` | `./scripts/serve.sh --prod --daemon`<br/>`make start-daemon` | — | `./scripts/deploy.sh`<br/>`make up` |
|
||||
| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`<br/>`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`<br/>`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`<br/>`make up-pro` |
|
||||
|
||||
| Action | Local | Docker Dev | Docker Prod |
|
||||
|---|---|---|---|
|
||||
| **Stop** | `./scripts/serve.sh --stop`<br/>`make stop` | `./scripts/docker.sh stop`<br/>`make docker-stop` | `./scripts/deploy.sh down`<br/>`make down` |
|
||||
| **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |
|
||||
|
||||
> **Gateway mode** eliminates the LangGraph server process — the Gateway API handles agent execution directly via async tasks, managing its own concurrency.
|
||||
|
||||
#### Why Gateway Mode?
|
||||
|
||||
In standard mode, DeerFlow runs a dedicated [LangGraph Platform](https://langchain-ai.github.io/langgraph/) server alongside the Gateway API. This architecture works well but has trade-offs:
|
||||
|
||||
| | Standard Mode | Gateway Mode |
|
||||
|---|---|---|
|
||||
| **Architecture** | Gateway (REST API) + LangGraph (agent runtime) | Gateway embeds agent runtime |
|
||||
| **Concurrency** | `--n-jobs-per-worker` per worker (requires license) | `--workers` × async tasks (no per-worker cap) |
|
||||
| **Containers / Processes** | 4 (frontend, gateway, langgraph, nginx) | 3 (frontend, gateway, nginx) |
|
||||
| **Resource usage** | Higher (two Python runtimes) | Lower (single Python runtime) |
|
||||
| **LangGraph Platform license** | Required for production images | Not required |
|
||||
| **Cold start** | Slower (two services to initialize) | Faster |
|
||||
|
||||
Both modes are functionally equivalent — the same agents, tools, and skills work in either mode.
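
As a quick local sanity check of the difference, here is a minimal sketch that assumes the port assignments described in this README (LangGraph on 2024) and the daemon targets listed above:

```bash
# Start dev services in the background in Gateway mode (frontend, gateway, nginx only)
make dev-daemon-pro

# In Gateway mode there should be no standalone LangGraph server on port 2024
lsof -nP -iTCP:2024 -sTCP:LISTEN || echo "No LangGraph server listening (expected in Gateway mode)"
```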
|
||||
|
||||
#### Docker Production Deployment
|
||||
|
||||
`deploy.sh` supports building and starting separately. Images are mode-agnostic — runtime mode is selected at start time:
|
||||
|
||||
```bash
|
||||
# One-step (build + start)
|
||||
deploy.sh # standard mode (default)
|
||||
deploy.sh --gateway # gateway mode
|
||||
|
||||
# Two-step (build once, start with any mode)
|
||||
deploy.sh build # build all images
|
||||
deploy.sh start # start in standard mode
|
||||
deploy.sh start --gateway # start in gateway mode
|
||||
|
||||
# Stop
|
||||
deploy.sh down
|
||||
```
|
||||
|
||||
### Advanced
|
||||
#### Sandbox Mode
|
||||
|
||||
@@ -301,6 +368,8 @@ DeerFlow supports receiving tasks from messaging apps. Channels auto-start when
|
||||
| Telegram | Bot API (long-polling) | Easy |
|
||||
| Slack | Socket Mode | Moderate |
|
||||
| Feishu / Lark | WebSocket | Moderate |
|
||||
| WeChat | Tencent iLink (long-polling) | Moderate |
|
||||
| WeCom | WebSocket | Moderate |
|
||||
|
||||
**Configuration in `config.yaml`:**
|
||||
|
||||
@@ -328,6 +397,11 @@ channels:
|
||||
# domain: https://open.feishu.cn # China (default)
|
||||
# domain: https://open.larksuite.com # International
|
||||
|
||||
wecom:
|
||||
enabled: true
|
||||
bot_id: $WECOM_BOT_ID
|
||||
bot_secret: $WECOM_BOT_SECRET
|
||||
|
||||
slack:
|
||||
enabled: true
|
||||
bot_token: $SLACK_BOT_TOKEN # xoxb-...
|
||||
@@ -339,6 +413,19 @@ channels:
|
||||
bot_token: $TELEGRAM_BOT_TOKEN
|
||||
allowed_users: [] # empty = allow all
|
||||
|
||||
wechat:
|
||||
enabled: false
|
||||
bot_token: $WECHAT_BOT_TOKEN
|
||||
ilink_bot_id: $WECHAT_ILINK_BOT_ID
|
||||
qrcode_login_enabled: true # optional: allow first-time QR bootstrap when bot_token is absent
|
||||
allowed_users: [] # empty = allow all
|
||||
polling_timeout: 35
|
||||
state_dir: ./.deer-flow/wechat/state
|
||||
max_inbound_image_bytes: 20971520
|
||||
max_outbound_image_bytes: 20971520
|
||||
max_inbound_file_bytes: 52428800
|
||||
max_outbound_file_bytes: 52428800
|
||||
|
||||
# Optional: per-channel / per-user session settings
|
||||
session:
|
||||
assistant_id: mobile-agent # custom agent names are also supported here
|
||||
@@ -371,6 +458,14 @@ SLACK_APP_TOKEN=xapp-...
|
||||
# Feishu / Lark
|
||||
FEISHU_APP_ID=cli_xxxx
|
||||
FEISHU_APP_SECRET=your_app_secret
|
||||
|
||||
# WeChat iLink
|
||||
WECHAT_BOT_TOKEN=your_ilink_bot_token
|
||||
WECHAT_ILINK_BOT_ID=your_ilink_bot_id
|
||||
|
||||
# WeCom
|
||||
WECOM_BOT_ID=your_bot_id
|
||||
WECOM_BOT_SECRET=your_bot_secret
|
||||
```
|
||||
|
||||
**Telegram Setup**
|
||||
@@ -393,6 +488,22 @@ FEISHU_APP_SECRET=your_app_secret
|
||||
3. Under **Events**, subscribe to `im.message.receive_v1` and select **Long Connection** mode.
|
||||
4. Copy the App ID and App Secret. Set `FEISHU_APP_ID` and `FEISHU_APP_SECRET` in `.env` and enable the channel in `config.yaml`.
|
||||
|
||||
**WeChat Setup**
|
||||
|
||||
1. Enable the `wechat` channel in `config.yaml`.
|
||||
2. Either set `WECHAT_BOT_TOKEN` in `.env`, or set `qrcode_login_enabled: true` for first-time QR bootstrap.
|
||||
3. When `bot_token` is absent and QR bootstrap is enabled, watch backend logs for the QR content returned by iLink and complete the binding flow.
|
||||
4. After the QR flow succeeds, DeerFlow persists the acquired token under `state_dir` for later restarts.
|
||||
5. For Docker Compose deployments, keep `state_dir` on a persistent volume so the `get_updates_buf` cursor and saved auth state survive restarts.
|
||||
|
||||
**WeCom Setup**
|
||||
|
||||
1. Create a bot on the WeCom AI Bot platform and obtain the `bot_id` and `bot_secret`.
|
||||
2. Enable `channels.wecom` in `config.yaml` and fill in `bot_id` / `bot_secret`.
|
||||
3. Set `WECOM_BOT_ID` and `WECOM_BOT_SECRET` in `.env`.
|
||||
4. Make sure backend dependencies include `wecom-aibot-python-sdk`. The channel uses a WebSocket long connection and does not require a public callback URL.
|
||||
5. The current integration supports inbound text, image, and file messages. Final images/files generated by the agent are also sent back to the WeCom conversation.
|
||||
|
||||
When DeerFlow runs in Docker Compose, IM channels execute inside the `gateway` container. In that case, do not point `channels.langgraph_url` or `channels.gateway_url` at `localhost`; use container service names such as `http://langgraph:2024` and `http://gateway:8001`, or set `DEER_FLOW_CHANNELS_LANGGRAPH_URL` and `DEER_FLOW_CHANNELS_GATEWAY_URL`.
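
A minimal sketch of that override in `config.yaml`, using the container service names mentioned above (adjust them to match your Compose service names):

```yaml
channels:
  langgraph_url: http://langgraph:2024
  gateway_url: http://gateway:8001
```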
|
||||
|
||||
**Commands**
|
||||
@@ -422,6 +533,27 @@ LANGSMITH_API_KEY=lsv2_pt_xxxxxxxxxxxxxxxx
|
||||
LANGSMITH_PROJECT=xxx
|
||||
```
|
||||
|
||||
#### Langfuse Tracing
|
||||
|
||||
DeerFlow also supports [Langfuse](https://langfuse.com) observability for LangChain-compatible runs.
|
||||
|
||||
Add the following to your `.env` file:
|
||||
|
||||
```bash
|
||||
LANGFUSE_TRACING=true
|
||||
LANGFUSE_PUBLIC_KEY=pk-lf-xxxxxxxxxxxxxxxx
|
||||
LANGFUSE_SECRET_KEY=sk-lf-xxxxxxxxxxxxxxxx
|
||||
LANGFUSE_BASE_URL=https://cloud.langfuse.com
|
||||
```
|
||||
|
||||
If you are using a self-hosted Langfuse instance, set `LANGFUSE_BASE_URL` to your deployment URL.
|
||||
|
||||
#### Using Both Providers
|
||||
|
||||
If both LangSmith and Langfuse are enabled, DeerFlow attaches both tracing callbacks and reports the same model activity to both systems.
|
||||
|
||||
If a provider is explicitly enabled but its required credentials are missing, or its callback fails to initialize, DeerFlow fails fast when tracing is initialized during model creation, and the error message names the provider that caused the failure.
|
||||
|
||||
For Docker deployments, tracing is disabled by default. Set `LANGSMITH_TRACING=true` and/or `LANGFUSE_TRACING=true` in your `.env`, together with the corresponding credentials, to enable it.
|
||||
|
||||
## From Deep Research to Super Agent Harness
|
||||
@@ -526,6 +658,8 @@ This is the difference between a chatbot with tool access and an agent with an a
|
||||
|
||||
**Summarization**: Within a session, DeerFlow manages context aggressively — summarizing completed sub-tasks, offloading intermediate results to the filesystem, compressing what's no longer immediately relevant. This lets it stay sharp across long, multi-step tasks without blowing the context window.
|
||||
|
||||
**Strict Tool-Call Recovery**: When a provider or middleware interrupts a tool-call loop, DeerFlow now strips provider-level raw tool-call metadata on forced-stop assistant messages and injects placeholder tool results for dangling calls before the next model invocation. This keeps OpenAI-compatible reasoning models that strictly validate `tool_call_id` sequences from failing with malformed history errors.
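
A minimal sketch of this repair pass, assuming a plain list of LangChain-style messages; the helper name is invented, and only the `additional_kwargs["tool_calls"]` key and the placeholder-result idea come from the description above:

```python
from langchain_core.messages import AIMessage, BaseMessage, ToolMessage


def repair_tool_call_history(messages: list[BaseMessage]) -> list[BaseMessage]:
    """Illustrative repair: drop raw provider tool-call metadata on forced-stop
    assistant turns and close dangling tool calls with placeholder results so
    strict providers accept the history."""
    answered = {m.tool_call_id for m in messages if isinstance(m, ToolMessage)}
    repaired: list[BaseMessage] = []
    for msg in messages:
        if isinstance(msg, AIMessage):
            if not msg.tool_calls and "tool_calls" in msg.additional_kwargs:
                # Forced-stop turn: structured tool_calls were cleared, but the raw
                # provider payload would still trip strict tool_call_id validation.
                msg = msg.model_copy(update={
                    "additional_kwargs": {
                        k: v for k, v in msg.additional_kwargs.items() if k != "tool_calls"
                    }
                })
            repaired.append(msg)
            for call in msg.tool_calls:
                if call["id"] not in answered:
                    # Dangling call (e.g. user interruption): inject a placeholder result.
                    repaired.append(ToolMessage(
                        content="[interrupted] tool call was not executed",
                        tool_call_id=call["id"],
                    ))
        else:
            repaired.append(msg)
    return repaired
```
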
|
||||
|
||||
### Long-Term Memory
|
||||
|
||||
Most agents forget everything the moment a conversation ends. DeerFlow remembers.
|
||||
|
||||
@@ -40,6 +40,7 @@ https://github.com/user-attachments/assets/a8bcadc4-e040-4cf2-8fda-dd768b999c18
|
||||
- [Quick Start](#快速开始)
- [Configuration](#配置)
- [Run the Application](#运行应用)
- [Deployment Recommendations and Resource Planning](#部署建议与资源规划)
- [Option 1: Docker (Recommended)](#方式一docker推荐)
- [Option 2: Local Development](#方式二本地开发)
- [Advanced Configuration](#进阶配置)
|
||||
@@ -150,6 +151,20 @@ https://github.com/user-attachments/assets/a8bcadc4-e040-4cf2-8fda-dd768b999c18
|
||||
|
||||
### Run the Application

#### Deployment Recommendations and Resource Planning

Use the resource tiers below to decide how to run DeerFlow:

| Deployment scenario | Starting spec | Recommended spec | Notes |
|---------|-----------|------------|-------|
| Local trial / `make dev` | 4 vCPU, 8 GB RAM, 20 GB free SSD | 8 vCPU, 16 GB RAM | Suited to a single developer or one lightweight session, with models served via external APIs. `2 cores / 4 GB` is usually not stable. |
| Docker development / `make docker-start` | 4 vCPU, 8 GB RAM, 25 GB free SSD | 8 vCPU, 16 GB RAM | Image builds, source mounts, and sandbox containers consume more resources than pure local mode. |
| Long-running service / `make up` | 8 vCPU, 16 GB RAM, 40 GB free SSD | 16 vCPU, 32 GB RAM | Better suited to shared environments, multi-agent tasks, report generation, or heavier sandbox workloads. |

- The specs above cover DeerFlow itself only; if you also host a local LLM on the same machine, reserve resources for the model service separately.
- For continuously running services, Linux + Docker is recommended; macOS and Windows are better suited as development or trial environments.
- If CPU or memory stays saturated, reduce concurrent sessions or heavy tasks first, then consider moving up a tier.
|
||||
|
||||
#### Option 1: Docker (Recommended)

**Development mode** (hot reload, source mounted):
|
||||
@@ -180,6 +195,7 @@ make down # stop and remove containers
|
||||
If you prefer to start each service locally instead:

Prerequisite: complete the "Configuration" steps above first (`make config` and model API key setup). `make dev` requires a valid config file; it reads `config.yaml` in the project root by default, and the path can be overridden via `DEER_FLOW_CONFIG_PATH`.
On Windows, run the local development workflow in Git Bash. The bash-based service scripts cannot be executed directly in native `cmd.exe` or PowerShell, and WSL is not guaranteed to work either, because some scripts depend on Git for Windows tools such as `cygpath`.

1. **Check system dependencies**:
|
||||
```bash
|
||||
@@ -231,6 +247,7 @@ DeerFlow can receive tasks from instant messaging apps. Once configured, the corresponding
|
||||
| Telegram | Bot API (long-polling) | Simple |
| Slack | Socket Mode | Moderate |
| Feishu / Lark | WebSocket | Moderate |
| WeCom AI Bot | WebSocket | Moderate |

**Example configuration in `config.yaml`:**
|
||||
|
||||
@@ -258,6 +275,11 @@ channels:
|
||||
# domain: https://open.feishu.cn # China (default)
# domain: https://open.larksuite.com # International
|
||||
|
||||
wecom:
|
||||
enabled: true
|
||||
bot_id: $WECOM_BOT_ID
|
||||
bot_secret: $WECOM_BOT_SECRET
|
||||
|
||||
slack:
|
||||
enabled: true
|
||||
bot_token: $SLACK_BOT_TOKEN # xoxb-...
|
||||
@@ -301,6 +323,10 @@ SLACK_APP_TOKEN=xapp-...
|
||||
# Feishu / Lark
|
||||
FEISHU_APP_ID=cli_xxxx
|
||||
FEISHU_APP_SECRET=your_app_secret
|
||||
|
||||
# WeCom AI Bot
|
||||
WECOM_BOT_ID=your_bot_id
|
||||
WECOM_BOT_SECRET=your_bot_secret
|
||||
```
|
||||
|
||||
**Telegram Setup**
|
||||
@@ -323,6 +349,14 @@ FEISHU_APP_SECRET=your_app_secret
|
||||
3. Under **Events**, subscribe to `im.message.receive_v1` and select **Long Connection** mode.
4. Copy the App ID and App Secret, set `FEISHU_APP_ID` and `FEISHU_APP_SECRET` in `.env`, and enable the channel in `config.yaml`.
|
||||
|
||||
**WeCom AI Bot Setup**

1. Create a bot on the WeCom AI Bot platform and obtain the `bot_id` and `bot_secret`.
2. Enable `channels.wecom` in `config.yaml` and fill in `bot_id` / `bot_secret`.
3. Set `WECOM_BOT_ID` and `WECOM_BOT_SECRET` in `.env`.
4. Make sure backend dependencies include `wecom-aibot-python-sdk`; the channel receives messages over a WebSocket long connection and needs no public callback URL.
5. Inbound text, image, and file messages are currently supported; final images/files generated by the agent are also sent back to the WeCom conversation.
|
||||
|
||||
**Commands**

Once a channel is connected, you can interact with DeerFlow directly in the chat window:
|
||||
|
||||
+87
-27
@@ -13,6 +13,10 @@ DeerFlow is a LangGraph-based AI super agent system with a full-stack architectu
|
||||
- **Nginx** (port 2026): Unified reverse proxy entry point
|
||||
- **Provisioner** (port 8002, optional in Docker dev): Started only when sandbox is configured for provisioner/Kubernetes mode
|
||||
|
||||
**Runtime Modes**:
|
||||
- **Standard mode** (`make dev`): LangGraph Server handles agent execution as a separate process. 4 processes total.
|
||||
- **Gateway mode** (`make dev-pro`, experimental): Agent runtime embedded in Gateway via `RunManager` + `run_agent()` + `StreamBridge` (`packages/harness/deerflow/runtime/`). Service manages its own concurrency via async tasks. 3 processes total, no LangGraph Server.
|
||||
|
||||
**Project Structure**:
|
||||
```
|
||||
deer-flow/
|
||||
@@ -80,6 +84,8 @@ When making code changes, you MUST update the relevant documentation:
|
||||
make check # Check system requirements
|
||||
make install # Install all dependencies (frontend + backend)
|
||||
make dev # Start all services (LangGraph + Gateway + Frontend + Nginx), with config.yaml preflight
|
||||
make dev-pro # Gateway mode (experimental): skip LangGraph, agent runtime embedded in Gateway
|
||||
make start-pro # Production + Gateway mode (experimental)
|
||||
make stop # Stop all services
|
||||
```
|
||||
|
||||
@@ -124,7 +130,7 @@ from app.gateway.app import app
|
||||
from app.channels.service import start_channel_service
|
||||
|
||||
# App → Harness (allowed)
|
||||
from deerflow.config import get_app_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
# Harness → App (FORBIDDEN — enforced by test_harness_boundary.py)
|
||||
# from app.gateway.routers.uploads import ... # ← will fail CI
|
||||
@@ -150,20 +156,26 @@ from deerflow.config import get_app_config
|
||||
|
||||
### Middleware Chain
|
||||
|
||||
Middlewares execute in strict order in `packages/harness/deerflow/agents/lead_agent/agent.py`:
|
||||
Lead-agent middlewares are assembled in strict append order across `packages/harness/deerflow/agents/middlewares/tool_error_handling_middleware.py` (`build_lead_runtime_middlewares`) and `packages/harness/deerflow/agents/lead_agent/agent.py` (`_build_middlewares`):
|
||||
|
||||
1. **ThreadDataMiddleware** - Creates per-thread directories (`backend/.deer-flow/threads/{thread_id}/user-data/{workspace,uploads,outputs}`); Web UI thread deletion now follows LangGraph thread removal with Gateway cleanup of the local `.deer-flow/threads/{thread_id}` directory
|
||||
1. **ThreadDataMiddleware** - Creates per-thread directories under the user's isolation scope (`backend/.deer-flow/users/{user_id}/threads/{thread_id}/user-data/{workspace,uploads,outputs}`); resolves `user_id` via `get_effective_user_id()` (falls back to `"default"` in no-auth mode); Web UI thread deletion now follows LangGraph thread removal with Gateway cleanup of the local thread directory
|
||||
2. **UploadsMiddleware** - Tracks and injects newly uploaded files into conversation
|
||||
3. **SandboxMiddleware** - Acquires sandbox, stores `sandbox_id` in state
|
||||
4. **DanglingToolCallMiddleware** - Injects placeholder ToolMessages for AIMessage tool_calls that lack responses (e.g., due to user interruption)
|
||||
5. **GuardrailMiddleware** - Pre-tool-call authorization via pluggable `GuardrailProvider` protocol (optional, if `guardrails.enabled` in config). Evaluates each tool call and returns error ToolMessage on deny. Three provider options: built-in `AllowlistProvider` (zero deps), OAP policy providers (e.g. `aport-agent-guardrails`), or custom providers. See [docs/GUARDRAILS.md](docs/GUARDRAILS.md) for setup, usage, and how to implement a provider.
|
||||
6. **SummarizationMiddleware** - Context reduction when approaching token limits (optional, if enabled)
|
||||
7. **TodoListMiddleware** - Task tracking with `write_todos` tool (optional, if plan_mode)
|
||||
8. **TitleMiddleware** - Auto-generates thread title after first complete exchange and normalizes structured message content before prompting the title model
|
||||
9. **MemoryMiddleware** - Queues conversations for async memory update (filters to user + final AI responses)
|
||||
10. **ViewImageMiddleware** - Injects base64 image data before LLM call (conditional on vision support)
|
||||
11. **SubagentLimitMiddleware** - Truncates excess `task` tool calls from model response to enforce `MAX_CONCURRENT_SUBAGENTS` limit (optional, if subagent_enabled)
|
||||
12. **ClarificationMiddleware** - Intercepts `ask_clarification` tool calls, interrupts via `Command(goto=END)` (must be last)
|
||||
4. **DanglingToolCallMiddleware** - Injects placeholder ToolMessages for AIMessage tool_calls that lack responses (e.g., due to user interruption), including raw provider tool-call payloads preserved only in `additional_kwargs["tool_calls"]`
|
||||
5. **LLMErrorHandlingMiddleware** - Normalizes provider/model invocation failures into recoverable assistant-facing errors before later middleware/tool stages run
|
||||
6. **GuardrailMiddleware** - Pre-tool-call authorization via pluggable `GuardrailProvider` protocol (optional, if `guardrails.enabled` in config). Evaluates each tool call and returns error ToolMessage on deny. Three provider options: built-in `AllowlistProvider` (zero deps), OAP policy providers (e.g. `aport-agent-guardrails`), or custom providers. See [docs/GUARDRAILS.md](docs/GUARDRAILS.md) for setup, usage, and how to implement a provider.
|
||||
7. **SandboxAuditMiddleware** - Audits sandboxed shell/file operations for security logging before tool execution continues
|
||||
8. **ToolErrorHandlingMiddleware** - Converts tool exceptions into error `ToolMessage`s so the run can continue instead of aborting
|
||||
9. **SummarizationMiddleware** - Context reduction when approaching token limits (optional, if enabled)
|
||||
10. **TodoListMiddleware** - Task tracking with `write_todos` tool (optional, if plan_mode)
|
||||
11. **TokenUsageMiddleware** - Records token usage metrics when token tracking is enabled (optional)
|
||||
12. **TitleMiddleware** - Auto-generates thread title after first complete exchange and normalizes structured message content before prompting the title model
|
||||
13. **MemoryMiddleware** - Queues conversations for async memory update (filters to user + final AI responses)
|
||||
14. **ViewImageMiddleware** - Injects base64 image data before LLM call (conditional on vision support)
|
||||
15. **DeferredToolFilterMiddleware** - Hides deferred tool schemas from the bound model until tool search is enabled (optional)
|
||||
16. **SubagentLimitMiddleware** - Truncates excess `task` tool calls from model response to enforce `MAX_CONCURRENT_SUBAGENTS` limit (optional, if `subagent_enabled`)
|
||||
17. **LoopDetectionMiddleware** - Detects repeated tool-call loops; hard-stop responses clear both structured `tool_calls` and raw provider tool-call metadata before forcing a final text answer
|
||||
18. **ClarificationMiddleware** - Intercepts `ask_clarification` tool calls, interrupts via `Command(goto=END)` (must be last)
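
Because the chain runs in strict append order, the assembly can be pictured as conditional appends to a list. The sketch below is illustrative only: class names are abbreviated, the flag names are assumptions, and several optional toggles (token tracking, vision, deferred tools) are simplified; it is not the real `_build_middlewares` signature.

```python
from dataclasses import dataclass


@dataclass
class LeadAgentFlags:
    """Hypothetical feature toggles; the real switches live in config.yaml."""
    guardrails_enabled: bool = False
    summarization_enabled: bool = True
    plan_mode: bool = True
    subagent_enabled: bool = True


def middleware_chain(flags: LeadAgentFlags) -> list[str]:
    """Return the chain as names, appended in the strict order listed above."""
    chain = ["ThreadData", "Uploads", "Sandbox", "DanglingToolCall", "LLMErrorHandling"]
    if flags.guardrails_enabled:
        chain.append("Guardrail")
    chain += ["SandboxAudit", "ToolErrorHandling"]
    if flags.summarization_enabled:
        chain.append("Summarization")
    if flags.plan_mode:
        chain.append("TodoList")
    # TokenUsage, ViewImage and DeferredToolFilter are also optional; shown unconditionally here.
    chain += ["TokenUsage", "Title", "Memory", "ViewImage", "DeferredToolFilter"]
    if flags.subagent_enabled:
        chain.append("SubagentLimit")
    chain += ["LoopDetection", "Clarification"]  # Clarification must stay last
    return chain
```
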
|
||||
|
||||
### Configuration System
|
||||
|
||||
@@ -173,7 +185,16 @@ Setup: Copy `config.example.yaml` to `config.yaml` in the **project root** direc
|
||||
|
||||
**Config Versioning**: `config.example.yaml` has a `config_version` field. On startup, `AppConfig.from_file()` compares user version vs example version and emits a warning if outdated. Missing `config_version` = version 0. Run `make config-upgrade` to auto-merge missing fields. When changing the config schema, bump `config_version` in `config.example.yaml`.
|
||||
|
||||
**Config Caching**: `get_app_config()` caches the parsed config, but automatically reloads it when the resolved config path changes or the file's mtime increases. This keeps Gateway and LangGraph reads aligned with `config.yaml` edits without requiring a manual process restart.
|
||||
**Config Lifecycle**: All config models are `frozen=True` (immutable after construction). `AppConfig.from_file()` is a pure function — no side effects, no process-global state. The resolved `AppConfig` is passed as an explicit parameter down every consumer lane:
|
||||
|
||||
- **Gateway**: `app.state.config` populated in lifespan; routers receive it via `Depends(get_config)` from `app/gateway/deps.py`.
|
||||
- **Client**: `DeerFlowClient._app_config` captured in the constructor; every method reads `self._app_config`.
|
||||
- **Agent run**: wrapped in `DeerFlowContext(app_config=…)` and injected via LangGraph `Runtime[DeerFlowContext].context`. Middleware and tools read `runtime.context.app_config` directly or via `resolve_context(runtime)`.
|
||||
- **LangGraph Server bootstrap**: `make_lead_agent` (registered in `langgraph.json`) calls `AppConfig.from_file()` itself — the only place in production that loads from disk at agent-build time.
|
||||
|
||||
To update config at runtime (Gateway API mutations for MCP/Skills), write the new file and call `AppConfig.from_file()` to build a fresh snapshot, then swap `app.state.config`. No mtime detection, no auto-reload, no ambient ContextVar lookup (`AppConfig.current()` has been removed).
|
||||
|
||||
**DeerFlowContext**: Per-invocation typed context for the agent execution path, injected via LangGraph `Runtime[DeerFlowContext]`. Holds `app_config: AppConfig`, `thread_id: str`, `agent_name: str | None`. Gateway runtime and `DeerFlowClient` construct full `DeerFlowContext` at invoke time; the LangGraph Server boundary builds one inside `make_lead_agent`. Middleware and tools access context through `resolve_context(runtime)` which returns the typed `DeerFlowContext` — legacy dict/None shapes are rejected. Mutable runtime state (`sandbox_id`) flows through `ThreadState.sandbox`, not context.
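
A minimal sketch of the Gateway lane of this pattern, assuming FastAPI: the snapshot is built once in lifespan, stored on `app.state.config`, and handed to routers via a `get_config` dependency. `AppConfigStub` and the route are illustrative stand-ins, not DeerFlow's actual models.

```python
from contextlib import asynccontextmanager
from dataclasses import dataclass

from fastapi import Depends, FastAPI, Request


@dataclass(frozen=True)
class AppConfigStub:
    """Stand-in for the real frozen AppConfig; the field is illustrative."""
    config_version: int = 1


@asynccontextmanager
async def lifespan(app: FastAPI):
    # In DeerFlow this would be AppConfig.from_file(); here, a stub snapshot.
    app.state.config = AppConfigStub()
    yield


app = FastAPI(lifespan=lifespan)


def get_config(request: Request) -> AppConfigStub:
    # Routers never reload from disk; they read the snapshot set in lifespan.
    return request.app.state.config


@app.get("/api/config-version")
async def config_version(cfg: AppConfigStub = Depends(get_config)) -> dict[str, int]:
    return {"config_version": cfg.config_version}
```
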
|
||||
|
||||
Configuration priority:
|
||||
1. Explicit `config_path` argument
|
||||
@@ -210,6 +231,9 @@ FastAPI application on port 8001 with health check at `GET /health`.
|
||||
| **Threads** (`/api/threads/{id}`) | `DELETE /` - remove DeerFlow-managed local thread data after LangGraph thread deletion; unexpected failures are logged server-side and return a generic 500 detail |
|
||||
| **Artifacts** (`/api/threads/{id}/artifacts`) | `GET /{path}` - serve artifacts; active content types (`text/html`, `application/xhtml+xml`, `image/svg+xml`) are always forced as download attachments to reduce XSS risk; `?download=true` still forces download for other file types |
|
||||
| **Suggestions** (`/api/threads/{id}/suggestions`) | `POST /` - generate follow-up questions; rich list/block model content is normalized before JSON parsing |
|
||||
| **Thread Runs** (`/api/threads/{id}/runs`) | `POST /` - create background run; `POST /stream` - create + SSE stream; `POST /wait` - create + block; `GET /` - list runs; `GET /{rid}` - run details; `POST /{rid}/cancel` - cancel; `GET /{rid}/join` - join SSE; `GET /{rid}/messages` - paginated messages `{data, has_more}`; `GET /{rid}/events` - full event stream; `GET /../messages` - thread messages with feedback; `GET /../token-usage` - aggregate tokens |
|
||||
| **Feedback** (`/api/threads/{id}/runs/{rid}/feedback`) | `PUT /` - upsert feedback; `DELETE /` - delete user feedback; `POST /` - create feedback; `GET /` - list feedback; `GET /stats` - aggregate stats; `DELETE /{fid}` - delete specific |
|
||||
| **Runs** (`/api/runs`) | `POST /stream` - stateless run + SSE; `POST /wait` - stateless run + block; `GET /{rid}/messages` - paginated messages by run_id `{data, has_more}` (cursor: `after_seq`/`before_seq`); `GET /{rid}/feedback` - list feedback by run_id |
|
||||
|
||||
Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` → Gateway.
|
||||
|
||||
@@ -223,7 +247,7 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
|
||||
|
||||
**Virtual Path System**:
|
||||
- Agent sees: `/mnt/user-data/{workspace,uploads,outputs}`, `/mnt/skills`
|
||||
- Physical: `backend/.deer-flow/threads/{thread_id}/user-data/...`, `deer-flow/skills/`
|
||||
- Physical: `backend/.deer-flow/users/{user_id}/threads/{thread_id}/user-data/...`, `deer-flow/skills/`
|
||||
- Translation: `replace_virtual_path()` / `replace_virtual_paths_in_command()`
|
||||
- Detection: `is_local_sandbox()` checks `sandbox_id == "local"`
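
A minimal sketch of the translation step, using the prefix and per-user directory layout from the bullets above; the function shares the name `replace_virtual_path`, but its body and signature here are assumptions.

```python
from pathlib import Path

VIRTUAL_PREFIX = "/mnt/user-data"


def replace_virtual_path(virtual: str, base_dir: Path, user_id: str, thread_id: str) -> Path:
    """Map an agent-visible path such as /mnt/user-data/outputs/report.md to the
    per-user, per-thread physical location described above."""
    if not virtual.startswith(VIRTUAL_PREFIX):
        raise ValueError(f"not a virtual user-data path: {virtual}")
    relative = virtual[len(VIRTUAL_PREFIX):].lstrip("/")
    return base_dir / "users" / user_id / "threads" / thread_id / "user-data" / relative


print(replace_virtual_path(
    "/mnt/user-data/outputs/report.md",
    Path("backend/.deer-flow"), "default", "t-123",
))
# backend/.deer-flow/users/default/threads/t-123/user-data/outputs/report.md
```
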
|
||||
|
||||
@@ -232,7 +256,7 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
|
||||
- `ls` - Directory listing (tree format, max 2 levels)
|
||||
- `read_file` - Read file contents with optional line range
|
||||
- `write_file` - Write/append to files, creates directories
|
||||
- `str_replace` - Substring replacement (single or all occurrences)
|
||||
- `str_replace` - Substring replacement (single or all occurrences); same-path serialization is scoped to `(sandbox.id, path)` so isolated sandboxes do not contend on identical virtual paths inside one process
|
||||
|
||||
### Subagent System (`packages/harness/deerflow/subagents/`)
|
||||
|
||||
@@ -263,7 +287,7 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
|
||||
- `invoke_acp_agent` - Invokes external ACP-compatible agents from `config.yaml`
|
||||
- ACP launchers must be real ACP adapters. The standard `codex` CLI is not ACP-compatible by itself; configure a wrapper such as `npx -y @zed-industries/codex-acp` or an installed `codex-acp` binary
|
||||
- Missing ACP executables now return an actionable error message instead of a raw `[Errno 2]`
|
||||
- Each ACP agent uses a per-thread workspace at `{base_dir}/threads/{thread_id}/acp-workspace/`. The workspace is accessible to the lead agent via the virtual path `/mnt/acp-workspace/` (read-only). In docker sandbox mode, the directory is volume-mounted into the container at `/mnt/acp-workspace` (read-only); in local sandbox mode, path translation is handled by `tools.py`
|
||||
- Each ACP agent uses a per-thread workspace at `{base_dir}/users/{user_id}/threads/{thread_id}/acp-workspace/`. The workspace is accessible to the lead agent via the virtual path `/mnt/acp-workspace/` (read-only). In docker sandbox mode, the directory is volume-mounted into the container at `/mnt/acp-workspace` (read-only); in local sandbox mode, path translation is handled by `tools.py`
|
||||
- `image_search/` - Image search via DuckDuckGo
|
||||
|
||||
### MCP System (`packages/harness/deerflow/mcp/`)
|
||||
@@ -287,10 +311,17 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
|
||||
|
||||
- `create_chat_model(name, thinking_enabled)` instantiates LLM from config via reflection
|
||||
- Supports `thinking_enabled` flag with per-model `when_thinking_enabled` overrides
|
||||
- Supports vLLM-style thinking toggles via `when_thinking_enabled.extra_body.chat_template_kwargs.enable_thinking` for Qwen reasoning models, while normalizing legacy `thinking` configs for backward compatibility
|
||||
- Supports `supports_vision` flag for image understanding models
|
||||
- Config values starting with `$` resolved as environment variables
|
||||
- Missing provider modules surface actionable install hints from reflection resolvers (for example `uv add langchain-google-genai`)
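
A minimal sketch of the reflection plus `$`-prefixed env resolution described above; the handling of missing environment variables and any `use` format beyond `module:Class` are assumptions.

```python
import importlib
import os
from typing import Any


def resolve_env_refs(value: Any) -> Any:
    """Recursively replace "$NAME" strings with os.environ["NAME"]."""
    if isinstance(value, str) and value.startswith("$"):
        return os.environ.get(value[1:], "")
    if isinstance(value, dict):
        return {k: resolve_env_refs(v) for k, v in value.items()}
    if isinstance(value, list):
        return [resolve_env_refs(v) for v in value]
    return value


def instantiate_from_use(model_cfg: dict[str, Any]) -> Any:
    """Reflectively load "module.path:ClassName" and construct it with the
    remaining (env-resolved) keyword arguments."""
    module_path, class_name = model_cfg["use"].split(":")
    cls = getattr(importlib.import_module(module_path), class_name)
    kwargs = resolve_env_refs({k: v for k, v in model_cfg.items() if k != "use"})
    return cls(**kwargs)
```
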
|
||||
|
||||
### vLLM Provider (`packages/harness/deerflow/models/vllm_provider.py`)
|
||||
|
||||
- `VllmChatModel` subclasses `langchain_openai:ChatOpenAI` for vLLM 0.19.0 OpenAI-compatible endpoints
|
||||
- Preserves vLLM's non-standard assistant `reasoning` field on full responses, streaming deltas, and follow-up tool-call turns
|
||||
- Designed for configs that enable thinking through `extra_body.chat_template_kwargs.enable_thinking` on vLLM 0.19.0 Qwen reasoning models, while accepting the older `thinking` alias
|
||||
|
||||
### IM Channels System (`app/channels/`)
|
||||
|
||||
Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via the LangGraph Server.
|
||||
@@ -325,18 +356,27 @@ Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow a
|
||||
|
||||
**Components**:
|
||||
- `updater.py` - LLM-based memory updates with fact extraction, whitespace-normalized fact deduplication (trims leading/trailing whitespace before comparing), and atomic file I/O
|
||||
- `queue.py` - Debounced update queue (per-thread deduplication, configurable wait time)
|
||||
- `queue.py` - Debounced update queue (per-thread deduplication, configurable wait time); captures `user_id` at enqueue time so it survives the `threading.Timer` boundary
|
||||
- `prompt.py` - Prompt templates for memory updates
|
||||
- `storage.py` - File-based storage with per-user isolation; cache keyed by `(user_id, agent_name)` tuple
|
||||
|
||||
**Data Structure** (stored in `backend/.deer-flow/memory.json`):
|
||||
**Per-User Isolation**:
|
||||
- Memory is stored per-user at `{base_dir}/users/{user_id}/memory.json`
|
||||
- Per-agent per-user memory at `{base_dir}/users/{user_id}/agents/{agent_name}/memory.json`
|
||||
- `user_id` is resolved via `get_effective_user_id()` from `deerflow.runtime.user_context`
|
||||
- In no-auth mode, `user_id` defaults to `"default"` (constant `DEFAULT_USER_ID`)
|
||||
- Absolute `storage_path` in config opts out of per-user isolation
|
||||
- **Migration**: Run `PYTHONPATH=. python scripts/migrate_user_isolation.py` to move legacy `memory.json` and `threads/` into per-user layout; supports `--dry-run`
|
||||
|
||||
**Data Structure** (stored in `{base_dir}/users/{user_id}/memory.json`):
|
||||
- **User Context**: `workContext`, `personalContext`, `topOfMind` (1-3 sentence summaries)
|
||||
- **History**: `recentMonths`, `earlierContext`, `longTermBackground`
|
||||
- **Facts**: Discrete facts with `id`, `content`, `category` (preference/knowledge/context/behavior/goal), `confidence` (0-1), `createdAt`, `source`
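
An illustrative literal for this layout; all values are invented, only the key names come from the description above, and whether the three context summaries sit under a single wrapper key is an assumption.

```python
memory_snapshot = {
    "userContext": {
        "workContext": "Backend engineer integrating DeerFlow with internal IM channels.",
        "personalContext": "Prefers concise answers with code samples.",
        "topOfMind": "Migrating thread storage to the per-user layout.",
    },
    "history": {
        "recentMonths": "...",
        "earlierContext": "...",
        "longTermBackground": "...",
    },
    "facts": [
        {
            "id": "fact-001",
            "content": "Deploys DeerFlow with Docker Compose on Linux.",
            "category": "context",
            "confidence": 0.9,
            "createdAt": "2025-01-01T00:00:00Z",
            "source": "conversation",
        }
    ],
}
```
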
|
||||
|
||||
**Workflow**:
|
||||
1. `MemoryMiddleware` filters messages (user inputs + final AI responses) and queues conversation
|
||||
1. `MemoryMiddleware` filters messages (user inputs + final AI responses), captures `user_id` via `get_effective_user_id()`, and queues conversation with the captured `user_id`
|
||||
2. Queue debounces (30s default), batches updates, deduplicates per-thread
|
||||
3. Background thread invokes LLM to extract context updates and facts
|
||||
3. Background thread invokes LLM to extract context updates and facts, using the stored `user_id` (not the contextvar, which is unavailable on timer threads)
|
||||
4. Applies updates atomically (temp file + rename) with cache invalidation, skipping duplicate fact content before append
|
||||
5. Next interaction injects top 15 facts + context into `<memory>` tags in system prompt
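
Step 3 matters because contextvars do not propagate to `threading.Timer` callbacks. Below is a minimal, self-contained sketch of capturing the user id at enqueue time; the queue API and names here are invented, not the real `queue.py`.

```python
import contextvars
import threading

_current_user_id: contextvars.ContextVar[str] = contextvars.ContextVar("user_id", default="default")


def enqueue_memory_update(thread_id: str, conversation: list[str], debounce_seconds: float = 30.0) -> threading.Timer:
    # Capture user_id now: the timer callback runs on a fresh thread where the
    # request-scoped contextvar is no longer set and would fall back to "default".
    user_id = _current_user_id.get()

    def process() -> None:
        print(f"updating memory for user={user_id} thread={thread_id} ({len(conversation)} messages)")

    timer = threading.Timer(debounce_seconds, process)
    timer.daemon = True
    timer.start()
    return timer
```
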
|
||||
|
||||
@@ -344,7 +384,7 @@ Focused regression coverage for the updater lives in `backend/tests/test_memory_
|
||||
|
||||
**Configuration** (`config.yaml` → `memory`):
|
||||
- `enabled` / `injection_enabled` - Master switches
|
||||
- `storage_path` - Path to memory.json
|
||||
- `storage_path` - Path to memory.json (absolute path opts out of per-user isolation)
|
||||
- `debounce_seconds` - Wait time before processing (default: 30)
|
||||
- `model_name` - LLM for updates (null = default model)
|
||||
- `max_facts` / `fact_confidence_threshold` - Fact storage limits (100 / 0.7)
|
||||
@@ -359,6 +399,7 @@ Focused regression coverage for the updater lives in `backend/tests/test_memory_
|
||||
|
||||
**`config.yaml`** key sections:
|
||||
- `models[]` - LLM configs with `use` class path, `supports_thinking`, `supports_vision`, provider-specific fields
|
||||
- vLLM reasoning models should use `deerflow.models.vllm_provider:VllmChatModel`; for Qwen-style parsers prefer `when_thinking_enabled.extra_body.chat_template_kwargs.enable_thinking`, and DeerFlow will also normalize the older `thinking` alias
|
||||
- `tools[]` - Tool configs with `use` variable path and `group`
|
||||
- `tool_groups[]` - Logical groupings for tools
|
||||
- `sandbox.use` - Sandbox provider class path
|
||||
@@ -381,14 +422,16 @@ Both can be modified at runtime via Gateway API endpoints or `DeerFlowClient` me
|
||||
**Architecture**: Imports the same `deerflow` modules that LangGraph Server and Gateway API use. Shares the same config files and data directories. No FastAPI dependency.
|
||||
|
||||
**Agent Conversation** (replaces LangGraph Server):
|
||||
- `chat(message, thread_id)` — synchronous, returns final text
|
||||
- `stream(message, thread_id)` — yields `StreamEvent` aligned with LangGraph SSE protocol:
|
||||
- `"values"` — full state snapshot (title, messages, artifacts)
|
||||
- `"messages-tuple"` — per-message update (AI text, tool calls, tool results)
|
||||
- `"end"` — stream finished
|
||||
- `chat(message, thread_id)` — synchronous, accumulates streaming deltas per message-id and returns the final AI text
|
||||
- `stream(message, thread_id)` — subscribes to LangGraph `stream_mode=["values", "messages", "custom"]` and yields `StreamEvent`:
|
||||
- `"values"` — full state snapshot (title, messages, artifacts); AI text already delivered via `messages` mode is **not** re-synthesized here to avoid duplicate deliveries
|
||||
- `"messages-tuple"` — per-chunk update: for AI text this is a **delta** (concat per `id` to rebuild the full message); tool calls and tool results are emitted once each
|
||||
- `"custom"` — forwarded from `StreamWriter`
|
||||
- `"end"` — stream finished (carries cumulative `usage` counted once per message id)
|
||||
- Agent created lazily via `create_agent()` + `_build_middlewares()`, same as `make_lead_agent`
|
||||
- Supports `checkpointer` parameter for state persistence across turns
|
||||
- `reset_agent()` forces agent recreation (e.g. after memory or skill changes)
|
||||
- See [docs/STREAMING.md](docs/STREAMING.md) for the full design: why Gateway and DeerFlowClient are parallel paths, LangGraph's `stream_mode` semantics, the per-id dedup invariants, and regression testing strategy
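
A minimal consumer sketch of the delta contract above: AI text arrives as per-id deltas in `"messages-tuple"` events and must be concatenated per message id. The event payload field names (`type`, `id`, `text`) are assumptions, not the exact `StreamEvent` schema.

```python
from collections import defaultdict


def collect_final_text(events) -> str:
    """Rebuild full AI messages from per-id text deltas (field names assumed)."""
    buffers: dict[str, list[str]] = defaultdict(list)
    order: list[str] = []
    for event in events:
        if event["type"] != "messages-tuple":
            continue  # "values", "custom" and "end" are handled elsewhere
        msg_id, delta = event["id"], event.get("text", "")
        if msg_id not in buffers:
            order.append(msg_id)
        buffers[msg_id].append(delta)
    return "\n".join("".join(buffers[i]) for i in order)
```
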
|
||||
|
||||
**Gateway Equivalent Methods** (replaces Gateway API):
|
||||
|
||||
@@ -436,8 +479,25 @@ make dev
|
||||
|
||||
This starts all services and makes the application available at `http://localhost:2026`.
|
||||
|
||||
**All startup modes:**
|
||||
|
||||
| | **Local Foreground** | **Local Daemon** | **Docker Dev** | **Docker Prod** |
|
||||
|---|---|---|---|---|
|
||||
| **Dev** | `./scripts/serve.sh --dev`<br/>`make dev` | `./scripts/serve.sh --dev --daemon`<br/>`make dev-daemon` | `./scripts/docker.sh start`<br/>`make docker-start` | — |
|
||||
| **Dev + Gateway** | `./scripts/serve.sh --dev --gateway`<br/>`make dev-pro` | `./scripts/serve.sh --dev --gateway --daemon`<br/>`make dev-daemon-pro` | `./scripts/docker.sh start --gateway`<br/>`make docker-start-pro` | — |
|
||||
| **Prod** | `./scripts/serve.sh --prod`<br/>`make start` | `./scripts/serve.sh --prod --daemon`<br/>`make start-daemon` | — | `./scripts/deploy.sh`<br/>`make up` |
|
||||
| **Prod + Gateway** | `./scripts/serve.sh --prod --gateway`<br/>`make start-pro` | `./scripts/serve.sh --prod --gateway --daemon`<br/>`make start-daemon-pro` | — | `./scripts/deploy.sh --gateway`<br/>`make up-pro` |
|
||||
|
||||
| Action | Local | Docker Dev | Docker Prod |
|
||||
|---|---|---|---|
|
||||
| **Stop** | `./scripts/serve.sh --stop`<br/>`make stop` | `./scripts/docker.sh stop`<br/>`make docker-stop` | `./scripts/deploy.sh down`<br/>`make down` |
|
||||
| **Restart** | `./scripts/serve.sh --restart [flags]` | `./scripts/docker.sh restart` | — |
|
||||
|
||||
Gateway mode embeds the agent runtime in the Gateway; no LangGraph Server runs.
|
||||
|
||||
**Nginx routing**:
|
||||
- `/api/langgraph/*` → LangGraph Server (2024)
|
||||
- Standard mode: `/api/langgraph/*` → LangGraph Server (2024)
|
||||
- Gateway mode: `/api/langgraph/*` → Gateway embedded runtime (8001) (via envsubst)
|
||||
- `/api/*` (other) → Gateway API (8001)
|
||||
- `/` (non-API) → Frontend (3000)
|
||||
|
||||
|
||||
+49
-10
@@ -1,14 +1,21 @@
|
||||
# Backend Development Dockerfile
|
||||
# Backend Dockerfile — multi-stage build
|
||||
# Stage 1 (builder): compiles native Python extensions with build-essential
|
||||
# Stage 2 (dev): retains toolchain for dev containers (uv sync at startup)
|
||||
# Stage 3 (runtime): clean image without compiler toolchain for production
|
||||
|
||||
# UV source image (override for restricted networks that cannot reach ghcr.io)
|
||||
ARG UV_IMAGE=ghcr.io/astral-sh/uv:0.7.20
|
||||
FROM ${UV_IMAGE} AS uv-source
|
||||
|
||||
FROM python:3.12-slim-bookworm
|
||||
# ── Stage 1: Builder ──────────────────────────────────────────────────────────
|
||||
FROM python:3.12-slim-bookworm AS builder
|
||||
|
||||
ARG NODE_MAJOR=22
|
||||
ARG APT_MIRROR
|
||||
ARG UV_INDEX_URL
|
||||
# Optional extras to install (e.g. "postgres" for PostgreSQL support)
|
||||
# Usage: docker build --build-arg UV_EXTRAS=postgres ...
|
||||
ARG UV_EXTRAS
|
||||
|
||||
# Optionally override apt mirror for restricted networks (e.g. APT_MIRROR=mirrors.aliyun.com)
|
||||
RUN if [ -n "${APT_MIRROR}" ]; then \
|
||||
@@ -16,7 +23,7 @@ RUN if [ -n "${APT_MIRROR}" ]; then \
|
||||
sed -i "s|deb.debian.org|${APT_MIRROR}|g" /etc/apt/sources.list 2>/dev/null || true; \
|
||||
fi
|
||||
|
||||
# Install system dependencies + Node.js (provides npx for MCP servers)
|
||||
# Install build tools + Node.js (build-essential needed for native Python extensions)
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
build-essential \
|
||||
@@ -29,6 +36,42 @@ RUN apt-get update && apt-get install -y \
|
||||
&& apt-get install -y nodejs \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install uv (source image overridable via UV_IMAGE build arg)
|
||||
COPY --from=uv-source /uv /uvx /usr/local/bin/
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy backend source code
|
||||
COPY backend ./backend
|
||||
|
||||
# Install dependencies with cache mount
|
||||
# When UV_EXTRAS is set (e.g. "postgres"), installs optional dependencies.
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
sh -c "cd backend && UV_INDEX_URL=${UV_INDEX_URL:-https://pypi.org/simple} uv sync ${UV_EXTRAS:+--extra $UV_EXTRAS}"
|
||||
|
||||
# ── Stage 2: Dev ──────────────────────────────────────────────────────────────
|
||||
# Retains compiler toolchain from builder so startup-time `uv sync` can build
|
||||
# source distributions in development containers.
|
||||
FROM builder AS dev
|
||||
|
||||
# Install Docker CLI (for DooD: allows starting sandbox containers via host Docker socket)
|
||||
COPY --from=docker:cli /usr/local/bin/docker /usr/local/bin/docker
|
||||
|
||||
EXPOSE 8001 2024
|
||||
|
||||
CMD ["sh", "-c", "cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001"]
|
||||
|
||||
# ── Stage 3: Runtime ──────────────────────────────────────────────────────────
|
||||
# Clean image without build-essential — reduces size (~200 MB) and attack surface.
|
||||
FROM python:3.12-slim-bookworm
|
||||
|
||||
# Copy Node.js runtime from builder (provides npx for MCP servers)
|
||||
COPY --from=builder /usr/bin/node /usr/bin/node
|
||||
COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
|
||||
RUN ln -s ../lib/node_modules/npm/bin/npm-cli.js /usr/bin/npm \
|
||||
&& ln -s ../lib/node_modules/npm/bin/npx-cli.js /usr/bin/npx
|
||||
|
||||
# Install Docker CLI (for DooD: allows starting sandbox containers via host Docker socket)
|
||||
COPY --from=docker:cli /usr/local/bin/docker /usr/local/bin/docker
|
||||
|
||||
@@ -38,15 +81,11 @@ COPY --from=uv-source /uv /uvx /usr/local/bin/
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy frontend source code
|
||||
COPY backend ./backend
|
||||
|
||||
# Install dependencies with cache mount
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
sh -c "cd backend && UV_INDEX_URL=${UV_INDEX_URL:-https://pypi.org/simple} uv sync"
|
||||
# Copy backend with pre-built virtualenv from builder
|
||||
COPY --from=builder /app/backend ./backend
|
||||
|
||||
# Expose ports (gateway: 8001, langgraph: 2024)
|
||||
EXPOSE 8001 2024
|
||||
|
||||
# Default command (can be overridden in docker-compose)
|
||||
CMD ["sh", "-c", "cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001"]
|
||||
CMD ["sh", "-c", "cd backend && PYTHONPATH=. uv run --no-sync uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001"]
|
||||
|
||||
+1
-1
@@ -2,7 +2,7 @@ install:
|
||||
uv sync
|
||||
|
||||
dev:
|
||||
uv run langgraph dev --no-browser --allow-blocking --no-reload
|
||||
uv run langgraph dev --no-browser --no-reload --n-jobs-per-worker 10
|
||||
|
||||
gateway:
|
||||
PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001
|
||||
|
||||
+23
-1
@@ -78,6 +78,7 @@ Per-thread isolated execution with virtual path translation:
|
||||
- **Virtual paths**: `/mnt/user-data/{workspace,uploads,outputs}` → thread-specific physical directories
|
||||
- **Skills path**: `/mnt/skills` → `deer-flow/skills/` directory
|
||||
- **Skills loading**: Recursively discovers nested `SKILL.md` files under `skills/{public,custom}` and preserves nested container paths
|
||||
- **File-write safety**: `str_replace` serializes read-modify-write per `(sandbox.id, path)` so isolated sandboxes keep concurrency even when virtual paths match (see the sketch after this list)
|
||||
- **Tools**: `bash`, `ls`, `read_file`, `write_file`, `str_replace` (`bash` is disabled by default when using `LocalSandboxProvider`; use `AioSandboxProvider` for isolated shell access)
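
A minimal sketch of that scoping, assuming an in-process registry of locks keyed by `(sandbox_id, path)`; the names and the dict-backed file store are illustrative, not the actual sandbox implementation.

```python
import threading
from collections import defaultdict

_locks: dict[tuple[str, str], threading.Lock] = defaultdict(threading.Lock)
_registry_lock = threading.Lock()


def _lock_for(sandbox_id: str, path: str) -> threading.Lock:
    with _registry_lock:  # guard the defaultdict mutation itself
        return _locks[(sandbox_id, path)]


def str_replace(sandbox_id: str, path: str, old: str, new: str, files: dict[str, str]) -> None:
    """Serialize read-modify-write per (sandbox_id, path); different sandboxes
    touching the same virtual path use different locks and do not contend."""
    with _lock_for(sandbox_id, path):
        content = files[path]
        files[path] = content.replace(old, new, 1)
```
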
|
||||
|
||||
### Subagent System
|
||||
@@ -330,7 +331,28 @@ LANGSMITH_PROJECT=xxx
|
||||
|
||||
**Legacy variables:** The `LANGCHAIN_TRACING_V2`, `LANGCHAIN_API_KEY`, `LANGCHAIN_PROJECT`, and `LANGCHAIN_ENDPOINT` variables are also supported for backward compatibility. `LANGSMITH_*` variables take precedence when both are set.
|
||||
|
||||
**Docker:** In `docker-compose.yaml`, tracing is disabled by default (`LANGSMITH_TRACING=false`). Set `LANGSMITH_TRACING=true` and provide `LANGSMITH_API_KEY` in your `.env` to enable it in containerized deployments.
|
||||
### Langfuse Tracing
|
||||
|
||||
DeerFlow also supports [Langfuse](https://langfuse.com) observability for LangChain-compatible runs.
|
||||
|
||||
Add the following to your `.env` file:
|
||||
|
||||
```bash
|
||||
LANGFUSE_TRACING=true
|
||||
LANGFUSE_PUBLIC_KEY=pk-lf-xxxxxxxxxxxxxxxx
|
||||
LANGFUSE_SECRET_KEY=sk-lf-xxxxxxxxxxxxxxxx
|
||||
LANGFUSE_BASE_URL=https://cloud.langfuse.com
|
||||
```
|
||||
|
||||
If you are using a self-hosted Langfuse deployment, set `LANGFUSE_BASE_URL` to your Langfuse host.
|
||||
|
||||
### Dual Provider Behavior
|
||||
|
||||
If both LangSmith and Langfuse are enabled, DeerFlow initializes and attaches both callbacks so the same run data is reported to both systems.
|
||||
|
||||
If a provider is explicitly enabled but required credentials are missing, or the provider callback cannot be initialized, DeerFlow raises an error when tracing is initialized during model creation instead of silently disabling tracing.
|
||||
|
||||
**Docker:** In `docker-compose.yaml`, tracing is disabled by default (`LANGSMITH_TRACING=false`). Set `LANGSMITH_TRACING=true` and/or `LANGFUSE_TRACING=true` in your `.env`, together with the required credentials, to enable tracing in containerized deployments.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -106,3 +106,21 @@ class Channel(ABC):
|
||||
logger.warning("[%s] file upload skipped for %s", self.name, attachment.filename)
|
||||
except Exception:
|
||||
logger.exception("[%s] failed to upload file %s", self.name, attachment.filename)
|
||||
|
||||
async def receive_file(self, msg: InboundMessage, thread_id: str) -> InboundMessage:
|
||||
"""
|
||||
Optionally process and materialize inbound file attachments for this channel.
|
||||
|
||||
By default, this method does nothing and simply returns the original message.
|
||||
Subclasses (e.g. FeishuChannel) may override this to download files (images, documents, etc)
|
||||
referenced in msg.files, save them to the sandbox, and update msg.text to include
|
||||
the sandbox file paths for downstream model consumption.
|
||||
|
||||
Args:
|
||||
msg: The inbound message, possibly containing file metadata in msg.files.
|
||||
thread_id: The resolved DeerFlow thread ID for sandbox path context.
|
||||
|
||||
Returns:
|
||||
The (possibly modified) InboundMessage, with text and/or files updated as needed.
|
||||
"""
|
||||
return msg
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
"""Shared command definitions used by all channel implementations.
|
||||
|
||||
Keeping the authoritative command set in one place ensures that channel
|
||||
parsers (e.g. Feishu) and the ChannelManager dispatcher stay in sync
|
||||
automatically — adding or removing a command here is the single edit
|
||||
required.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
KNOWN_CHANNEL_COMMANDS: frozenset[str] = frozenset(
|
||||
{
|
||||
"/bootstrap",
|
||||
"/new",
|
||||
"/status",
|
||||
"/models",
|
||||
"/memory",
|
||||
"/help",
|
||||
}
|
||||
)
|
||||
@@ -0,0 +1,273 @@
|
||||
"""Discord channel integration using discord.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_DISCORD_MAX_MESSAGE_LEN = 2000
|
||||
|
||||
|
||||
class DiscordChannel(Channel):
|
||||
"""Discord bot channel.
|
||||
|
||||
Configuration keys (in ``config.yaml`` under ``channels.discord``):
|
||||
- ``bot_token``: Discord Bot token.
|
||||
- ``allowed_guilds``: (optional) List of allowed Discord guild IDs. Empty = allow all.
|
||||
"""
|
||||
|
||||
def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
|
||||
super().__init__(name="discord", bus=bus, config=config)
|
||||
self._bot_token = str(config.get("bot_token", "")).strip()
|
||||
self._allowed_guilds: set[int] = set()
|
||||
for guild_id in config.get("allowed_guilds", []):
|
||||
try:
|
||||
self._allowed_guilds.add(int(guild_id))
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
|
||||
self._client = None
|
||||
self._thread: threading.Thread | None = None
|
||||
self._discord_loop: asyncio.AbstractEventLoop | None = None
|
||||
self._main_loop: asyncio.AbstractEventLoop | None = None
|
||||
self._discord_module = None
|
||||
|
||||
async def start(self) -> None:
|
||||
if self._running:
|
||||
return
|
||||
|
||||
try:
|
||||
import discord
|
||||
except ImportError:
|
||||
logger.error("discord.py is not installed. Install it with: uv add discord.py")
|
||||
return
|
||||
|
||||
if not self._bot_token:
|
||||
logger.error("Discord channel requires bot_token")
|
||||
return
|
||||
|
||||
intents = discord.Intents.default()
|
||||
intents.messages = True
|
||||
intents.guilds = True
|
||||
intents.message_content = True
|
||||
|
||||
client = discord.Client(
|
||||
intents=intents,
|
||||
allowed_mentions=discord.AllowedMentions.none(),
|
||||
)
|
||||
self._client = client
|
||||
self._discord_module = discord
|
||||
self._main_loop = asyncio.get_event_loop()
|
||||
|
||||
@client.event
|
||||
async def on_message(message) -> None:
|
||||
await self._on_message(message)
|
||||
|
||||
self._running = True
|
||||
self.bus.subscribe_outbound(self._on_outbound)
|
||||
|
||||
self._thread = threading.Thread(target=self._run_client, daemon=True)
|
||||
self._thread.start()
|
||||
logger.info("Discord channel started")
|
||||
|
||||
async def stop(self) -> None:
|
||||
self._running = False
|
||||
self.bus.unsubscribe_outbound(self._on_outbound)
|
||||
|
||||
if self._client and self._discord_loop and self._discord_loop.is_running():
|
||||
close_future = asyncio.run_coroutine_threadsafe(self._client.close(), self._discord_loop)
|
||||
try:
|
||||
await asyncio.wait_for(asyncio.wrap_future(close_future), timeout=10)
|
||||
except TimeoutError:
|
||||
logger.warning("[Discord] client close timed out after 10s")
|
||||
except Exception:
|
||||
logger.exception("[Discord] error while closing client")
|
||||
|
||||
if self._thread:
|
||||
self._thread.join(timeout=10)
|
||||
self._thread = None
|
||||
|
||||
self._client = None
|
||||
self._discord_loop = None
|
||||
self._discord_module = None
|
||||
logger.info("Discord channel stopped")
|
||||
|
||||
async def send(self, msg: OutboundMessage) -> None:
|
||||
target = await self._resolve_target(msg)
|
||||
if target is None:
|
||||
logger.error("[Discord] target not found for chat_id=%s thread_ts=%s", msg.chat_id, msg.thread_ts)
|
||||
return
|
||||
|
||||
text = msg.text or ""
|
||||
for chunk in self._split_text(text):
|
||||
send_future = asyncio.run_coroutine_threadsafe(target.send(chunk), self._discord_loop)
|
||||
await asyncio.wrap_future(send_future)
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
|
||||
target = await self._resolve_target(msg)
|
||||
if target is None:
|
||||
logger.error("[Discord] target not found for file upload chat_id=%s thread_ts=%s", msg.chat_id, msg.thread_ts)
|
||||
return False
|
||||
|
||||
if self._discord_module is None:
|
||||
return False
|
||||
|
||||
try:
|
||||
fp = open(str(attachment.actual_path), "rb") # noqa: SIM115
|
||||
file = self._discord_module.File(fp, filename=attachment.filename)
|
||||
send_future = asyncio.run_coroutine_threadsafe(target.send(file=file), self._discord_loop)
|
||||
await asyncio.wrap_future(send_future)
|
||||
logger.info("[Discord] file uploaded: %s", attachment.filename)
|
||||
return True
|
||||
except Exception:
|
||||
logger.exception("[Discord] failed to upload file: %s", attachment.filename)
|
||||
return False
|
||||
|
||||
async def _on_message(self, message) -> None:
|
||||
if not self._running or not self._client:
|
||||
return
|
||||
|
||||
if message.author.bot:
|
||||
return
|
||||
|
||||
if self._client.user and message.author.id == self._client.user.id:
|
||||
return
|
||||
|
||||
guild = message.guild
|
||||
if self._allowed_guilds:
|
||||
if guild is None or guild.id not in self._allowed_guilds:
|
||||
return
|
||||
|
||||
text = (message.content or "").strip()
|
||||
if not text:
|
||||
return
|
||||
|
||||
if self._discord_module is None:
|
||||
return
|
||||
|
||||
if isinstance(message.channel, self._discord_module.Thread):
|
||||
chat_id = str(message.channel.parent_id or message.channel.id)
|
||||
thread_id = str(message.channel.id)
|
||||
else:
|
||||
thread = await self._create_thread(message)
|
||||
if thread is None:
|
||||
return
|
||||
chat_id = str(message.channel.id)
|
||||
thread_id = str(thread.id)
|
||||
|
||||
msg_type = InboundMessageType.COMMAND if text.startswith("/") else InboundMessageType.CHAT
|
||||
inbound = self._make_inbound(
|
||||
chat_id=chat_id,
|
||||
user_id=str(message.author.id),
|
||||
text=text,
|
||||
msg_type=msg_type,
|
||||
thread_ts=thread_id,
|
||||
metadata={
|
||||
"guild_id": str(guild.id) if guild else None,
|
||||
"channel_id": str(message.channel.id),
|
||||
"message_id": str(message.id),
|
||||
},
|
||||
)
|
||||
inbound.topic_id = thread_id
|
||||
|
||||
if self._main_loop and self._main_loop.is_running():
|
||||
future = asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._main_loop)
|
||||
future.add_done_callback(lambda f: logger.exception("[Discord] publish_inbound failed", exc_info=f.exception()) if f.exception() else None)
|
||||
|
||||
def _run_client(self) -> None:
|
||||
self._discord_loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(self._discord_loop)
|
||||
try:
|
||||
self._discord_loop.run_until_complete(self._client.start(self._bot_token))
|
||||
except Exception:
|
||||
if self._running:
|
||||
logger.exception("Discord client error")
|
||||
finally:
|
||||
try:
|
||||
if self._client and not self._client.is_closed():
|
||||
self._discord_loop.run_until_complete(self._client.close())
|
||||
except Exception:
|
||||
logger.exception("Error during Discord shutdown")
|
||||
|
||||
async def _create_thread(self, message):
|
||||
try:
|
||||
thread_name = f"deerflow-{message.author.display_name}-{message.id}"[:100]
|
||||
return await message.create_thread(name=thread_name)
|
||||
except Exception:
|
||||
logger.exception("[Discord] failed to create thread for message=%s (threads may be disabled or missing permissions)", message.id)
|
||||
try:
|
||||
await message.channel.send("Could not create a thread for your message. Please check that threads are enabled in this channel.")
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
async def _resolve_target(self, msg: OutboundMessage):
|
||||
if not self._client or not self._discord_loop:
|
||||
return None
|
||||
|
||||
target_ids: list[str] = []
|
||||
if msg.thread_ts:
|
||||
target_ids.append(msg.thread_ts)
|
||||
if msg.chat_id and msg.chat_id not in target_ids:
|
||||
target_ids.append(msg.chat_id)
|
||||
|
||||
for raw_id in target_ids:
|
||||
target = await self._get_channel_or_thread(raw_id)
|
||||
if target is not None:
|
||||
return target
|
||||
return None
|
||||
|
||||
async def _get_channel_or_thread(self, raw_id: str):
|
||||
if not self._client or not self._discord_loop:
|
||||
return None
|
||||
|
||||
try:
|
||||
target_id = int(raw_id)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
get_future = asyncio.run_coroutine_threadsafe(self._fetch_channel(target_id), self._discord_loop)
|
||||
try:
|
||||
return await asyncio.wrap_future(get_future)
|
||||
except Exception:
|
||||
logger.exception("[Discord] failed to resolve target id=%s", raw_id)
|
||||
return None
|
||||
|
||||
async def _fetch_channel(self, target_id: int):
|
||||
if not self._client:
|
||||
return None
|
||||
|
||||
channel = self._client.get_channel(target_id)
|
||||
if channel is not None:
|
||||
return channel
|
||||
|
||||
try:
|
||||
return await self._client.fetch_channel(target_id)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _split_text(text: str) -> list[str]:
|
||||
if not text:
|
||||
return [""]
|
||||
|
||||
chunks: list[str] = []
|
||||
remaining = text
|
||||
while len(remaining) > _DISCORD_MAX_MESSAGE_LEN:
|
||||
split_at = remaining.rfind("\n", 0, _DISCORD_MAX_MESSAGE_LEN)
|
||||
if split_at <= 0:
|
||||
split_at = _DISCORD_MAX_MESSAGE_LEN
|
||||
chunks.append(remaining[:split_at])
|
||||
remaining = remaining[split_at:].lstrip("\n")
|
||||
|
||||
if remaining:
|
||||
chunks.append(remaining)
|
||||
|
||||
return chunks
|
||||
@@ -5,15 +5,26 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import threading
|
||||
from typing import Any
|
||||
from typing import Any, Literal
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
from app.channels.commands import KNOWN_CHANNEL_COMMANDS
|
||||
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
from deerflow.sandbox.sandbox_provider import get_sandbox_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _is_feishu_command(text: str) -> bool:
|
||||
if not text.startswith("/"):
|
||||
return False
|
||||
return text.split(maxsplit=1)[0].lower() in KNOWN_CHANNEL_COMMANDS
|
||||
|
||||
|
||||
class FeishuChannel(Channel):
|
||||
"""Feishu/Lark IM channel using the ``lark-oapi`` WebSocket client.
|
||||
|
||||
@@ -49,6 +60,8 @@ class FeishuChannel(Channel):
|
||||
self._CreateFileRequestBody = None
|
||||
self._CreateImageRequest = None
|
||||
self._CreateImageRequestBody = None
|
||||
self._GetMessageResourceRequest = None
|
||||
self._thread_lock = threading.Lock()
|
||||
|
||||
async def start(self) -> None:
|
||||
if self._running:
|
||||
@@ -66,6 +79,7 @@ class FeishuChannel(Channel):
|
||||
CreateMessageRequest,
|
||||
CreateMessageRequestBody,
|
||||
Emoji,
|
||||
GetMessageResourceRequest,
|
||||
PatchMessageRequest,
|
||||
PatchMessageRequestBody,
|
||||
ReplyMessageRequest,
|
||||
@@ -89,6 +103,7 @@ class FeishuChannel(Channel):
|
||||
self._CreateFileRequestBody = CreateFileRequestBody
|
||||
self._CreateImageRequest = CreateImageRequest
|
||||
self._CreateImageRequestBody = CreateImageRequestBody
|
||||
self._GetMessageResourceRequest = GetMessageResourceRequest
|
||||
|
||||
app_id = self.config.get("app_id", "")
|
||||
app_secret = self.config.get("app_secret", "")
|
||||
@@ -199,7 +214,9 @@ class FeishuChannel(Channel):
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
logger.error("[Feishu] send failed after %d attempts: %s", _max_retries, last_exc)
|
||||
raise last_exc # type: ignore[misc]
|
||||
if last_exc is None:
|
||||
raise RuntimeError("Feishu send failed without an exception from any attempt")
|
||||
raise last_exc
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
|
||||
if not self._api_client:
|
||||
@@ -266,6 +283,115 @@ class FeishuChannel(Channel):
|
||||
raise RuntimeError(f"Feishu file upload failed: code={response.code}, msg={response.msg}")
|
||||
return response.data.file_key
|
||||
|
||||
async def receive_file(self, msg: InboundMessage, thread_id: str) -> InboundMessage:
|
||||
"""Download a Feishu file into the thread uploads directory.
|
||||
|
||||
Returns the sandbox virtual path when the resource is persisted successfully.
|
||||
"""
|
||||
if not msg.thread_ts:
|
||||
logger.warning("[Feishu] received file message without thread_ts, cannot associate with conversation: %s", msg)
|
||||
return msg
|
||||
files = msg.files
|
||||
if not files:
|
||||
logger.warning("[Feishu] received message with no files: %s", msg)
|
||||
return msg
|
||||
text = msg.text
|
||||
for file in files:
|
||||
if file.get("image_key"):
|
||||
virtual_path = await self._receive_single_file(msg.thread_ts, file["image_key"], "image", thread_id)
|
||||
text = text.replace("[image]", virtual_path, 1)
|
||||
elif file.get("file_key"):
|
||||
virtual_path = await self._receive_single_file(msg.thread_ts, file["file_key"], "file", thread_id)
|
||||
text = text.replace("[file]", virtual_path, 1)
|
||||
msg.text = text
|
||||
return msg
|
||||
|
||||
async def _receive_single_file(self, message_id: str, file_key: str, type: Literal["image", "file"], thread_id: str) -> str:
|
||||
request = self._GetMessageResourceRequest.builder().message_id(message_id).file_key(file_key).type(type).build()
|
||||
|
||||
def inner():
|
||||
return self._api_client.im.v1.message_resource.get(request)
|
||||
|
||||
try:
|
||||
response = await asyncio.to_thread(inner)
|
||||
except Exception:
|
||||
logger.exception("[Feishu] resource get request failed for resource_key=%s type=%s", file_key, type)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
if not response.success():
|
||||
logger.warning(
|
||||
"[Feishu] resource get failed: resource_key=%s, type=%s, code=%s, msg=%s, log_id=%s ",
|
||||
file_key,
|
||||
type,
|
||||
response.code,
|
||||
response.msg,
|
||||
response.get_log_id(),
|
||||
)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
image_stream = getattr(response, "file", None)
|
||||
if image_stream is None:
|
||||
logger.warning("[Feishu] resource get returned no file stream: resource_key=%s, type=%s", file_key, type)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
try:
|
||||
content: bytes = await asyncio.to_thread(image_stream.read)
|
||||
except Exception:
|
||||
logger.exception("[Feishu] failed to read resource stream: resource_key=%s, type=%s", file_key, type)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
if not content:
|
||||
logger.warning("[Feishu] empty resource content: resource_key=%s, type=%s", file_key, type)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
paths = get_paths()
|
||||
user_id = get_effective_user_id()
|
||||
paths.ensure_thread_dirs(thread_id, user_id=user_id)
|
||||
uploads_dir = paths.sandbox_uploads_dir(thread_id, user_id=user_id).resolve()
|
||||
|
||||
ext = "png" if type == "image" else "bin"
|
||||
raw_filename = getattr(response, "file_name", "") or f"feishu_{file_key[-12:]}.{ext}"
|
||||
|
||||
# Sanitize filename: preserve extension, replace path chars in name part
|
||||
if "." in raw_filename:
|
||||
name_part, ext = raw_filename.rsplit(".", 1)
|
||||
name_part = re.sub(r"[./\\]", "_", name_part)
|
||||
filename = f"{name_part}.{ext}"
|
||||
else:
|
||||
filename = re.sub(r"[./\\]", "_", raw_filename)
|
||||
resolved_target = uploads_dir / filename
|
||||
|
||||
def down_load():
|
||||
# Serialize writes with the shared lock so concurrent downloads do not clobber the same target file
|
||||
with self._thread_lock:
|
||||
resolved_target.write_bytes(content)
|
||||
|
||||
try:
|
||||
await asyncio.to_thread(down_load)
|
||||
except Exception:
|
||||
logger.exception("[Feishu] failed to persist downloaded resource: %s, type=%s", resolved_target, type)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
virtual_path = f"{VIRTUAL_PATH_PREFIX}/uploads/{resolved_target.name}"
|
||||
|
||||
try:
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
sandbox_provider = get_sandbox_provider(AppConfig.from_file())
|
||||
sandbox_id = sandbox_provider.acquire(thread_id)
|
||||
if sandbox_id != "local":
|
||||
sandbox = sandbox_provider.get(sandbox_id)
|
||||
if sandbox is None:
|
||||
logger.warning("[Feishu] sandbox not found for thread_id=%s", thread_id)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
sandbox.update_file(virtual_path, content)
|
||||
except Exception:
|
||||
logger.exception("[Feishu] failed to sync resource into non-local sandbox: %s", virtual_path)
|
||||
return f"Failed to obtain the [{type}]"
|
||||
|
||||
logger.info("[Feishu] downloaded resource mapped: file_key=%s -> %s", file_key, virtual_path)
|
||||
return virtual_path
|
||||
|
||||
# -- message formatting ------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
@@ -470,9 +596,28 @@ class FeishuChannel(Channel):
|
||||
# Parse message content
|
||||
content = json.loads(message.content)
|
||||
|
||||
# files_list collects any file/image keys found in the Feishu message so their
# content can be downloaded later.
# In the Feishu channel, image_keys are independent of file_keys.
# file_key covers files, videos, and audio, but not stickers.
|
||||
files_list = []
|
||||
|
||||
if "text" in content:
|
||||
# Handle plain text messages
|
||||
text = content["text"]
|
||||
elif "file_key" in content:
|
||||
file_key = content.get("file_key")
|
||||
if isinstance(file_key, str) and file_key:
|
||||
files_list.append({"file_key": file_key})
|
||||
text = "[file]"
|
||||
else:
|
||||
text = ""
|
||||
elif "image_key" in content:
|
||||
image_key = content.get("image_key")
|
||||
if isinstance(image_key, str) and image_key:
|
||||
files_list.append({"image_key": image_key})
|
||||
text = "[image]"
|
||||
else:
|
||||
text = ""
|
||||
elif "content" in content and isinstance(content["content"], list):
|
||||
# Handle rich-text messages with a top-level "content" list (e.g., topic groups/posts)
|
||||
text_paragraphs: list[str] = []
|
||||
@@ -486,6 +631,16 @@ class FeishuChannel(Channel):
|
||||
text_value = element.get("text", "")
|
||||
if text_value:
|
||||
paragraph_text_parts.append(text_value)
|
||||
elif element.get("tag") == "img":
|
||||
image_key = element.get("image_key")
|
||||
if isinstance(image_key, str) and image_key:
|
||||
files_list.append({"image_key": image_key})
|
||||
paragraph_text_parts.append("[image]")
|
||||
elif element.get("tag") in ("file", "media"):
|
||||
file_key = element.get("file_key")
|
||||
if isinstance(file_key, str) and file_key:
|
||||
files_list.append({"file_key": file_key})
|
||||
paragraph_text_parts.append("[file]")
|
||||
if paragraph_text_parts:
|
||||
# Join text segments within a paragraph with spaces so adjacent runs do not merge into one word (e.g. "helloworld").
|
||||
text_paragraphs.append(" ".join(paragraph_text_parts))
|
||||
@@ -505,12 +660,13 @@ class FeishuChannel(Channel):
|
||||
text[:100] if text else "",
|
||||
)
|
||||
|
||||
if not text:
|
||||
if not (text or files_list):
|
||||
logger.info("[Feishu] empty text, ignoring message")
|
||||
return
|
||||
|
||||
# Check if it's a command
|
||||
if text.startswith("/"):
|
||||
# Only treat known slash commands as commands; absolute paths and
|
||||
# other slash-prefixed text should be handled as normal chat.
|
||||
if _is_feishu_command(text):
|
||||
msg_type = InboundMessageType.COMMAND
|
||||
else:
|
||||
msg_type = InboundMessageType.CHAT
|
||||
@@ -524,6 +680,7 @@ class FeishuChannel(Channel):
|
||||
text=text,
|
||||
msg_type=msg_type,
|
||||
thread_ts=msg_id,
|
||||
files=files_list,
|
||||
metadata={"message_id": msg_id, "root_id": root_id},
|
||||
)
|
||||
inbound.topic_id = topic_id
|
||||
|
||||
@@ -7,13 +7,17 @@ import logging
|
||||
import mimetypes
|
||||
import re
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
from collections.abc import Awaitable, Callable, Mapping
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
from langgraph_sdk.errors import ConflictError
|
||||
|
||||
from app.channels.commands import KNOWN_CHANNEL_COMMANDS
|
||||
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
from app.channels.store import ChannelStore
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -32,11 +36,71 @@ STREAM_UPDATE_MIN_INTERVAL_SECONDS = 0.35
|
||||
THREAD_BUSY_MESSAGE = "This conversation is already processing another request. Please wait for it to finish and try again."
|
||||
|
||||
CHANNEL_CAPABILITIES = {
|
||||
"discord": {"supports_streaming": False},
|
||||
"feishu": {"supports_streaming": True},
|
||||
"slack": {"supports_streaming": False},
|
||||
"telegram": {"supports_streaming": False},
|
||||
"wechat": {"supports_streaming": False},
|
||||
"wecom": {"supports_streaming": True},
|
||||
}
|
||||
|
||||
InboundFileReader = Callable[[dict[str, Any], httpx.AsyncClient], Awaitable[bytes | None]]
|
||||
|
||||
|
||||
INBOUND_FILE_READERS: dict[str, InboundFileReader] = {}
|
||||
|
||||
|
||||
def register_inbound_file_reader(channel_name: str, reader: InboundFileReader) -> None:
|
||||
INBOUND_FILE_READERS[channel_name] = reader
|
||||
|
||||
|
||||
async def _read_http_inbound_file(file_info: dict[str, Any], client: httpx.AsyncClient) -> bytes | None:
|
||||
url = file_info.get("url")
|
||||
if not isinstance(url, str) or not url:
|
||||
return None
|
||||
|
||||
resp = await client.get(url)
|
||||
resp.raise_for_status()
|
||||
return resp.content
|
||||
|
||||
|
||||
async def _read_wecom_inbound_file(file_info: dict[str, Any], client: httpx.AsyncClient) -> bytes | None:
|
||||
data = await _read_http_inbound_file(file_info, client)
|
||||
if data is None:
|
||||
return None
|
||||
|
||||
aeskey = file_info.get("aeskey") if isinstance(file_info.get("aeskey"), str) else None
|
||||
if not aeskey:
|
||||
return data
|
||||
|
||||
try:
|
||||
from aibot.crypto_utils import decrypt_file
|
||||
except Exception:
|
||||
logger.exception("[Manager] failed to import WeCom decrypt_file")
|
||||
return None
|
||||
|
||||
return decrypt_file(data, aeskey)
|
||||
|
||||
|
||||
async def _read_wechat_inbound_file(file_info: dict[str, Any], client: httpx.AsyncClient) -> bytes | None:
|
||||
raw_path = file_info.get("path")
|
||||
if isinstance(raw_path, str) and raw_path.strip():
|
||||
try:
|
||||
return await asyncio.to_thread(Path(raw_path).read_bytes)
|
||||
except OSError:
|
||||
logger.exception("[Manager] failed to read WeChat inbound file from local path: %s", raw_path)
|
||||
return None
|
||||
|
||||
full_url = file_info.get("full_url")
|
||||
if isinstance(full_url, str) and full_url.strip():
|
||||
return await _read_http_inbound_file({"url": full_url}, client)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
register_inbound_file_reader("wecom", _read_wecom_inbound_file)
|
||||
register_inbound_file_reader("wechat", _read_wechat_inbound_file)
|
||||
|
||||
|
||||
class InvalidChannelSessionConfigError(ValueError):
|
||||
"""Raised when IM channel session overrides contain invalid agent config."""
|
||||
@@ -279,14 +343,15 @@ def _resolve_attachments(thread_id: str, artifacts: list[str]) -> list[ResolvedA
|
||||
|
||||
attachments: list[ResolvedAttachment] = []
|
||||
paths = get_paths()
|
||||
outputs_dir = paths.sandbox_outputs_dir(thread_id).resolve()
|
||||
user_id = get_effective_user_id()
|
||||
outputs_dir = paths.sandbox_outputs_dir(thread_id, user_id=user_id).resolve()
|
||||
for virtual_path in artifacts:
|
||||
# Security: only allow files from the agent outputs directory
|
||||
if not virtual_path.startswith(_OUTPUTS_VIRTUAL_PREFIX):
|
||||
logger.warning("[Manager] rejected non-outputs artifact path: %s", virtual_path)
|
||||
continue
|
||||
try:
|
||||
actual = paths.resolve_virtual_path(thread_id, virtual_path)
|
||||
actual = paths.resolve_virtual_path(thread_id, virtual_path, user_id=user_id)
|
||||
# Verify the resolved path is actually under the outputs directory
|
||||
# (guards against path-traversal even after prefix check)
|
||||
try:
|
||||
@@ -341,6 +406,105 @@ def _prepare_artifact_delivery(
|
||||
return response_text, attachments
|
||||
|
||||
|
||||
async def _ingest_inbound_files(thread_id: str, msg: InboundMessage) -> list[dict[str, Any]]:
|
||||
if not msg.files:
|
||||
return []
|
||||
|
||||
from deerflow.uploads.manager import claim_unique_filename, ensure_uploads_dir, normalize_filename
|
||||
|
||||
uploads_dir = ensure_uploads_dir(thread_id)
|
||||
seen_names = {entry.name for entry in uploads_dir.iterdir() if entry.is_file()}
|
||||
|
||||
created: list[dict[str, Any]] = []
|
||||
file_reader = INBOUND_FILE_READERS.get(msg.channel_name, _read_http_inbound_file)
|
||||
async with httpx.AsyncClient(timeout=httpx.Timeout(20.0)) as client:
|
||||
for idx, f in enumerate(msg.files):
|
||||
if not isinstance(f, dict):
|
||||
continue
|
||||
|
||||
ftype = f.get("type") if isinstance(f.get("type"), str) else "file"
|
||||
filename = f.get("filename") if isinstance(f.get("filename"), str) else ""
|
||||
|
||||
try:
|
||||
data = await file_reader(f, client)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"[Manager] failed to read inbound file: channel=%s, file=%s",
|
||||
msg.channel_name,
|
||||
f.get("url") or filename or idx,
|
||||
)
|
||||
continue
|
||||
|
||||
if data is None:
|
||||
logger.warning(
|
||||
"[Manager] inbound file reader returned no data: channel=%s, file=%s",
|
||||
msg.channel_name,
|
||||
f.get("url") or filename or idx,
|
||||
)
|
||||
continue
|
||||
|
||||
if not filename:
|
||||
ext = ".bin"
|
||||
if ftype == "image":
|
||||
ext = ".png"
|
||||
filename = f"{msg.thread_ts or 'msg'}_{idx}{ext}"
|
||||
|
||||
try:
|
||||
safe_name = claim_unique_filename(normalize_filename(filename), seen_names)
|
||||
except ValueError:
|
||||
logger.warning(
|
||||
"[Manager] skipping inbound file with unsafe filename: channel=%s, file=%r",
|
||||
msg.channel_name,
|
||||
filename,
|
||||
)
|
||||
continue
|
||||
|
||||
dest = uploads_dir / safe_name
|
||||
try:
|
||||
dest.write_bytes(data)
|
||||
except Exception:
|
||||
logger.exception("[Manager] failed to write inbound file: %s", dest)
|
||||
continue
|
||||
|
||||
created.append(
|
||||
{
|
||||
"filename": safe_name,
|
||||
"size": len(data),
|
||||
"path": f"/mnt/user-data/uploads/{safe_name}",
|
||||
"is_image": ftype == "image",
|
||||
}
|
||||
)
|
||||
|
||||
return created
|
||||
|
||||
|
||||
def _format_uploaded_files_block(files: list[dict[str, Any]]) -> str:
|
||||
lines = [
|
||||
"<uploaded_files>",
|
||||
"The following files were uploaded in this message:",
|
||||
"",
|
||||
]
|
||||
if not files:
|
||||
lines.append("(empty)")
|
||||
else:
|
||||
for f in files:
|
||||
filename = f.get("filename", "")
|
||||
size = int(f.get("size") or 0)
|
||||
size_kb = size / 1024 if size else 0
|
||||
size_str = f"{size_kb:.1f} KB" if size_kb < 1024 else f"{size_kb / 1024:.1f} MB"
|
||||
path = f.get("path", "")
|
||||
is_image = bool(f.get("is_image"))
|
||||
file_kind = "image" if is_image else "file"
|
||||
lines.append(f"- {filename} ({size_str})")
|
||||
lines.append(f" Type: {file_kind}")
|
||||
lines.append(f" Path: {path}")
|
||||
lines.append("")
|
||||
lines.append("Use `read_file` for text-based files and documents.")
|
||||
lines.append("Use `view_image` for image files (jpg, jpeg, png, webp) so the model can inspect the image content.")
|
||||
lines.append("</uploaded_files>")
|
||||
return "\n".join(lines)
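For reference, a sketch of what this block renders to for one non-image upload; the filename, size, and path are invented, and the indentation follows the f-strings above:

<uploaded_files>
The following files were uploaded in this message:

- report.pdf (2.0 KB)
 Type: file
 Path: /mnt/user-data/uploads/report.pdf

Use `read_file` for text-based files and documents.
Use `view_image` for image files (jpg, jpeg, png, webp) so the model can inspect the image content.
</uploaded_files>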
|
||||
|
||||
|
||||
class ChannelManager:
|
||||
"""Core dispatcher that bridges IM channels to the DeerFlow agent.
|
||||
|
||||
@@ -533,8 +697,25 @@ class ChannelManager:
|
||||
thread_id = await self._create_thread(client, msg)
|
||||
|
||||
assistant_id, run_config, run_context = self._resolve_run_params(msg, thread_id)
|
||||
|
||||
# If the inbound message contains file attachments, let the channel
|
||||
# materialize (download) them and update msg.text to include sandbox file paths.
|
||||
# This enables downstream models to access user-uploaded files by path.
|
||||
# Channels that do not support file download will simply return the original message.
|
||||
if msg.files:
|
||||
from .service import get_channel_service
|
||||
|
||||
service = get_channel_service()
|
||||
channel = service.get_channel(msg.channel_name) if service else None
|
||||
logger.info("[Manager] preparing receive file context for %d attachments", len(msg.files))
|
||||
msg = await channel.receive_file(msg, thread_id) if channel else msg
|
||||
if extra_context:
|
||||
run_context.update(extra_context)
|
||||
|
||||
uploaded = await _ingest_inbound_files(thread_id, msg)
|
||||
if uploaded:
|
||||
msg.text = f"{_format_uploaded_files_block(uploaded)}\n\n{msg.text}".strip()
|
||||
|
||||
if self._channel_supports_streaming(msg.channel_name):
|
||||
await self._handle_streaming_chat(
|
||||
client,
|
||||
@@ -735,7 +916,8 @@ class ChannelManager:
|
||||
"/help — Show this help"
|
||||
)
|
||||
else:
|
||||
reply = f"Unknown command: /{command}. Type /help for available commands."
|
||||
available = " | ".join(sorted(KNOWN_CHANNEL_COMMANDS))
|
||||
reply = f"Unknown command: /{command}. Available commands: {available}"
|
||||
|
||||
outbound = OutboundMessage(
|
||||
channel_name=msg.channel_name,
|
||||
|
||||
@@ -4,19 +4,36 @@ from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.manager import DEFAULT_GATEWAY_URL, DEFAULT_LANGGRAPH_URL, ChannelManager
|
||||
from app.channels.message_bus import MessageBus
|
||||
from app.channels.store import ChannelStore
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Channel name → import path for lazy loading
|
||||
_CHANNEL_REGISTRY: dict[str, str] = {
|
||||
"discord": "app.channels.discord:DiscordChannel",
|
||||
"feishu": "app.channels.feishu:FeishuChannel",
|
||||
"slack": "app.channels.slack:SlackChannel",
|
||||
"telegram": "app.channels.telegram:TelegramChannel",
|
||||
"wechat": "app.channels.wechat:WechatChannel",
|
||||
"wecom": "app.channels.wecom:WeComChannel",
|
||||
}
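The loader that consumes this registry is not part of this hunk; as an illustrative sketch only (not the project's actual implementation), "module:ClassName" specs are typically resolved like this:

# Illustrative sketch of lazy resolution of "module:ClassName" entries; not the project's loader.
import importlib


def _load_channel_class(spec: str) -> type:
    module_path, _, class_name = spec.partition(":")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# e.g. _load_channel_class("app.channels.feishu:FeishuChannel") would return the FeishuChannel class.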
|
||||
|
||||
# Keys that indicate a user has configured credentials for a channel.
|
||||
_CHANNEL_CREDENTIAL_KEYS: dict[str, list[str]] = {
|
||||
"discord": ["bot_token"],
|
||||
"feishu": ["app_id", "app_secret"],
|
||||
"slack": ["bot_token", "app_token"],
|
||||
"telegram": ["bot_token"],
|
||||
"wecom": ["bot_id", "bot_secret"],
|
||||
"wechat": ["bot_token"],
|
||||
}
|
||||
|
||||
_CHANNELS_LANGGRAPH_URL_ENV = "DEER_FLOW_CHANNELS_LANGGRAPH_URL"
|
||||
@@ -61,14 +78,11 @@ class ChannelService:
|
||||
self._running = False
|
||||
|
||||
@classmethod
|
||||
def from_app_config(cls) -> ChannelService:
|
||||
"""Create a ChannelService from the application config."""
|
||||
from deerflow.config.app_config import get_app_config
|
||||
|
||||
config = get_app_config()
|
||||
def from_app_config(cls, app_config: AppConfig) -> ChannelService:
|
||||
"""Create a ChannelService from an explicit application config."""
|
||||
channels_config = {}
|
||||
# extra fields are allowed by AppConfig (extra="allow")
|
||||
extra = config.model_extra or {}
|
||||
extra = app_config.model_extra or {}
|
||||
if "channels" in extra:
|
||||
channels_config = extra["channels"]
|
||||
return cls(channels_config=channels_config)
|
||||
@@ -84,7 +98,16 @@ class ChannelService:
|
||||
if not isinstance(channel_config, dict):
|
||||
continue
|
||||
if not channel_config.get("enabled", False):
|
||||
logger.info("Channel %s is disabled, skipping", name)
|
||||
cred_keys = _CHANNEL_CREDENTIAL_KEYS.get(name, [])
|
||||
has_creds = any(not isinstance(channel_config.get(k), bool) and channel_config.get(k) is not None and str(channel_config[k]).strip() for k in cred_keys)
|
||||
if has_creds:
|
||||
logger.warning(
|
||||
"Channel '%s' has credentials configured but is disabled. Set enabled: true under channels.%s in config.yaml to activate it.",
|
||||
name,
|
||||
name,
|
||||
)
|
||||
else:
|
||||
logger.info("Channel %s is disabled, skipping", name)
|
||||
continue
|
||||
|
||||
await self._start_channel(name, channel_config)
|
||||
@@ -163,6 +186,10 @@ class ChannelService:
|
||||
"channels": channels_status,
|
||||
}
|
||||
|
||||
def get_channel(self, name: str) -> Channel | None:
|
||||
"""Return a running channel instance by name when available."""
|
||||
return self._channels.get(name)
|
||||
|
||||
|
||||
# -- singleton access -------------------------------------------------------
|
||||
|
||||
@@ -174,12 +201,12 @@ def get_channel_service() -> ChannelService | None:
|
||||
return _channel_service
|
||||
|
||||
|
||||
async def start_channel_service() -> ChannelService:
|
||||
async def start_channel_service(app_config: AppConfig) -> ChannelService:
|
||||
"""Create and start the global ChannelService from app config."""
|
||||
global _channel_service
|
||||
if _channel_service is not None:
|
||||
return _channel_service
|
||||
_channel_service = ChannelService.from_app_config()
|
||||
_channel_service = ChannelService.from_app_config(app_config)
|
||||
await _channel_service.start()
|
||||
return _channel_service
|
||||
|
||||
|
||||
@@ -16,13 +16,31 @@ logger = logging.getLogger(__name__)
|
||||
_slack_md_converter = SlackMarkdownConverter()
|
||||
|
||||
|
||||
def _normalize_allowed_users(allowed_users: Any) -> set[str]:
|
||||
if allowed_users is None:
|
||||
return set()
|
||||
if isinstance(allowed_users, str):
|
||||
values = [allowed_users]
|
||||
elif isinstance(allowed_users, list | tuple | set):
|
||||
values = allowed_users
|
||||
else:
|
||||
logger.warning(
|
||||
"Slack allowed_users should be a list of Slack user IDs or a single Slack user ID string; treating %s as one string value",
|
||||
type(allowed_users).__name__,
|
||||
)
|
||||
values = [allowed_users]
|
||||
return {str(user_id) for user_id in values if str(user_id)}
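A few illustrative calls (the Slack user IDs are invented):

# Illustrative behaviour of _normalize_allowed_users:
_normalize_allowed_users(None)                    # -> set()
_normalize_allowed_users("U012345")               # -> {"U012345"}
_normalize_allowed_users(["U012345", "U067890"])  # -> {"U012345", "U067890"}
_normalize_allowed_users(12345)                   # logs a warning, then -> {"12345"}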
|
||||
|
||||
|
||||
class SlackChannel(Channel):
|
||||
"""Slack IM channel using Socket Mode (WebSocket, no public IP).
|
||||
|
||||
Configuration keys (in ``config.yaml`` under ``channels.slack``):
|
||||
- ``bot_token``: Slack Bot User OAuth Token (xoxb-...).
|
||||
- ``app_token``: Slack App-Level Token (xapp-...) for Socket Mode.
|
||||
- ``allowed_users``: (optional) List of allowed Slack user IDs. Empty = allow all.
|
||||
- ``allowed_users``: (optional) List of allowed Slack user IDs, or a
|
||||
single Slack user ID string as shorthand. Empty = allow all. Other
|
||||
scalar values are treated as a single string with a warning.
|
||||
"""
|
||||
|
||||
def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
|
||||
@@ -30,7 +48,7 @@ class SlackChannel(Channel):
|
||||
self._socket_client = None
|
||||
self._web_client = None
|
||||
self._loop: asyncio.AbstractEventLoop | None = None
|
||||
self._allowed_users: set[str] = set(config.get("allowed_users", []))
|
||||
self._allowed_users = _normalize_allowed_users(config.get("allowed_users", []))
|
||||
|
||||
async def start(self) -> None:
|
||||
if self._running:
|
||||
@@ -126,7 +144,9 @@ class SlackChannel(Channel):
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
raise last_exc # type: ignore[misc]
|
||||
if last_exc is None:
|
||||
raise RuntimeError("Slack send failed without an exception from any attempt")
|
||||
raise last_exc
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
|
||||
if not self._web_client:
|
||||
|
||||
@@ -125,7 +125,9 @@ class TelegramChannel(Channel):
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
logger.error("[Telegram] send failed after %d attempts: %s", _max_retries, last_exc)
|
||||
raise last_exc # type: ignore[misc]
|
||||
if last_exc is None:
|
||||
raise RuntimeError("Telegram send failed without an exception from any attempt")
|
||||
raise last_exc
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
|
||||
if not self._application:
|
||||
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,394 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import hashlib
|
||||
import logging
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import Any, cast
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import (
|
||||
InboundMessageType,
|
||||
MessageBus,
|
||||
OutboundMessage,
|
||||
ResolvedAttachment,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WeComChannel(Channel):
|
||||
def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
|
||||
super().__init__(name="wecom", bus=bus, config=config)
|
||||
self._bot_id: str | None = None
|
||||
self._bot_secret: str | None = None
|
||||
self._ws_client = None
|
||||
self._ws_task: asyncio.Task | None = None
|
||||
self._ws_frames: dict[str, dict[str, Any]] = {}
|
||||
self._ws_stream_ids: dict[str, str] = {}
|
||||
self._working_message = "Working on it..."
|
||||
|
||||
def _clear_ws_context(self, thread_ts: str | None) -> None:
|
||||
if not thread_ts:
|
||||
return
|
||||
self._ws_frames.pop(thread_ts, None)
|
||||
self._ws_stream_ids.pop(thread_ts, None)
|
||||
|
||||
async def _send_ws_upload_command(self, req_id: str, body: dict[str, Any], cmd: str) -> dict[str, Any]:
|
||||
if not self._ws_client:
|
||||
raise RuntimeError("WeCom WebSocket client is not available")
|
||||
|
||||
ws_manager = getattr(self._ws_client, "_ws_manager", None)
|
||||
send_reply = getattr(ws_manager, "send_reply", None)
|
||||
if not callable(send_reply):
|
||||
raise RuntimeError("Installed wecom-aibot-python-sdk does not expose the WebSocket media upload API expected by DeerFlow. Use wecom-aibot-python-sdk==0.1.6 or update the adapter.")
|
||||
|
||||
send_reply_async = cast(Callable[[str, dict[str, Any], str], Awaitable[dict[str, Any]]], send_reply)
|
||||
return await send_reply_async(req_id, body, cmd)
|
||||
|
||||
async def start(self) -> None:
|
||||
if self._running:
|
||||
return
|
||||
|
||||
bot_id = self.config.get("bot_id")
|
||||
bot_secret = self.config.get("bot_secret")
|
||||
working_message = self.config.get("working_message")
|
||||
|
||||
self._bot_id = bot_id if isinstance(bot_id, str) and bot_id else None
|
||||
self._bot_secret = bot_secret if isinstance(bot_secret, str) and bot_secret else None
|
||||
self._working_message = working_message if isinstance(working_message, str) and working_message else "Working on it..."
|
||||
|
||||
if not self._bot_id or not self._bot_secret:
|
||||
logger.error("WeCom channel requires bot_id and bot_secret")
|
||||
return
|
||||
|
||||
try:
|
||||
from aibot import WSClient, WSClientOptions
|
||||
except ImportError:
|
||||
logger.error("wecom-aibot-python-sdk is not installed. Install it with: uv add wecom-aibot-python-sdk")
|
||||
return
|
||||
else:
|
||||
self._ws_client = WSClient(WSClientOptions(bot_id=self._bot_id, secret=self._bot_secret, logger=logger))
|
||||
self._ws_client.on("message.text", self._on_ws_text)
|
||||
self._ws_client.on("message.mixed", self._on_ws_mixed)
|
||||
self._ws_client.on("message.image", self._on_ws_image)
|
||||
self._ws_client.on("message.file", self._on_ws_file)
|
||||
self._ws_task = asyncio.create_task(self._ws_client.connect())
|
||||
|
||||
self._running = True
|
||||
self.bus.subscribe_outbound(self._on_outbound)
|
||||
logger.info("WeCom channel started")
|
||||
|
||||
async def stop(self) -> None:
|
||||
self._running = False
|
||||
self.bus.unsubscribe_outbound(self._on_outbound)
|
||||
if self._ws_task:
|
||||
try:
|
||||
self._ws_task.cancel()
|
||||
except Exception:
|
||||
pass
|
||||
self._ws_task = None
|
||||
if self._ws_client:
|
||||
try:
|
||||
self._ws_client.disconnect()
|
||||
except Exception:
|
||||
pass
|
||||
self._ws_client = None
|
||||
self._ws_frames.clear()
|
||||
self._ws_stream_ids.clear()
|
||||
logger.info("WeCom channel stopped")
|
||||
|
||||
async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
|
||||
if self._ws_client:
|
||||
await self._send_ws(msg, _max_retries=_max_retries)
|
||||
return
|
||||
logger.warning("[WeCom] send called but WebSocket client is not available")
|
||||
|
||||
async def _on_outbound(self, msg: OutboundMessage) -> None:
|
||||
if msg.channel_name != self.name:
|
||||
return
|
||||
|
||||
try:
|
||||
await self.send(msg)
|
||||
except Exception:
|
||||
logger.exception("Failed to send outbound message on channel %s", self.name)
|
||||
if msg.is_final:
|
||||
self._clear_ws_context(msg.thread_ts)
|
||||
return
|
||||
|
||||
for attachment in msg.attachments:
|
||||
try:
|
||||
success = await self.send_file(msg, attachment)
|
||||
if not success:
|
||||
logger.warning("[%s] file upload skipped for %s", self.name, attachment.filename)
|
||||
except Exception:
|
||||
logger.exception("[%s] failed to upload file %s", self.name, attachment.filename)
|
||||
|
||||
if msg.is_final:
|
||||
self._clear_ws_context(msg.thread_ts)
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
|
||||
if not msg.is_final:
|
||||
return True
|
||||
if not self._ws_client:
|
||||
return False
|
||||
if not msg.thread_ts:
|
||||
return False
|
||||
frame = self._ws_frames.get(msg.thread_ts)
|
||||
if not frame:
|
||||
return False
|
||||
|
||||
media_type = "image" if attachment.is_image else "file"
|
||||
size_limit = 2 * 1024 * 1024 if attachment.is_image else 20 * 1024 * 1024
|
||||
if attachment.size > size_limit:
|
||||
logger.warning(
|
||||
"[WeCom] %s too large (%d bytes), skipping: %s",
|
||||
media_type,
|
||||
attachment.size,
|
||||
attachment.filename,
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
media_id = await self._upload_media_ws(
|
||||
media_type=media_type,
|
||||
filename=attachment.filename,
|
||||
path=str(attachment.actual_path),
|
||||
size=attachment.size,
|
||||
)
|
||||
if not media_id:
|
||||
return False
|
||||
|
||||
body = {media_type: {"media_id": media_id}, "msgtype": media_type}
|
||||
await self._ws_client.reply(frame, body)
|
||||
logger.debug("[WeCom] %s sent via ws: %s", media_type, attachment.filename)
|
||||
return True
|
||||
except Exception:
|
||||
logger.exception("[WeCom] failed to upload/send file via ws: %s", attachment.filename)
|
||||
return False
|
||||
|
||||
async def _on_ws_text(self, frame: dict[str, Any]) -> None:
|
||||
body = frame.get("body", {}) or {}
|
||||
text = ((body.get("text") or {}).get("content") or "").strip()
|
||||
quote = body.get("quote", {}).get("text", {}).get("content", "").strip()
|
||||
if not text and not quote:
|
||||
return
|
||||
await self._publish_ws_inbound(frame, text + (f"\nQuote message: {quote}" if quote else ""))
|
||||
|
||||
async def _on_ws_mixed(self, frame: dict[str, Any]) -> None:
|
||||
body = frame.get("body", {}) or {}
|
||||
mixed = body.get("mixed") or {}
|
||||
items = mixed.get("msg_item") or []
|
||||
parts: list[str] = []
|
||||
files: list[dict[str, Any]] = []
|
||||
for item in items:
|
||||
item_type = (item or {}).get("msgtype")
|
||||
if item_type == "text":
|
||||
content = (((item or {}).get("text") or {}).get("content") or "").strip()
|
||||
if content:
|
||||
parts.append(content)
|
||||
elif item_type in ("image", "file"):
|
||||
payload = (item or {}).get(item_type) or {}
|
||||
url = payload.get("url")
|
||||
aeskey = payload.get("aeskey")
|
||||
if isinstance(url, str) and url:
|
||||
files.append(
|
||||
{
|
||||
"type": item_type,
|
||||
"url": url,
|
||||
"aeskey": (aeskey if isinstance(aeskey, str) and aeskey else None),
|
||||
}
|
||||
)
|
||||
text = "\n\n".join(parts).strip()
|
||||
if not text and not files:
|
||||
return
|
||||
if not text:
|
||||
text = "(receive image/file)"
|
||||
await self._publish_ws_inbound(frame, text, files=files)
|
||||
|
||||
async def _on_ws_image(self, frame: dict[str, Any]) -> None:
|
||||
body = frame.get("body", {}) or {}
|
||||
image = body.get("image") or {}
|
||||
url = image.get("url")
|
||||
aeskey = image.get("aeskey")
|
||||
if not isinstance(url, str) or not url:
|
||||
return
|
||||
await self._publish_ws_inbound(
|
||||
frame,
"(receive image)",
|
||||
files=[
|
||||
{
|
||||
"type": "image",
|
||||
"url": url,
|
||||
"aeskey": aeskey if isinstance(aeskey, str) and aeskey else None,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
async def _on_ws_file(self, frame: dict[str, Any]) -> None:
|
||||
body = frame.get("body", {}) or {}
|
||||
file_obj = body.get("file") or {}
|
||||
url = file_obj.get("url")
|
||||
aeskey = file_obj.get("aeskey")
|
||||
if not isinstance(url, str) or not url:
|
||||
return
|
||||
await self._publish_ws_inbound(
|
||||
frame,
|
||||
"(receive file)",
|
||||
files=[
|
||||
{
|
||||
"type": "file",
|
||||
"url": url,
|
||||
"aeskey": aeskey if isinstance(aeskey, str) and aeskey else None,
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
async def _publish_ws_inbound(
|
||||
self,
|
||||
frame: dict[str, Any],
|
||||
text: str,
|
||||
*,
|
||||
files: list[dict[str, Any]] | None = None,
|
||||
) -> None:
|
||||
if not self._ws_client:
|
||||
return
|
||||
try:
|
||||
from aibot import generate_req_id
|
||||
except Exception:
|
||||
return
|
||||
|
||||
body = frame.get("body", {}) or {}
|
||||
msg_id = body.get("msgid")
|
||||
if not msg_id:
|
||||
return
|
||||
|
||||
user_id = (body.get("from") or {}).get("userid")
|
||||
|
||||
inbound_type = InboundMessageType.COMMAND if text.startswith("/") else InboundMessageType.CHAT
|
||||
inbound = self._make_inbound(
|
||||
chat_id=user_id, # keep user's conversation in memory
|
||||
user_id=user_id,
|
||||
text=text,
|
||||
msg_type=inbound_type,
|
||||
thread_ts=msg_id,
|
||||
files=files or [],
|
||||
metadata={"aibotid": body.get("aibotid"), "chattype": body.get("chattype")},
|
||||
)
|
||||
inbound.topic_id = user_id # keep the same thread
|
||||
|
||||
stream_id = generate_req_id("stream")
|
||||
self._ws_frames[msg_id] = frame
|
||||
self._ws_stream_ids[msg_id] = stream_id
|
||||
|
||||
try:
|
||||
await self._ws_client.reply_stream(frame, stream_id, self._working_message, False)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
await self.bus.publish_inbound(inbound)
|
||||
|
||||
async def _send_ws(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
|
||||
if not self._ws_client:
|
||||
return
|
||||
try:
|
||||
from aibot import generate_req_id
|
||||
except Exception:
|
||||
generate_req_id = None
|
||||
|
||||
if msg.thread_ts and msg.thread_ts in self._ws_frames:
|
||||
frame = self._ws_frames[msg.thread_ts]
|
||||
stream_id = self._ws_stream_ids.get(msg.thread_ts)
|
||||
if not stream_id and generate_req_id:
|
||||
stream_id = generate_req_id("stream")
|
||||
self._ws_stream_ids[msg.thread_ts] = stream_id
|
||||
if not stream_id:
|
||||
return
|
||||
|
||||
last_exc: Exception | None = None
|
||||
for attempt in range(_max_retries):
|
||||
try:
|
||||
await self._ws_client.reply_stream(frame, stream_id, msg.text, bool(msg.is_final))
|
||||
return
|
||||
except Exception as exc:
|
||||
last_exc = exc
|
||||
if attempt < _max_retries - 1:
|
||||
await asyncio.sleep(2**attempt)
|
||||
if last_exc:
|
||||
raise last_exc
|
||||
|
||||
body = {"msgtype": "markdown", "markdown": {"content": msg.text}}
|
||||
last_exc = None
|
||||
for attempt in range(_max_retries):
|
||||
try:
|
||||
await self._ws_client.send_message(msg.chat_id, body)
|
||||
return
|
||||
except Exception as exc:
|
||||
last_exc = exc
|
||||
if attempt < _max_retries - 1:
|
||||
await asyncio.sleep(2**attempt)
|
||||
if last_exc:
|
||||
raise last_exc
|
||||
|
||||
async def _upload_media_ws(
|
||||
self,
|
||||
*,
|
||||
media_type: str,
|
||||
filename: str,
|
||||
path: str,
|
||||
size: int,
|
||||
) -> str | None:
|
||||
if not self._ws_client:
|
||||
return None
|
||||
try:
|
||||
from aibot import generate_req_id
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
chunk_size = 512 * 1024
|
||||
total_chunks = (size + chunk_size - 1) // chunk_size
|
||||
if total_chunks < 1 or total_chunks > 100:
|
||||
logger.warning("[WeCom] invalid total_chunks=%d for %s", total_chunks, filename)
|
||||
return None
|
||||
|
||||
md5_hasher = hashlib.md5()
|
||||
with open(path, "rb") as f:
|
||||
for chunk in iter(lambda: f.read(1024 * 1024), b""):
|
||||
md5_hasher.update(chunk)
|
||||
md5 = md5_hasher.hexdigest()
|
||||
|
||||
init_req_id = generate_req_id("aibot_upload_media_init")
|
||||
init_body = {
|
||||
"type": media_type,
|
||||
"filename": filename,
|
||||
"total_size": int(size),
|
||||
"total_chunks": int(total_chunks),
|
||||
"md5": md5,
|
||||
}
|
||||
init_ack = await self._send_ws_upload_command(init_req_id, init_body, "aibot_upload_media_init")
|
||||
upload_id = (init_ack.get("body") or {}).get("upload_id")
|
||||
if not upload_id:
|
||||
logger.warning("[WeCom] upload init returned no upload_id: %s", init_ack)
|
||||
return None
|
||||
|
||||
with open(path, "rb") as f:
|
||||
for idx in range(total_chunks):
|
||||
data = f.read(chunk_size)
|
||||
if not data:
|
||||
break
|
||||
chunk_req_id = generate_req_id("aibot_upload_media_chunk")
|
||||
chunk_body = {
|
||||
"upload_id": upload_id,
|
||||
"chunk_index": int(idx),
|
||||
"base64_data": base64.b64encode(data).decode("utf-8"),
|
||||
}
|
||||
await self._send_ws_upload_command(chunk_req_id, chunk_body, "aibot_upload_media_chunk")
|
||||
|
||||
finish_req_id = generate_req_id("aibot_upload_media_finish")
|
||||
finish_ack = await self._send_ws_upload_command(finish_req_id, {"upload_id": upload_id}, "aibot_upload_media_finish")
|
||||
media_id = (finish_ack.get("body") or {}).get("media_id")
|
||||
if not media_id:
|
||||
logger.warning("[WeCom] upload finish returned no media_id: %s", finish_ack)
|
||||
return None
|
||||
return media_id
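A quick sanity check of the chunking arithmetic above; the file size is invented:

# Sanity check of the chunking arithmetic (the file size is an invented example):
size = 1_363_149                     # ~1.3 MB image, under the 2 MB image cap in send_file
chunk_size = 512 * 1024              # 524_288 bytes per chunk
total_chunks = (size + chunk_size - 1) // chunk_size
assert total_chunks == 3             # two full 512 KiB chunks plus one partial chunk, well under the 100-chunk ceiling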
|
||||
@@ -1,16 +1,23 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from collections.abc import AsyncGenerator
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from app.gateway.auth_middleware import AuthMiddleware
|
||||
from app.gateway.config import get_gateway_config
|
||||
from app.gateway.csrf_middleware import CSRFMiddleware
|
||||
from app.gateway.deps import langgraph_runtime
|
||||
from app.gateway.routers import (
|
||||
agents,
|
||||
artifacts,
|
||||
assistants_compat,
|
||||
auth,
|
||||
channels,
|
||||
feedback,
|
||||
mcp,
|
||||
memory,
|
||||
models,
|
||||
@@ -21,7 +28,7 @@ from app.gateway.routers import (
|
||||
threads,
|
||||
uploads,
|
||||
)
|
||||
from deerflow.config.app_config import get_app_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
@@ -32,14 +39,123 @@ logging.basicConfig(
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Upper bound (seconds) each lifespan shutdown hook is allowed to run.
|
||||
# Bounds worker exit time so uvicorn's reload supervisor does not keep
|
||||
# firing signals into a worker that is stuck waiting for shutdown cleanup.
|
||||
_SHUTDOWN_HOOK_TIMEOUT_SECONDS = 5.0
|
||||
|
||||
|
||||
async def _ensure_admin_user(app: FastAPI) -> None:
|
||||
"""Startup hook: handle first boot and migrate orphan threads otherwise.
|
||||
|
||||
After admin creation, migrate orphan threads from the LangGraph
|
||||
store (metadata.user_id unset) to the admin account. This is the
|
||||
"no-auth → with-auth" upgrade path: users who ran DeerFlow without
|
||||
authentication have existing LangGraph thread data that needs an
|
||||
owner assigned.
|
||||
First boot (no admin exists):
|
||||
- Does NOT create any user accounts automatically.
|
||||
- The operator must visit ``/setup`` to create the first admin.
|
||||
|
||||
Subsequent boots (admin already exists):
|
||||
- Runs the one-time "no-auth → with-auth" orphan thread migration for
|
||||
existing LangGraph thread metadata that has no owner_id.
|
||||
|
||||
No SQL persistence migration is needed: the four user_id columns
|
||||
(threads_meta, runs, run_events, feedback) only come into existence
|
||||
alongside the auth module via create_all, so freshly created tables
|
||||
never contain NULL-owner rows.
|
||||
"""
|
||||
from sqlalchemy import select
|
||||
|
||||
from app.gateway.deps import get_local_provider
|
||||
from deerflow.persistence.engine import get_session_factory
|
||||
from deerflow.persistence.user.model import UserRow
|
||||
|
||||
provider = get_local_provider()
|
||||
admin_count = await provider.count_admin_users()
|
||||
|
||||
if admin_count == 0:
|
||||
logger.info("=" * 60)
|
||||
logger.info(" First boot detected — no admin account exists.")
|
||||
logger.info(" Visit /setup to complete admin account creation.")
|
||||
logger.info("=" * 60)
|
||||
return
|
||||
|
||||
# Admin already exists — run orphan thread migration for any
|
||||
# LangGraph thread metadata that pre-dates the auth module.
|
||||
sf = get_session_factory()
|
||||
if sf is None:
|
||||
return
|
||||
|
||||
async with sf() as session:
|
||||
stmt = select(UserRow).where(UserRow.system_role == "admin").limit(1)
|
||||
row = (await session.execute(stmt)).scalar_one_or_none()
|
||||
|
||||
if row is None:
|
||||
return # Should not happen (admin_count > 0 above), but be safe.
|
||||
|
||||
admin_id = str(row.id)
|
||||
|
||||
# LangGraph store orphan migration — non-fatal.
|
||||
# This covers the "no-auth → with-auth" upgrade path for users
|
||||
# whose existing LangGraph thread metadata has no user_id set.
|
||||
store = getattr(app.state, "store", None)
|
||||
if store is not None:
|
||||
try:
|
||||
migrated = await _migrate_orphaned_threads(store, admin_id)
|
||||
if migrated:
|
||||
logger.info("Migrated %d orphan LangGraph thread(s) to admin", migrated)
|
||||
except Exception:
|
||||
logger.exception("LangGraph thread migration failed (non-fatal)")
|
||||
|
||||
|
||||
async def _iter_store_items(store, namespace, *, page_size: int = 500):
|
||||
"""Paginated async iterator over a LangGraph store namespace.
|
||||
|
||||
Replaces the old hardcoded ``limit=1000`` call with a cursor-style
|
||||
loop so that environments with more than one page of orphans do
|
||||
not silently lose data. Terminates when a page is empty OR when a
|
||||
short page arrives (indicating the last page).
|
||||
"""
|
||||
offset = 0
|
||||
while True:
|
||||
batch = await store.asearch(namespace, limit=page_size, offset=offset)
|
||||
if not batch:
|
||||
return
|
||||
for item in batch:
|
||||
yield item
|
||||
if len(batch) < page_size:
|
||||
return
|
||||
offset += page_size
|
||||
|
||||
|
||||
async def _migrate_orphaned_threads(store, admin_user_id: str) -> int:
|
||||
"""Migrate LangGraph store threads with no user_id to the given admin.
|
||||
|
||||
Uses cursor pagination so all orphans are migrated regardless of
|
||||
count. Returns the number of rows migrated.
|
||||
"""
|
||||
migrated = 0
|
||||
async for item in _iter_store_items(store, ("threads",)):
|
||||
metadata = item.value.get("metadata", {})
|
||||
if not metadata.get("user_id"):
|
||||
metadata["user_id"] = admin_user_id
|
||||
item.value["metadata"] = metadata
|
||||
await store.aput(("threads",), item.key, item.value)
|
||||
migrated += 1
|
||||
return migrated
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
"""Application lifespan handler."""
|
||||
|
||||
# Load config and check necessary environment variables at startup
|
||||
try:
|
||||
get_app_config()
|
||||
# ``app.state.config`` is the sole source of truth for
|
||||
# ``Depends(get_config)``. Consumers that want AppConfig must receive
|
||||
# it as an explicit parameter; there is no ambient singleton.
|
||||
app.state.config = AppConfig.from_file()
|
||||
logger.info("Configuration loaded successfully")
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to load configuration during gateway startup: {e}"
|
||||
@@ -52,22 +168,34 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
async with langgraph_runtime(app):
|
||||
logger.info("LangGraph runtime initialised")
|
||||
|
||||
# Ensure admin user exists (auto-create on first boot)
|
||||
# Must run AFTER langgraph_runtime so app.state.store is available for thread migration
|
||||
await _ensure_admin_user(app)
|
||||
|
||||
# Start IM channel service if any channels are configured
|
||||
try:
|
||||
from app.channels.service import start_channel_service
|
||||
|
||||
channel_service = await start_channel_service()
|
||||
channel_service = await start_channel_service(app.state.config)
|
||||
logger.info("Channel service started: %s", channel_service.get_status())
|
||||
except Exception:
|
||||
logger.exception("No IM channels configured or channel service failed to start")
|
||||
|
||||
yield
|
||||
|
||||
# Stop channel service on shutdown
|
||||
# Stop channel service on shutdown (bounded to prevent worker hang)
|
||||
try:
|
||||
from app.channels.service import stop_channel_service
|
||||
|
||||
await stop_channel_service()
|
||||
await asyncio.wait_for(
|
||||
stop_channel_service(),
|
||||
timeout=_SHUTDOWN_HOOK_TIMEOUT_SECONDS,
|
||||
)
|
||||
except TimeoutError:
|
||||
logger.warning(
|
||||
"Channel service shutdown exceeded %.1fs; proceeding with worker exit.",
|
||||
_SHUTDOWN_HOOK_TIMEOUT_SECONDS,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to stop channel service")
|
||||
|
||||
@@ -163,7 +291,31 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
|
||||
],
|
||||
)
|
||||
|
||||
# CORS is handled by nginx - no need for FastAPI middleware
|
||||
# Auth: reject unauthenticated requests to non-public paths (fail-closed safety net)
|
||||
app.add_middleware(AuthMiddleware)
|
||||
|
||||
# CSRF: Double Submit Cookie pattern for state-changing requests
|
||||
app.add_middleware(CSRFMiddleware)
|
||||
|
||||
# CORS: when GATEWAY_CORS_ORIGINS is set (dev without nginx), add CORS middleware.
|
||||
# In production, nginx handles CORS and no middleware is needed.
|
||||
cors_origins_env = os.environ.get("GATEWAY_CORS_ORIGINS", "")
|
||||
if cors_origins_env:
|
||||
cors_origins = [o.strip() for o in cors_origins_env.split(",") if o.strip()]
|
||||
# Validate: wildcard origin with credentials is a security misconfiguration
|
||||
for origin in cors_origins:
|
||||
if origin == "*":
|
||||
logger.error("GATEWAY_CORS_ORIGINS contains wildcard '*' with allow_credentials=True. This is a security misconfiguration — browsers will reject the response. Use explicit scheme://host:port origins instead.")
|
||||
cors_origins = [o for o in cors_origins if o != "*"]
|
||||
break
|
||||
if cors_origins:
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=cors_origins,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
# Include routers
|
||||
# Models API is mounted at /api/models
|
||||
@@ -199,6 +351,12 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
|
||||
# Assistants compatibility API (LangGraph Platform stub)
|
||||
app.include_router(assistants_compat.router)
|
||||
|
||||
# Auth API is mounted at /api/v1/auth
|
||||
app.include_router(auth.router)
|
||||
|
||||
# Feedback API is mounted at /api/threads/{thread_id}/runs/{run_id}/feedback
|
||||
app.include_router(feedback.router)
|
||||
|
||||
# Thread Runs API (LangGraph Platform-compatible runs lifecycle)
|
||||
app.include_router(thread_runs.router)
|
||||
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
"""Authentication module for DeerFlow.
|
||||
|
||||
This module provides:
|
||||
- JWT-based authentication
|
||||
- Provider Factory pattern for extensible auth methods
|
||||
- UserRepository interface for storage backends (SQLite)
|
||||
"""
|
||||
|
||||
from app.gateway.auth.config import AuthConfig, get_auth_config, set_auth_config
|
||||
from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse, TokenError
|
||||
from app.gateway.auth.jwt import TokenPayload, create_access_token, decode_token
|
||||
from app.gateway.auth.local_provider import LocalAuthProvider
|
||||
from app.gateway.auth.models import User, UserResponse
|
||||
from app.gateway.auth.password import hash_password, verify_password
|
||||
from app.gateway.auth.providers import AuthProvider
|
||||
from app.gateway.auth.repositories.base import UserRepository
|
||||
|
||||
__all__ = [
|
||||
# Config
|
||||
"AuthConfig",
|
||||
"get_auth_config",
|
||||
"set_auth_config",
|
||||
# Errors
|
||||
"AuthErrorCode",
|
||||
"AuthErrorResponse",
|
||||
"TokenError",
|
||||
# JWT
|
||||
"TokenPayload",
|
||||
"create_access_token",
|
||||
"decode_token",
|
||||
# Password
|
||||
"hash_password",
|
||||
"verify_password",
|
||||
# Models
|
||||
"User",
|
||||
"UserResponse",
|
||||
# Providers
|
||||
"AuthProvider",
|
||||
"LocalAuthProvider",
|
||||
# Repository
|
||||
"UserRepository",
|
||||
]
|
||||
@@ -0,0 +1,57 @@
|
||||
"""Authentication configuration for DeerFlow."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import secrets
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuthConfig(BaseModel):
|
||||
"""JWT and auth-related configuration. Parsed once at startup.
|
||||
|
||||
Note: the ``users`` table now lives in the shared persistence
|
||||
database managed by ``deerflow.persistence.engine``. The old
|
||||
``users_db_path`` config key has been removed — user storage is
|
||||
configured through ``config.database`` like every other table.
|
||||
"""
|
||||
|
||||
jwt_secret: str = Field(
|
||||
...,
|
||||
description="Secret key for JWT signing. MUST be set via AUTH_JWT_SECRET.",
|
||||
)
|
||||
token_expiry_days: int = Field(default=7, ge=1, le=30)
|
||||
oauth_github_client_id: str | None = Field(default=None)
|
||||
oauth_github_client_secret: str | None = Field(default=None)
|
||||
|
||||
|
||||
_auth_config: AuthConfig | None = None
|
||||
|
||||
|
||||
def get_auth_config() -> AuthConfig:
|
||||
"""Get the global AuthConfig instance. Parses from env on first call."""
|
||||
global _auth_config
|
||||
if _auth_config is None:
|
||||
jwt_secret = os.environ.get("AUTH_JWT_SECRET")
|
||||
if not jwt_secret:
|
||||
jwt_secret = secrets.token_urlsafe(32)
|
||||
os.environ["AUTH_JWT_SECRET"] = jwt_secret
|
||||
logger.warning(
|
||||
"⚠ AUTH_JWT_SECRET is not set — using an auto-generated ephemeral secret. "
|
||||
"Sessions will be invalidated on restart. "
|
||||
"For production, add AUTH_JWT_SECRET to your .env file: "
|
||||
'python -c "import secrets; print(secrets.token_urlsafe(32))"'
|
||||
)
|
||||
_auth_config = AuthConfig(jwt_secret=jwt_secret)
|
||||
return _auth_config
|
||||
|
||||
|
||||
def set_auth_config(config: AuthConfig) -> None:
|
||||
"""Set the global AuthConfig instance (for testing)."""
|
||||
global _auth_config
|
||||
_auth_config = config
|
||||
@@ -0,0 +1,48 @@
|
||||
"""Write initial admin credentials to a restricted file instead of logs.
|
||||
|
||||
Logging secrets to stdout/stderr is a well-known CodeQL finding
|
||||
(py/clear-text-logging-sensitive-data) — in production those logs
|
||||
get collected into ELK/Splunk/etc and become a secret sprawl
|
||||
source. This helper writes the credential to a 0600 file that only
|
||||
the process user can read, and returns the path so the caller can
|
||||
log **the path** (not the password) for the operator to pick up.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from deerflow.config.paths import get_paths
|
||||
|
||||
_CREDENTIAL_FILENAME = "admin_initial_credentials.txt"
|
||||
|
||||
|
||||
def write_initial_credentials(email: str, password: str, *, label: str = "initial") -> Path:
|
||||
"""Write the admin email + password to ``{base_dir}/admin_initial_credentials.txt``.
|
||||
|
||||
The file is created **atomically** with mode 0600 via ``os.open``
|
||||
so the password is never world-readable, even for the single syscall
|
||||
window between ``write_text`` and ``chmod``.
|
||||
|
||||
``label`` distinguishes "initial" (fresh creation) from "reset"
|
||||
(password reset) in the file header so an operator picking up the
|
||||
file after a restart can tell which event produced it.
|
||||
|
||||
Returns the absolute :class:`Path` to the file.
|
||||
"""
|
||||
target = get_paths().base_dir / _CREDENTIAL_FILENAME
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
content = (
|
||||
f"# DeerFlow admin {label} credentials\n# This file is generated on first boot or password reset.\n# Change the password after login via Settings -> Account,\n# then delete this file.\n#\nemail: {email}\npassword: {password}\n"
|
||||
)
|
||||
|
||||
# Atomic 0600 create-or-truncate. O_TRUNC (not O_EXCL) so the
|
||||
# reset-password path can rewrite an existing file without a
|
||||
# separate unlink-then-create dance.
|
||||
fd = os.open(target, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as fh:
|
||||
fh.write(content)
|
||||
|
||||
return target.resolve()
|
||||
@@ -0,0 +1,45 @@
|
||||
"""Typed error definitions for auth module.
|
||||
|
||||
AuthErrorCode: exhaustive enum of all auth failure conditions.
|
||||
TokenError: exhaustive enum of JWT decode failures.
|
||||
AuthErrorResponse: structured error payload for HTTP responses.
|
||||
"""
|
||||
|
||||
from enum import StrEnum
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class AuthErrorCode(StrEnum):
|
||||
"""Exhaustive list of auth error conditions."""
|
||||
|
||||
INVALID_CREDENTIALS = "invalid_credentials"
|
||||
TOKEN_EXPIRED = "token_expired"
|
||||
TOKEN_INVALID = "token_invalid"
|
||||
USER_NOT_FOUND = "user_not_found"
|
||||
EMAIL_ALREADY_EXISTS = "email_already_exists"
|
||||
PROVIDER_NOT_FOUND = "provider_not_found"
|
||||
NOT_AUTHENTICATED = "not_authenticated"
|
||||
SYSTEM_ALREADY_INITIALIZED = "system_already_initialized"
|
||||
|
||||
|
||||
class TokenError(StrEnum):
|
||||
"""Exhaustive list of JWT decode failure reasons."""
|
||||
|
||||
EXPIRED = "expired"
|
||||
INVALID_SIGNATURE = "invalid_signature"
|
||||
MALFORMED = "malformed"
|
||||
|
||||
|
||||
class AuthErrorResponse(BaseModel):
|
||||
"""Structured error response — replaces bare `detail` strings."""
|
||||
|
||||
code: AuthErrorCode
|
||||
message: str
|
||||
|
||||
|
||||
def token_error_to_code(err: TokenError) -> AuthErrorCode:
|
||||
"""Map TokenError to AuthErrorCode — single source of truth."""
|
||||
if err == TokenError.EXPIRED:
|
||||
return AuthErrorCode.TOKEN_EXPIRED
|
||||
return AuthErrorCode.TOKEN_INVALID
|
||||
@@ -0,0 +1,55 @@
|
||||
"""JWT token creation and verification."""
|
||||
|
||||
from datetime import UTC, datetime, timedelta
|
||||
|
||||
import jwt
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.gateway.auth.config import get_auth_config
|
||||
from app.gateway.auth.errors import TokenError
|
||||
|
||||
|
||||
class TokenPayload(BaseModel):
|
||||
"""JWT token payload."""
|
||||
|
||||
sub: str # user_id
|
||||
exp: datetime
|
||||
iat: datetime | None = None
|
||||
ver: int = 0 # token_version — must match User.token_version
|
||||
|
||||
|
||||
def create_access_token(user_id: str, expires_delta: timedelta | None = None, token_version: int = 0) -> str:
|
||||
"""Create a JWT access token.
|
||||
|
||||
Args:
|
||||
user_id: The user's UUID as string
|
||||
expires_delta: Optional custom expiry, defaults to 7 days
|
||||
token_version: User's current token_version for invalidation
|
||||
|
||||
Returns:
|
||||
Encoded JWT string
|
||||
"""
|
||||
config = get_auth_config()
|
||||
expiry = expires_delta or timedelta(days=config.token_expiry_days)
|
||||
|
||||
now = datetime.now(UTC)
|
||||
payload = {"sub": user_id, "exp": now + expiry, "iat": now, "ver": token_version}
|
||||
return jwt.encode(payload, config.jwt_secret, algorithm="HS256")
|
||||
|
||||
|
||||
def decode_token(token: str) -> TokenPayload | TokenError:
|
||||
"""Decode and validate a JWT token.
|
||||
|
||||
Returns:
|
||||
TokenPayload if valid, or a specific TokenError variant.
|
||||
"""
|
||||
config = get_auth_config()
|
||||
try:
|
||||
payload = jwt.decode(token, config.jwt_secret, algorithms=["HS256"])
|
||||
return TokenPayload(**payload)
|
||||
except jwt.ExpiredSignatureError:
|
||||
return TokenError.EXPIRED
|
||||
except jwt.InvalidSignatureError:
|
||||
return TokenError.INVALID_SIGNATURE
|
||||
except jwt.PyJWTError:
|
||||
return TokenError.MALFORMED
|
||||
@@ -0,0 +1,91 @@
|
||||
"""Local email/password authentication provider."""
|
||||
|
||||
from app.gateway.auth.models import User
|
||||
from app.gateway.auth.password import hash_password_async, verify_password_async
|
||||
from app.gateway.auth.providers import AuthProvider
|
||||
from app.gateway.auth.repositories.base import UserRepository
|
||||
|
||||
|
||||
class LocalAuthProvider(AuthProvider):
|
||||
"""Email/password authentication provider using local database."""
|
||||
|
||||
def __init__(self, repository: UserRepository):
|
||||
"""Initialize with a UserRepository.
|
||||
|
||||
Args:
|
||||
repository: UserRepository implementation (SQLite)
|
||||
"""
|
||||
self._repo = repository
|
||||
|
||||
async def authenticate(self, credentials: dict) -> User | None:
|
||||
"""Authenticate with email and password.
|
||||
|
||||
Args:
|
||||
credentials: dict with 'email' and 'password' keys
|
||||
|
||||
Returns:
|
||||
User if authentication succeeds, None otherwise
|
||||
"""
|
||||
email = credentials.get("email")
|
||||
password = credentials.get("password")
|
||||
|
||||
if not email or not password:
|
||||
return None
|
||||
|
||||
user = await self._repo.get_user_by_email(email)
|
||||
if user is None:
|
||||
return None
|
||||
|
||||
if user.password_hash is None:
|
||||
# OAuth user without local password
|
||||
return None
|
||||
|
||||
if not await verify_password_async(password, user.password_hash):
|
||||
return None
|
||||
|
||||
return user
|
||||
|
||||
async def get_user(self, user_id: str) -> User | None:
|
||||
"""Get user by ID."""
|
||||
return await self._repo.get_user_by_id(user_id)
|
||||
|
||||
async def create_user(self, email: str, password: str | None = None, system_role: str = "user", needs_setup: bool = False) -> User:
|
||||
"""Create a new local user.
|
||||
|
||||
Args:
|
||||
email: User email address
|
||||
password: Plain text password (will be hashed)
|
||||
system_role: Role to assign ("admin" or "user")
|
||||
needs_setup: If True, user must complete setup on first login
|
||||
|
||||
Returns:
|
||||
Created User instance
|
||||
"""
|
||||
password_hash = await hash_password_async(password) if password else None
|
||||
user = User(
|
||||
email=email,
|
||||
password_hash=password_hash,
|
||||
system_role=system_role,
|
||||
needs_setup=needs_setup,
|
||||
)
|
||||
return await self._repo.create_user(user)
|
||||
|
||||
async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
|
||||
"""Get user by OAuth provider and ID."""
|
||||
return await self._repo.get_user_by_oauth(provider, oauth_id)
|
||||
|
||||
async def count_users(self) -> int:
|
||||
"""Return total number of registered users."""
|
||||
return await self._repo.count_users()
|
||||
|
||||
async def count_admin_users(self) -> int:
|
||||
"""Return number of admin users."""
|
||||
return await self._repo.count_admin_users()
|
||||
|
||||
async def update_user(self, user: User) -> User:
|
||||
"""Update an existing user."""
|
||||
return await self._repo.update_user(user)
|
||||
|
||||
async def get_user_by_email(self, email: str) -> User | None:
|
||||
"""Get user by email."""
|
||||
return await self._repo.get_user_by_email(email)
|
||||
@@ -0,0 +1,41 @@
|
||||
"""User Pydantic models for authentication."""
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from typing import Literal
|
||||
from uuid import UUID, uuid4
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, EmailStr, Field
|
||||
|
||||
|
||||
def _utc_now() -> datetime:
|
||||
"""Return current UTC time (timezone-aware)."""
|
||||
return datetime.now(UTC)
|
||||
|
||||
|
||||
class User(BaseModel):
|
||||
"""Internal user representation."""
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
id: UUID = Field(default_factory=uuid4, description="Primary key")
|
||||
email: EmailStr = Field(..., description="Unique email address")
|
||||
password_hash: str | None = Field(None, description="bcrypt hash, nullable for OAuth users")
|
||||
system_role: Literal["admin", "user"] = Field(default="user")
|
||||
created_at: datetime = Field(default_factory=_utc_now)
|
||||
|
||||
# OAuth linkage (optional)
|
||||
oauth_provider: str | None = Field(None, description="e.g. 'github', 'google'")
|
||||
oauth_id: str | None = Field(None, description="User ID from OAuth provider")
|
||||
|
||||
# Auth lifecycle
|
||||
needs_setup: bool = Field(default=False, description="True for auto-created admin until setup completes")
|
||||
token_version: int = Field(default=0, description="Incremented on password change to invalidate old JWTs")
|
||||
|
||||
|
||||
class UserResponse(BaseModel):
|
||||
"""Response model for user info endpoint."""
|
||||
|
||||
id: str
|
||||
email: str
|
||||
system_role: Literal["admin", "user"]
|
||||
needs_setup: bool = False
|
||||
@@ -0,0 +1,33 @@
"""Password hashing utilities using bcrypt directly."""

import asyncio

import bcrypt


def hash_password(password: str) -> str:
    """Hash a password using bcrypt."""
    return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")


def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Verify a password against its hash."""
    return bcrypt.checkpw(plain_password.encode("utf-8"), hashed_password.encode("utf-8"))


async def hash_password_async(password: str) -> str:
    """Hash a password using bcrypt (non-blocking).

    Wraps the blocking bcrypt operation in a thread pool to avoid
    blocking the event loop during password hashing.
    """
    return await asyncio.to_thread(hash_password, password)


async def verify_password_async(plain_password: str, hashed_password: str) -> bool:
    """Verify a password against its hash (non-blocking).

    Wraps the blocking bcrypt operation in a thread pool to avoid
    blocking the event loop during password verification.
    """
    return await asyncio.to_thread(verify_password, plain_password, hashed_password)
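A short round-trip sketch (standalone script, not part of the diff) showing how the sync and async helpers above behave.

# Sketch: sync and async hashing round-trip using the helpers above.
import asyncio

from app.gateway.auth.password import (
    hash_password,
    hash_password_async,
    verify_password,
    verify_password_async,
)


async def _demo() -> None:
    hashed = hash_password("s3cret")          # blocking bcrypt call
    assert verify_password("s3cret", hashed)
    assert not verify_password("wrong", hashed)

    # Same behaviour, but bcrypt runs in a worker thread.
    hashed2 = await hash_password_async("s3cret")
    assert await verify_password_async("s3cret", hashed2)


asyncio.run(_demo())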
@@ -0,0 +1,24 @@
"""Auth provider abstraction."""

from abc import ABC, abstractmethod


class AuthProvider(ABC):
    """Abstract base class for authentication providers."""

    @abstractmethod
    async def authenticate(self, credentials: dict) -> "User | None":
        """Authenticate user with given credentials.

        Returns User if authentication succeeds, None otherwise.
        """
        ...

    @abstractmethod
    async def get_user(self, user_id: str) -> "User | None":
        """Retrieve user by ID."""
        ...


# Import User at runtime to avoid circular imports
from app.gateway.auth.models import User  # noqa: E402
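A hedged sketch of the shape a concrete provider is expected to take. This is not the real LocalAuthProvider from this change; the repository attribute and the verify call are assumptions based on the modules shown above.

# Sketch only — NOT LocalAuthProvider, just the shape the ABC expects.
from app.gateway.auth.models import User
from app.gateway.auth.password import verify_password_async
from app.gateway.auth.repositories.base import UserRepository


class EmailPasswordProvider(AuthProvider):
    def __init__(self, repository: UserRepository) -> None:
        self._repo = repository

    async def authenticate(self, credentials: dict) -> User | None:
        user = await self._repo.get_user_by_email(credentials.get("email", ""))
        if user is None or user.password_hash is None:
            return None
        ok = await verify_password_async(credentials.get("password", ""), user.password_hash)
        return user if ok else None

    async def get_user(self, user_id: str) -> User | None:
        return await self._repo.get_user_by_id(user_id)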
@@ -0,0 +1,102 @@
"""User repository interface for abstracting database operations."""

from abc import ABC, abstractmethod

from app.gateway.auth.models import User


class UserNotFoundError(LookupError):
    """Raised when a user repository operation targets a non-existent row.

    Subclass of :class:`LookupError` so callers that already catch
    ``LookupError`` for "missing entity" can keep working unchanged,
    while specific call sites can pin to this class to distinguish
    "concurrent delete during update" from other lookups.
    """


class UserRepository(ABC):
    """Abstract interface for user data storage.

    Implement this interface to support different storage backends
    (e.g. SQLite).
    """

    @abstractmethod
    async def create_user(self, user: User) -> User:
        """Create a new user.

        Args:
            user: User object to create

        Returns:
            Created User with ID assigned

        Raises:
            ValueError: If email already exists
        """
        ...

    @abstractmethod
    async def get_user_by_id(self, user_id: str) -> User | None:
        """Get user by ID.

        Args:
            user_id: User UUID as string

        Returns:
            User if found, None otherwise
        """
        ...

    @abstractmethod
    async def get_user_by_email(self, email: str) -> User | None:
        """Get user by email.

        Args:
            email: User email address

        Returns:
            User if found, None otherwise
        """
        ...

    @abstractmethod
    async def update_user(self, user: User) -> User:
        """Update an existing user.

        Args:
            user: User object with updated fields

        Returns:
            Updated User

        Raises:
            UserNotFoundError: If no row exists for ``user.id``. This is
                a hard failure (not a no-op) so callers cannot mistake a
                concurrent-delete race for a successful update.
        """
        ...

    @abstractmethod
    async def count_users(self) -> int:
        """Return total number of registered users."""
        ...

    @abstractmethod
    async def count_admin_users(self) -> int:
        """Return number of users with system_role == 'admin'."""
        ...

    @abstractmethod
    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        """Get user by OAuth provider and ID.

        Args:
            provider: OAuth provider name (e.g. 'github', 'google')
            oauth_id: User ID from the OAuth provider

        Returns:
            User if found, None otherwise
        """
        ...
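A small sketch of the error contract above from a caller's point of view: update_user fails loudly when the row is gone, and callers that only care about "missing entity" can still catch LookupError.

# Sketch: how a caller is expected to react to the update_user contract above.
async def bump_token_version(repo: UserRepository, user: User) -> bool:
    user.token_version += 1
    try:
        await repo.update_user(user)
        return True
    except UserNotFoundError:
        # Row vanished between fetch and update (concurrent delete).
        return False
    # Catching LookupError instead would also work, per the class docstring.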
@@ -0,0 +1,127 @@
"""SQLAlchemy-backed UserRepository implementation.

Uses the shared async session factory from
``deerflow.persistence.engine`` — the ``users`` table lives in the
same database as ``threads_meta``, ``runs``, ``run_events``, and
``feedback``.

Constructor takes the session factory directly (same pattern as the
other four repositories in ``deerflow.persistence.*``). Callers
construct this after ``init_engine_from_config()`` has run.
"""

from __future__ import annotations

from datetime import UTC
from uuid import UUID

from sqlalchemy import func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from app.gateway.auth.models import User
from app.gateway.auth.repositories.base import UserNotFoundError, UserRepository
from deerflow.persistence.user.model import UserRow


class SQLiteUserRepository(UserRepository):
    """Async user repository backed by the shared SQLAlchemy engine."""

    def __init__(self, session_factory: async_sessionmaker[AsyncSession]) -> None:
        self._sf = session_factory

    # ── Converters ────────────────────────────────────────────────────

    @staticmethod
    def _row_to_user(row: UserRow) -> User:
        return User(
            id=UUID(row.id),
            email=row.email,
            password_hash=row.password_hash,
            system_role=row.system_role,  # type: ignore[arg-type]
            # SQLite loses tzinfo on read; reattach UTC so downstream
            # code can compare timestamps reliably.
            created_at=row.created_at if row.created_at.tzinfo else row.created_at.replace(tzinfo=UTC),
            oauth_provider=row.oauth_provider,
            oauth_id=row.oauth_id,
            needs_setup=row.needs_setup,
            token_version=row.token_version,
        )

    @staticmethod
    def _user_to_row(user: User) -> UserRow:
        return UserRow(
            id=str(user.id),
            email=user.email,
            password_hash=user.password_hash,
            system_role=user.system_role,
            created_at=user.created_at,
            oauth_provider=user.oauth_provider,
            oauth_id=user.oauth_id,
            needs_setup=user.needs_setup,
            token_version=user.token_version,
        )

    # ── CRUD ──────────────────────────────────────────────────────────

    async def create_user(self, user: User) -> User:
        """Insert a new user. Raises ``ValueError`` on duplicate email."""
        row = self._user_to_row(user)
        async with self._sf() as session:
            session.add(row)
            try:
                await session.commit()
            except IntegrityError as exc:
                await session.rollback()
                raise ValueError(f"Email already registered: {user.email}") from exc
        return user

    async def get_user_by_id(self, user_id: str) -> User | None:
        async with self._sf() as session:
            row = await session.get(UserRow, user_id)
            return self._row_to_user(row) if row is not None else None

    async def get_user_by_email(self, email: str) -> User | None:
        stmt = select(UserRow).where(UserRow.email == email)
        async with self._sf() as session:
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            return self._row_to_user(row) if row is not None else None

    async def update_user(self, user: User) -> User:
        async with self._sf() as session:
            row = await session.get(UserRow, str(user.id))
            if row is None:
                # Hard fail on concurrent delete: callers (reset_admin,
                # password change handlers, _ensure_admin_user) all
                # fetched the user just before this call, so a missing
                # row here means the row vanished underneath us. Silent
                # success would let the caller log "password reset" for
                # a row that no longer exists.
                raise UserNotFoundError(f"User {user.id} no longer exists")
            row.email = user.email
            row.password_hash = user.password_hash
            row.system_role = user.system_role
            row.oauth_provider = user.oauth_provider
            row.oauth_id = user.oauth_id
            row.needs_setup = user.needs_setup
            row.token_version = user.token_version
            await session.commit()
        return user

    async def count_users(self) -> int:
        stmt = select(func.count()).select_from(UserRow)
        async with self._sf() as session:
            return await session.scalar(stmt) or 0

    async def count_admin_users(self) -> int:
        stmt = select(func.count()).select_from(UserRow).where(UserRow.system_role == "admin")
        async with self._sf() as session:
            return await session.scalar(stmt) or 0

    async def get_user_by_oauth(self, provider: str, oauth_id: str) -> User | None:
        stmt = select(UserRow).where(UserRow.oauth_provider == provider, UserRow.oauth_id == oauth_id)
        async with self._sf() as session:
            result = await session.execute(stmt)
            row = result.scalar_one_or_none()
            return self._row_to_user(row) if row is not None else None
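A hedged wiring sketch (standalone script, not part of the diff) showing how this repository is meant to be constructed once the shared engine is up; AppConfig.from_file() and the engine helpers are the same ones reset_admin.py below uses.

# Sketch: construct the repository from the shared engine, as reset_admin.py does.
import asyncio

from app.gateway.auth.models import User
from app.gateway.auth.repositories.sqlite import SQLiteUserRepository


async def _demo() -> None:
    from deerflow.config import AppConfig
    from deerflow.persistence.engine import close_engine, get_session_factory, init_engine_from_config

    config = AppConfig.from_file()
    await init_engine_from_config(config.database)
    try:
        sf = get_session_factory()
        if sf is None:
            raise RuntimeError("persistence engine not configured")
        repo = SQLiteUserRepository(sf)
        created = await repo.create_user(User(email="demo@example.com"))
        assert await repo.get_user_by_email("demo@example.com") is not None
        print(created.id)
    finally:
        await close_engine()


asyncio.run(_demo())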
@@ -0,0 +1,92 @@
"""CLI tool to reset an admin password.

Usage:
    python -m app.gateway.auth.reset_admin
    python -m app.gateway.auth.reset_admin --email admin@example.com

Writes the new password to ``.deer-flow/admin_initial_credentials.txt``
(mode 0600) instead of printing it, so CI / log aggregators never see
the cleartext secret.
"""

from __future__ import annotations

import argparse
import asyncio
import secrets
import sys

from sqlalchemy import select

from app.gateway.auth.credential_file import write_initial_credentials
from app.gateway.auth.password import hash_password
from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
from deerflow.persistence.user.model import UserRow


async def _run(email: str | None) -> int:
    from deerflow.config import AppConfig
    from deerflow.persistence.engine import (
        close_engine,
        get_session_factory,
        init_engine_from_config,
    )

    # CLI entry: load config explicitly at the top, pass down through the closure.
    config = AppConfig.from_file()
    await init_engine_from_config(config.database)
    try:
        sf = get_session_factory()
        if sf is None:
            print("Error: persistence engine not available (check config.database).", file=sys.stderr)
            return 1

        repo = SQLiteUserRepository(sf)

        if email:
            user = await repo.get_user_by_email(email)
        else:
            # Find first admin via direct SELECT — repository does not
            # expose a "first admin" helper and we do not want to add
            # one just for this CLI.
            async with sf() as session:
                stmt = select(UserRow).where(UserRow.system_role == "admin").limit(1)
                row = (await session.execute(stmt)).scalar_one_or_none()
            if row is None:
                user = None
            else:
                user = await repo.get_user_by_id(row.id)

        if user is None:
            if email:
                print(f"Error: user '{email}' not found.", file=sys.stderr)
            else:
                print("Error: no admin user found.", file=sys.stderr)
            return 1

        new_password = secrets.token_urlsafe(16)
        user.password_hash = hash_password(new_password)
        user.token_version += 1
        user.needs_setup = True
        await repo.update_user(user)

        cred_path = write_initial_credentials(user.email, new_password, label="reset")
        print(f"Password reset for: {user.email}")
        print(f"Credentials written to: {cred_path} (mode 0600)")
        print("Next login will require setup (new email + password).")
        return 0
    finally:
        await close_engine()


def main() -> None:
    parser = argparse.ArgumentParser(description="Reset admin password")
    parser.add_argument("--email", help="Admin email (default: first admin found)")
    args = parser.parse_args()

    exit_code = asyncio.run(_run(args.email))
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
@@ -0,0 +1,118 @@
"""Global authentication middleware — fail-closed safety net.

Rejects unauthenticated requests to non-public paths with 401. When a
request passes the cookie check, resolves the JWT payload to a real
``User`` object and stamps it into both ``request.state.user`` and the
``deerflow.runtime.user_context`` contextvar so that repository-layer
owner filtering works automatically via the sentinel pattern.

Fine-grained permission checks remain in authz.py decorators.
"""

from collections.abc import Callable

from fastapi import HTTPException, Request, Response
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
from starlette.types import ASGIApp

from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse
from app.gateway.authz import _ALL_PERMISSIONS, AuthContext
from deerflow.runtime.user_context import reset_current_user, set_current_user

# Paths that never require authentication.
_PUBLIC_PATH_PREFIXES: tuple[str, ...] = (
    "/health",
    "/docs",
    "/redoc",
    "/openapi.json",
)

# Exact auth paths that are public (login/register/status check).
# /api/v1/auth/me, /api/v1/auth/change-password etc. are NOT public.
_PUBLIC_EXACT_PATHS: frozenset[str] = frozenset(
    {
        "/api/v1/auth/login/local",
        "/api/v1/auth/register",
        "/api/v1/auth/logout",
        "/api/v1/auth/setup-status",
        "/api/v1/auth/initialize",
    }
)


def _is_public(path: str) -> bool:
    stripped = path.rstrip("/")
    if stripped in _PUBLIC_EXACT_PATHS:
        return True
    return any(path.startswith(prefix) for prefix in _PUBLIC_PATH_PREFIXES)


class AuthMiddleware(BaseHTTPMiddleware):
    """Strict auth gate: reject requests without a valid session.

    Two-stage check for non-public paths:

    1. Cookie presence — return 401 NOT_AUTHENTICATED if missing
    2. JWT validation via ``get_current_user_from_request`` — return 401
       TOKEN_INVALID if the token is absent, malformed, expired, or the
       signed user does not exist / is stale

    On success, stamps ``request.state.user`` and the
    ``deerflow.runtime.user_context`` contextvar so that repository-layer
    owner filters work downstream without every route needing a
    ``@require_auth`` decorator. Routes that need per-resource
    authorization (e.g. "user A cannot read user B's thread by guessing
    the URL") should additionally use ``@require_permission(...,
    owner_check=True)`` for explicit enforcement — but authentication
    itself is fully handled here.
    """

    def __init__(self, app: ASGIApp) -> None:
        super().__init__(app)

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        if _is_public(request.url.path):
            return await call_next(request)

        # Non-public path: require session cookie
        if not request.cookies.get("access_token"):
            return JSONResponse(
                status_code=401,
                content={
                    "detail": AuthErrorResponse(
                        code=AuthErrorCode.NOT_AUTHENTICATED,
                        message="Authentication required",
                    ).model_dump()
                },
            )

        # Strict JWT validation: reject junk/expired tokens with 401
        # right here instead of silently passing through. This closes
        # the "junk cookie bypass" gap (AUTH_TEST_PLAN test 7.5.8):
        # without this, non-isolation routes like /api/models would
        # accept any cookie-shaped string as authentication.
        #
        # We call the *strict* resolver so that fine-grained error
        # codes (token_expired, token_invalid, user_not_found, …)
        # propagate from AuthErrorCode, not get flattened into one
        # generic code. BaseHTTPMiddleware doesn't let HTTPException
        # bubble up, so we catch and render it as JSONResponse here.
        from app.gateway.deps import get_current_user_from_request

        try:
            user = await get_current_user_from_request(request)
        except HTTPException as exc:
            return JSONResponse(status_code=exc.status_code, content={"detail": exc.detail})

        # Stamp both request.state.user (for the contextvar pattern)
        # and request.state.auth (so @require_permission's "auth is
        # None" branch short-circuits instead of running the entire
        # JWT-decode + DB-lookup pipeline a second time per request).
        request.state.user = user
        request.state.auth = AuthContext(user=user, permissions=_ALL_PERMISSIONS)
        token = set_current_user(user)
        try:
            return await call_next(request)
        finally:
            reset_current_user(token)
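A hedged sketch of how the two middlewares might be registered on the FastAPI app. The actual app factory wiring is not part of this diff, and the module path of the auth middleware file is an assumption; only app.gateway.csrf_middleware is confirmed by the imports elsewhere in this change.

# Sketch only — assumed wiring; the real registration lives in the app factory.
from fastapi import FastAPI

from app.gateway.auth_middleware import AuthMiddleware  # assumed module path
from app.gateway.csrf_middleware import CSRFMiddleware   # path matches routers/auth.py import

app = FastAPI()
# Starlette runs the most recently added middleware first (outermost layer),
# so adding CSRFMiddleware last means the CSRF check runs before the auth gate.
app.add_middleware(AuthMiddleware)
app.add_middleware(CSRFMiddleware)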
@@ -0,0 +1,262 @@
"""Authorization decorators and context for DeerFlow.

Inspired by LangGraph Auth system: https://github.com/langchain-ai/langgraph/blob/main/libs/sdk-py/langgraph_sdk/auth/__init__.py

**Usage:**

1. Use ``@require_auth`` on routes that need authentication
2. Use ``@require_permission("resource", "action", owner_check=...)`` for permission checks
3. The decorator chain processes from bottom to top

**Example:**

    @router.get("/{thread_id}")
    @require_auth
    @require_permission("threads", "read", owner_check=True)
    async def get_thread(thread_id: str, request: Request):
        # User is authenticated and has threads:read permission
        ...

**Permission Model:**

- threads:read - View thread
- threads:write - Create/update thread
- threads:delete - Delete thread
- runs:create - Run agent
- runs:read - View run
- runs:cancel - Cancel run
"""

from __future__ import annotations

import functools
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar

from fastapi import HTTPException, Request

if TYPE_CHECKING:
    from app.gateway.auth.models import User

P = ParamSpec("P")
T = TypeVar("T")


# Permission constants
class Permissions:
    """Permission constants for resource:action format."""

    # Threads
    THREADS_READ = "threads:read"
    THREADS_WRITE = "threads:write"
    THREADS_DELETE = "threads:delete"

    # Runs
    RUNS_CREATE = "runs:create"
    RUNS_READ = "runs:read"
    RUNS_CANCEL = "runs:cancel"


class AuthContext:
    """Authentication context for the current request.

    Stored in request.state.auth after require_auth decoration.

    Attributes:
        user: The authenticated user, or None if anonymous
        permissions: List of permission strings (e.g., "threads:read")
    """

    __slots__ = ("user", "permissions")

    def __init__(self, user: User | None = None, permissions: list[str] | None = None):
        self.user = user
        self.permissions = permissions or []

    @property
    def is_authenticated(self) -> bool:
        """Check if user is authenticated."""
        return self.user is not None

    def has_permission(self, resource: str, action: str) -> bool:
        """Check if context has permission for resource:action.

        Args:
            resource: Resource name (e.g., "threads")
            action: Action name (e.g., "read")

        Returns:
            True if user has permission
        """
        permission = f"{resource}:{action}"
        return permission in self.permissions

    def require_user(self) -> User:
        """Get user or raise 401.

        Raises:
            HTTPException 401 if not authenticated
        """
        if not self.user:
            raise HTTPException(status_code=401, detail="Authentication required")
        return self.user


def get_auth_context(request: Request) -> AuthContext | None:
    """Get AuthContext from request state."""
    return getattr(request.state, "auth", None)


_ALL_PERMISSIONS: list[str] = [
    Permissions.THREADS_READ,
    Permissions.THREADS_WRITE,
    Permissions.THREADS_DELETE,
    Permissions.RUNS_CREATE,
    Permissions.RUNS_READ,
    Permissions.RUNS_CANCEL,
]


async def _authenticate(request: Request) -> AuthContext:
    """Authenticate request and return AuthContext.

    Delegates to deps.get_optional_user_from_request() for the JWT→User pipeline.
    Returns AuthContext with user=None for anonymous requests.
    """
    from app.gateway.deps import get_optional_user_from_request

    user = await get_optional_user_from_request(request)
    if user is None:
        return AuthContext(user=None, permissions=[])

    # In future, permissions could be stored in user record
    return AuthContext(user=user, permissions=_ALL_PERMISSIONS)


def require_auth[**P, T](func: Callable[P, T]) -> Callable[P, T]:
    """Decorator that authenticates the request and sets AuthContext.

    Must be placed ABOVE other decorators (executes after them).

    Usage:
        @router.get("/{thread_id}")
        @require_auth  # Bottom decorator (executes first after permission check)
        @require_permission("threads", "read")
        async def get_thread(thread_id: str, request: Request):
            auth: AuthContext = request.state.auth
            ...

    Raises:
        ValueError: If 'request' parameter is missing
    """

    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        request = kwargs.get("request")
        if request is None:
            raise ValueError("require_auth decorator requires 'request' parameter")

        # Authenticate and set context
        auth_context = await _authenticate(request)
        request.state.auth = auth_context

        return await func(*args, **kwargs)

    return wrapper


def require_permission(
    resource: str,
    action: str,
    owner_check: bool = False,
    require_existing: bool = False,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
    """Decorator that checks permission for resource:action.

    Must be used AFTER @require_auth.

    Args:
        resource: Resource name (e.g., "threads", "runs")
        action: Action name (e.g., "read", "write", "delete")
        owner_check: If True, validates that the current user owns the resource.
            Requires 'thread_id' path parameter and performs ownership check.
        require_existing: Only meaningful with ``owner_check=True``. If True, a
            missing ``threads_meta`` row counts as a denial (404)
            instead of "untracked legacy thread, allow". Use on
            **destructive / mutating** routes (DELETE, PATCH,
            state-update) so a deleted thread can't be re-targeted
            by another user via the missing-row code path.

    Usage:
        # Read-style: legacy untracked threads are allowed
        @require_permission("threads", "read", owner_check=True)
        async def get_thread(thread_id: str, request: Request):
            ...

        # Destructive: thread row MUST exist and be owned by caller
        @require_permission("threads", "delete", owner_check=True, require_existing=True)
        async def delete_thread(thread_id: str, request: Request):
            ...

    Raises:
        HTTPException 401: If authentication required but user is anonymous
        HTTPException 403: If user lacks permission
        HTTPException 404: If owner_check=True but user doesn't own the thread
        ValueError: If owner_check=True but 'thread_id' parameter is missing
    """

    def decorator(func: Callable[P, T]) -> Callable[P, T]:
        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            request = kwargs.get("request")
            if request is None:
                raise ValueError("require_permission decorator requires 'request' parameter")

            auth: AuthContext = getattr(request.state, "auth", None)
            if auth is None:
                auth = await _authenticate(request)
                request.state.auth = auth

            if not auth.is_authenticated:
                raise HTTPException(status_code=401, detail="Authentication required")

            # Check permission
            if not auth.has_permission(resource, action):
                raise HTTPException(
                    status_code=403,
                    detail=f"Permission denied: {resource}:{action}",
                )

            # Owner check for thread-specific resources.
            #
            # 2.0-rc moved thread metadata into the SQL persistence layer
            # (``threads_meta`` table). We verify ownership via
            # ``ThreadMetaStore.check_access``: it returns True for
            # missing rows (untracked legacy thread) and for rows whose
            # ``user_id`` is NULL (shared / pre-auth data), so this is
            # strict-deny rather than strict-allow — only an *existing*
            # row with a *different* user_id triggers 404.
            if owner_check:
                thread_id = kwargs.get("thread_id")
                if thread_id is None:
                    raise ValueError("require_permission with owner_check=True requires 'thread_id' parameter")

                from app.gateway.deps import get_thread_store

                thread_store = get_thread_store(request)
                allowed = await thread_store.check_access(
                    thread_id,
                    str(auth.user.id),
                    require_existing=require_existing,
                )
                if not allowed:
                    raise HTTPException(
                        status_code=404,
                        detail=f"Thread {thread_id} not found",
                    )

            return await func(*args, **kwargs)

        return wrapper

    return decorator
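A hedged route-level sketch combining the pieces above; it mirrors the docstring examples, with require_existing=True on the destructive route. The router prefix and the response payload are assumptions, not taken from this diff.

# Sketch: how a router might apply the decorators above.
from fastapi import APIRouter, Request

from app.gateway.authz import get_auth_context, require_auth, require_permission

router = APIRouter(prefix="/api/v1/threads")


@router.delete("/{thread_id}")
@require_auth
@require_permission("threads", "delete", owner_check=True, require_existing=True)
async def delete_thread(thread_id: str, request: Request) -> dict:
    auth = get_auth_context(request)
    user = auth.require_user()  # 401 if somehow still anonymous
    # ... perform the delete for user.id / thread_id ...
    return {"deleted": thread_id, "by": str(user.id)}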
@@ -0,0 +1,113 @@
"""CSRF protection middleware for FastAPI.

Per RFC-001:
State-changing operations require CSRF protection.
"""

import secrets
from collections.abc import Callable

from fastapi import Request, Response
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
from starlette.types import ASGIApp

CSRF_COOKIE_NAME = "csrf_token"
CSRF_HEADER_NAME = "X-CSRF-Token"
CSRF_TOKEN_LENGTH = 64  # bytes


def is_secure_request(request: Request) -> bool:
    """Detect whether the original client request was made over HTTPS."""
    return request.headers.get("x-forwarded-proto", request.url.scheme) == "https"


def generate_csrf_token() -> str:
    """Generate a secure random CSRF token."""
    return secrets.token_urlsafe(CSRF_TOKEN_LENGTH)


def should_check_csrf(request: Request) -> bool:
    """Determine if a request needs CSRF validation.

    CSRF is checked for state-changing methods (POST, PUT, DELETE, PATCH).
    GET, HEAD, OPTIONS, and TRACE are exempt per RFC 7231.
    """
    if request.method not in ("POST", "PUT", "DELETE", "PATCH"):
        return False

    path = request.url.path.rstrip("/")
    # Exempt /api/v1/auth/me endpoint
    if path == "/api/v1/auth/me":
        return False
    return True


_AUTH_EXEMPT_PATHS: frozenset[str] = frozenset(
    {
        "/api/v1/auth/login/local",
        "/api/v1/auth/logout",
        "/api/v1/auth/register",
        "/api/v1/auth/initialize",
    }
)


def is_auth_endpoint(request: Request) -> bool:
    """Check if the request is to an auth endpoint.

    Auth endpoints don't need CSRF validation on first call (no token).
    """
    return request.url.path.rstrip("/") in _AUTH_EXEMPT_PATHS


class CSRFMiddleware(BaseHTTPMiddleware):
    """Middleware that implements CSRF protection using Double Submit Cookie pattern."""

    def __init__(self, app: ASGIApp) -> None:
        super().__init__(app)

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        _is_auth = is_auth_endpoint(request)

        if should_check_csrf(request) and not _is_auth:
            cookie_token = request.cookies.get(CSRF_COOKIE_NAME)
            header_token = request.headers.get(CSRF_HEADER_NAME)

            if not cookie_token or not header_token:
                return JSONResponse(
                    status_code=403,
                    content={"detail": "CSRF token missing. Include X-CSRF-Token header."},
                )

            if not secrets.compare_digest(cookie_token, header_token):
                return JSONResponse(
                    status_code=403,
                    content={"detail": "CSRF token mismatch."},
                )

        response = await call_next(request)

        # For auth endpoints that set up session, also set CSRF cookie
        if _is_auth and request.method == "POST":
            # Generate a new CSRF token for the session
            csrf_token = generate_csrf_token()
            is_https = is_secure_request(request)
            response.set_cookie(
                key=CSRF_COOKIE_NAME,
                value=csrf_token,
                httponly=False,  # Must be JS-readable for Double Submit Cookie pattern
                secure=is_https,
                samesite="strict",
            )

        return response


def get_csrf_token(request: Request) -> str | None:
    """Get the CSRF token from the current request's cookies.

    This is useful for server-side rendering where you need to embed
    the token in forms or headers.
    """
    return request.cookies.get(CSRF_COOKIE_NAME)
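A hedged client-side sketch of the double-submit flow: log in (CSRF-exempt, sets both cookies), read the JS-readable csrf_token cookie, and echo it back in the X-CSRF-Token header on a state-changing call. The base URL, login payload shape, and the /api/v1/agents target are assumptions.

# Sketch: double-submit flow from a test client's point of view (httpx).
import asyncio

import httpx


async def _demo() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        # Login is exempt (see _AUTH_EXEMPT_PATHS) and sets csrf_token + access_token.
        await client.post(
            "/api/v1/auth/login/local",
            data={"username": "admin@example.com", "password": "secret"},  # payload shape assumed
        )
        csrf = client.cookies.get("csrf_token")
        # Any later POST/PUT/PATCH/DELETE must echo the cookie value in the header.
        resp = await client.post(
            "/api/v1/agents",  # example target, not taken from this diff
            headers={"X-CSRF-Token": csrf or ""},
            json={"name": "writer", "soul": "Be concise."},
        )
        print(resp.status_code)


asyncio.run(_demo())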
+204
-27
@@ -10,10 +10,30 @@ from __future__ import annotations

from collections.abc import AsyncGenerator
from contextlib import AsyncExitStack, asynccontextmanager
from typing import TYPE_CHECKING

from fastapi import FastAPI, HTTPException, Request

from deerflow.runtime import RunManager, StreamBridge
from deerflow.config.app_config import AppConfig
from deerflow.runtime import RunContext, RunManager

if TYPE_CHECKING:
    from app.gateway.auth.local_provider import LocalAuthProvider
    from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
    from deerflow.persistence.thread_meta.base import ThreadMetaStore


def get_config(request: Request) -> AppConfig:
    """FastAPI dependency returning the app-scoped ``AppConfig``.

    Reads from ``request.app.state.config`` which is set at startup
    (``app.py`` lifespan) and swapped on config reload (``routers/mcp.py``,
    ``routers/skills.py``).
    """
    cfg = getattr(request.app.state, "config", None)
    if cfg is None:
        raise HTTPException(status_code=503, detail="Configuration not available")
    return cfg


@asynccontextmanager
@@ -25,15 +45,54 @@ async def langgraph_runtime(app: FastAPI) -> AsyncGenerator[None, None]:
        async with langgraph_runtime(app):
            yield
    """
    from deerflow.agents.checkpointer.async_provider import make_checkpointer
    from deerflow.persistence.engine import close_engine, get_session_factory, init_engine_from_config
    from deerflow.runtime import make_store, make_stream_bridge
    from deerflow.runtime.checkpointer.async_provider import make_checkpointer
    from deerflow.runtime.events.store import make_run_event_store

    async with AsyncExitStack() as stack:
        app.state.stream_bridge = await stack.enter_async_context(make_stream_bridge())
        app.state.checkpointer = await stack.enter_async_context(make_checkpointer())
        app.state.store = await stack.enter_async_context(make_store())
        app.state.run_manager = RunManager()
        yield
        # app.state.config is populated earlier in lifespan(); thread it
        # explicitly into every provider below.
        config = app.state.config

        app.state.stream_bridge = await stack.enter_async_context(make_stream_bridge(config))

        # Initialize persistence engine BEFORE checkpointer so that
        # auto-create-database logic runs first (postgres backend).
        await init_engine_from_config(config.database)

        app.state.checkpointer = await stack.enter_async_context(make_checkpointer(config))
        app.state.store = await stack.enter_async_context(make_store(config))

        # Initialize repositories — one get_session_factory() call for all.
        sf = get_session_factory()
        if sf is not None:
            from deerflow.persistence.feedback import FeedbackRepository
            from deerflow.persistence.run import RunRepository

            app.state.run_store = RunRepository(sf)
            app.state.feedback_repo = FeedbackRepository(sf)
        else:
            from deerflow.runtime.runs.store.memory import MemoryRunStore

            app.state.run_store = MemoryRunStore()
            app.state.feedback_repo = None

        from deerflow.persistence.thread_meta import make_thread_store

        app.state.thread_store = make_thread_store(sf, app.state.store)

        # Run event store (has its own factory with config-driven backend selection)
        run_events_config = getattr(config, "run_events", None)
        app.state.run_event_store = make_run_event_store(run_events_config)

        # RunManager with store backing for persistence
        app.state.run_manager = RunManager(store=app.state.run_store)

        try:
            yield
        finally:
            await close_engine()


# ---------------------------------------------------------------------------
@@ -41,30 +100,148 @@ async def langgraph_runtime(app: FastAPI) -> AsyncGenerator[None, None]:
# ---------------------------------------------------------------------------


def get_stream_bridge(request: Request) -> StreamBridge:
    """Return the global :class:`StreamBridge`, or 503."""
    bridge = getattr(request.app.state, "stream_bridge", None)
    if bridge is None:
        raise HTTPException(status_code=503, detail="Stream bridge not available")
    return bridge
def _require(attr: str, label: str):
    """Create a FastAPI dependency that returns ``app.state.<attr>`` or 503."""

    def dep(request: Request):
        val = getattr(request.app.state, attr, None)
        if val is None:
            raise HTTPException(status_code=503, detail=f"{label} not available")
        return val

    dep.__name__ = dep.__qualname__ = f"get_{attr}"
    return dep


def get_run_manager(request: Request) -> RunManager:
    """Return the global :class:`RunManager`, or 503."""
    mgr = getattr(request.app.state, "run_manager", None)
    if mgr is None:
        raise HTTPException(status_code=503, detail="Run manager not available")
    return mgr


def get_checkpointer(request: Request):
    """Return the global checkpointer, or 503."""
    cp = getattr(request.app.state, "checkpointer", None)
    if cp is None:
        raise HTTPException(status_code=503, detail="Checkpointer not available")
    return cp
get_stream_bridge = _require("stream_bridge", "Stream bridge")
get_run_manager = _require("run_manager", "Run manager")
get_checkpointer = _require("checkpointer", "Checkpointer")
get_run_event_store = _require("run_event_store", "Run event store")
get_feedback_repo = _require("feedback_repo", "Feedback")
get_run_store = _require("run_store", "Run store")


def get_store(request: Request):
    """Return the global store (may be ``None`` if not configured)."""
    return getattr(request.app.state, "store", None)


def get_thread_store(request: Request) -> ThreadMetaStore:
    """Return the thread metadata store (SQL or memory-backed)."""
    val = getattr(request.app.state, "thread_store", None)
    if val is None:
        raise HTTPException(status_code=503, detail="Thread metadata store not available")
    return val


def get_run_context(request: Request) -> RunContext:
    """Build a :class:`RunContext` from ``app.state`` singletons.

    Returns a *base* context with infrastructure dependencies. Callers that
    need per-run fields (e.g. ``follow_up_to_run_id``) should use
    ``dataclasses.replace(ctx, follow_up_to_run_id=...)`` before passing it
    to :func:`run_agent`.
    """
    config = get_config(request)
    return RunContext(
        checkpointer=get_checkpointer(request),
        store=get_store(request),
        event_store=get_run_event_store(request),
        run_events_config=getattr(config, "run_events", None),
        thread_store=get_thread_store(request),
        app_config=config,
    )


# ---------------------------------------------------------------------------
# Auth helpers (used by authz.py and auth middleware)
# ---------------------------------------------------------------------------

# Cached singletons to avoid repeated instantiation per request
_cached_local_provider: LocalAuthProvider | None = None
_cached_repo: SQLiteUserRepository | None = None


def get_local_provider() -> LocalAuthProvider:
    """Get or create the cached LocalAuthProvider singleton.

    Must be called after ``init_engine_from_config()`` — the shared
    session factory is required to construct the user repository.
    """
    global _cached_local_provider, _cached_repo
    if _cached_repo is None:
        from app.gateway.auth.repositories.sqlite import SQLiteUserRepository
        from deerflow.persistence.engine import get_session_factory

        sf = get_session_factory()
        if sf is None:
            raise RuntimeError("get_local_provider() called before init_engine_from_config(); cannot access users table")
        _cached_repo = SQLiteUserRepository(sf)
    if _cached_local_provider is None:
        from app.gateway.auth.local_provider import LocalAuthProvider

        _cached_local_provider = LocalAuthProvider(repository=_cached_repo)
    return _cached_local_provider


async def get_current_user_from_request(request: Request):
    """Get the current authenticated user from the request cookie.

    Raises HTTPException 401 if not authenticated.
    """
    from app.gateway.auth import decode_token
    from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse, TokenError, token_error_to_code

    access_token = request.cookies.get("access_token")
    if not access_token:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.NOT_AUTHENTICATED, message="Not authenticated").model_dump(),
        )

    payload = decode_token(access_token)
    if isinstance(payload, TokenError):
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=token_error_to_code(payload), message=f"Token error: {payload.value}").model_dump(),
        )

    provider = get_local_provider()
    user = await provider.get_user(payload.sub)
    if user is None:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.USER_NOT_FOUND, message="User not found").model_dump(),
        )

    # Token version mismatch → password was changed, token is stale
    if user.token_version != payload.ver:
        raise HTTPException(
            status_code=401,
            detail=AuthErrorResponse(code=AuthErrorCode.TOKEN_INVALID, message="Token revoked (password changed)").model_dump(),
        )

    return user


async def get_optional_user_from_request(request: Request):
    """Get optional authenticated user from request.

    Returns None if not authenticated.
    """
    try:
        return await get_current_user_from_request(request)
    except HTTPException:
        return None


async def get_current_user(request: Request) -> str | None:
    """Extract user_id from request cookie, or None if not authenticated.

    Thin adapter that returns the string id for callers that only need
    identification (e.g., ``feedback.py``). Full-user callers should use
    ``get_current_user_from_request`` or ``get_optional_user_from_request``.
    """
    user = await get_optional_user_from_request(request)
    return str(user.id) if user else None
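A hedged sketch of a route consuming the dependency helpers above; the route path and the downstream agent-runner call are assumptions, not part of this diff.

# Sketch: a route layering per-run fields onto the base RunContext, as the
# get_run_context docstring suggests.
from dataclasses import replace

from fastapi import APIRouter, Depends, Request

from app.gateway.deps import get_run_context
from deerflow.runtime import RunContext

router = APIRouter()


@router.post("/api/v1/threads/{thread_id}/runs")
async def start_run(thread_id: str, request: Request, ctx: RunContext = Depends(get_run_context)) -> dict:
    run_ctx = replace(ctx, follow_up_to_run_id=None)
    # ... hand run_ctx to the agent runner ...
    return {"thread_id": thread_id, "accepted": True}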
@@ -0,0 +1,106 @@
"""LangGraph Server auth handler — shares JWT logic with Gateway.

Loaded by LangGraph Server via langgraph.json ``auth.path``.
Reuses the same ``decode_token`` / ``get_auth_config`` as Gateway,
so both modes validate tokens with the same secret and rules.

Two layers:
1. @auth.authenticate — validates JWT cookie, extracts user_id,
   and enforces CSRF on state-changing methods (POST/PUT/DELETE/PATCH)
2. @auth.on — returns metadata filter so each user only sees own threads
"""

import secrets

from langgraph_sdk import Auth

from app.gateway.auth.errors import TokenError
from app.gateway.auth.jwt import decode_token
from app.gateway.deps import get_local_provider

auth = Auth()

# Methods that require CSRF validation (state-changing per RFC 7231).
_CSRF_METHODS = frozenset({"POST", "PUT", "DELETE", "PATCH"})


def _check_csrf(request) -> None:
    """Enforce Double Submit Cookie CSRF check for state-changing requests.

    Mirrors Gateway's CSRFMiddleware logic so that LangGraph routes
    proxied directly by nginx have the same CSRF protection.
    """
    method = getattr(request, "method", "") or ""
    if method.upper() not in _CSRF_METHODS:
        return

    cookie_token = request.cookies.get("csrf_token")
    header_token = request.headers.get("x-csrf-token")

    if not cookie_token or not header_token:
        raise Auth.exceptions.HTTPException(
            status_code=403,
            detail="CSRF token missing. Include X-CSRF-Token header.",
        )

    if not secrets.compare_digest(cookie_token, header_token):
        raise Auth.exceptions.HTTPException(
            status_code=403,
            detail="CSRF token mismatch.",
        )


@auth.authenticate
async def authenticate(request):
    """Validate the session cookie, decode JWT, and check token_version.

    Same validation chain as Gateway's get_current_user_from_request:
    cookie → decode JWT → DB lookup → token_version match
    Also enforces CSRF on state-changing methods.
    """
    # CSRF check before authentication so forged cross-site requests
    # are rejected early, even if the cookie carries a valid JWT.
    _check_csrf(request)

    token = request.cookies.get("access_token")
    if not token:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="Not authenticated",
        )

    payload = decode_token(token)
    if isinstance(payload, TokenError):
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail=f"Token error: {payload.value}",
        )

    user = await get_local_provider().get_user(payload.sub)
    if user is None:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="User not found",
        )
    if user.token_version != payload.ver:
        raise Auth.exceptions.HTTPException(
            status_code=401,
            detail="Token revoked (password changed)",
        )

    return payload.sub


@auth.on
async def add_owner_filter(ctx: Auth.types.AuthContext, value: dict):
    """Inject user_id metadata on writes; filter by user_id on reads.

    Gateway stores thread ownership as ``metadata.user_id``.
    This handler ensures LangGraph Server enforces the same isolation.
    """
    # On create/update: stamp user_id into metadata
    metadata = value.setdefault("metadata", {})
    metadata["user_id"] = ctx.user.identity

    # Return filter dict — LangGraph applies it to search/read/delete
    return {"user_id": ctx.user.identity}
@@ -5,6 +5,7 @@ from pathlib import Path
from fastapi import HTTPException

from deerflow.config.paths import get_paths
from deerflow.runtime.user_context import get_effective_user_id


def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
@@ -22,7 +23,7 @@ def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
        HTTPException: If the path is invalid or outside allowed directories.
    """
    try:
        return get_paths().resolve_virtual_path(thread_id, virtual_path)
        return get_paths().resolve_virtual_path(thread_id, virtual_path, user_id=get_effective_user_id())
    except ValueError as e:
        status = 403 if "traversal" in str(e) else 400
        raise HTTPException(status_code=status, detail=str(e))
@@ -5,10 +5,12 @@ import re
import shutil

import yaml
from fastapi import APIRouter, HTTPException
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field

from app.gateway.deps import get_config
from deerflow.config.agents_config import AgentConfig, list_custom_agents, load_agent_config, load_agent_soul
from deerflow.config.app_config import AppConfig
from deerflow.config.paths import get_paths

logger = logging.getLogger(__name__)
@@ -24,7 +26,8 @@ class AgentResponse(BaseModel):
    description: str = Field(default="", description="Agent description")
    model: str | None = Field(default=None, description="Optional model override")
    tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
    soul: str | None = Field(default=None, description="SOUL.md content (included on GET /{name})")
    skills: list[str] | None = Field(default=None, description="Optional skill whitelist (None=all, []=none)")
    soul: str | None = Field(default=None, description="SOUL.md content")


class AgentsListResponse(BaseModel):
@@ -40,6 +43,7 @@ class AgentCreateRequest(BaseModel):
    description: str = Field(default="", description="Agent description")
    model: str | None = Field(default=None, description="Optional model override")
    tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
    skills: list[str] | None = Field(default=None, description="Optional skill whitelist (None=all enabled, []=none)")
    soul: str = Field(default="", description="SOUL.md content — agent personality and behavioral guardrails")


@@ -49,6 +53,7 @@ class AgentUpdateRequest(BaseModel):
    description: str | None = Field(default=None, description="Updated description")
    model: str | None = Field(default=None, description="Updated model override")
    tool_groups: list[str] | None = Field(default=None, description="Updated tool group whitelist")
    skills: list[str] | None = Field(default=None, description="Updated skill whitelist (None=all, []=none)")
    soul: str | None = Field(default=None, description="Updated SOUL.md content")


@@ -73,6 +78,15 @@ def _normalize_agent_name(name: str) -> str:
    return name.lower()


def _require_agents_api_enabled(app_config: AppConfig) -> None:
    """Reject access unless the custom-agent management API is explicitly enabled."""
    if not app_config.agents_api.enabled:
        raise HTTPException(
            status_code=403,
            detail=("Custom-agent management API is disabled. Set agents_api.enabled=true to expose agent and user-profile routes over HTTP."),
        )


def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool = False) -> AgentResponse:
    """Convert AgentConfig to AgentResponse."""
    soul: str | None = None
@@ -84,6 +98,7 @@ def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool = False
        description=agent_cfg.description,
        model=agent_cfg.model,
        tool_groups=agent_cfg.tool_groups,
        skills=agent_cfg.skills,
        soul=soul,
    )

@@ -92,17 +107,19 @@ def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool = False
    "/agents",
    response_model=AgentsListResponse,
    summary="List Custom Agents",
    description="List all custom agents available in the agents directory.",
    description="List all custom agents available in the agents directory, including their soul content.",
)
async def list_agents() -> AgentsListResponse:
async def list_agents(app_config: AppConfig = Depends(get_config)) -> AgentsListResponse:
    """List all custom agents.

    Returns:
        List of all custom agents with their metadata (without soul content).
        List of all custom agents with their metadata and soul content.
    """
    _require_agents_api_enabled(app_config)

    try:
        agents = list_custom_agents()
        return AgentsListResponse(agents=[_agent_config_to_response(a) for a in agents])
        return AgentsListResponse(agents=[_agent_config_to_response(a, include_soul=True) for a in agents])
    except Exception as e:
        logger.error(f"Failed to list agents: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to list agents: {str(e)}")
@@ -125,6 +142,7 @@ async def check_agent_name(name: str) -> dict:
    Raises:
        HTTPException: 422 if the name is invalid.
    """
    _require_agents_api_enabled(app_config)
    _validate_agent_name(name)
    normalized = _normalize_agent_name(name)
    available = not get_paths().agent_dir(normalized).exists()
@@ -137,7 +155,7 @@ async def check_agent_name(name: str) -> dict:
    summary="Get Custom Agent",
    description="Retrieve details and SOUL.md content for a specific custom agent.",
)
async def get_agent(name: str) -> AgentResponse:
async def get_agent(name: str, app_config: AppConfig = Depends(get_config)) -> AgentResponse:
    """Get a specific custom agent by name.

    Args:
@@ -149,6 +167,7 @@ async def get_agent(name: str) -> AgentResponse:
    Raises:
        HTTPException: 404 if agent not found.
    """
    _require_agents_api_enabled(app_config)
    _validate_agent_name(name)
    name = _normalize_agent_name(name)

@@ -169,7 +188,7 @@ async def get_agent(name: str) -> AgentResponse:
    summary="Create Custom Agent",
    description="Create a new custom agent with its config and SOUL.md.",
)
async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
async def create_agent_endpoint(request: AgentCreateRequest, app_config: AppConfig = Depends(get_config)) -> AgentResponse:
    """Create a new custom agent.

    Args:
@@ -181,6 +200,7 @@ async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
    Raises:
        HTTPException: 409 if agent already exists, 422 if name is invalid.
    """
    _require_agents_api_enabled(app_config)
    _validate_agent_name(request.name)
    normalized_name = _normalize_agent_name(request.name)

@@ -200,6 +220,8 @@ async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
        config_data["model"] = request.model
    if request.tool_groups is not None:
        config_data["tool_groups"] = request.tool_groups
    if request.skills is not None:
        config_data["skills"] = request.skills

    config_file = agent_dir / "config.yaml"
    with open(config_file, "w", encoding="utf-8") as f:
@@ -230,7 +252,7 @@ async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
    summary="Update Custom Agent",
    description="Update an existing custom agent's config and/or SOUL.md.",
)
async def update_agent(name: str, request: AgentUpdateRequest) -> AgentResponse:
async def update_agent(name: str, request: AgentUpdateRequest, app_config: AppConfig = Depends(get_config)) -> AgentResponse:
    """Update an existing custom agent.

    Args:
@@ -243,6 +265,7 @@ async def update_agent(name: str, request: AgentUpdateRequest) -> AgentResponse:
    Raises:
        HTTPException: 404 if agent not found.
    """
    _require_agents_api_enabled(app_config)
    _validate_agent_name(name)
    name = _normalize_agent_name(name)

@@ -255,21 +278,32 @@ async def update_agent(name: str, request: AgentUpdateRequest) -> AgentResponse:

    try:
        # Update config if any config fields changed
        config_changed = any(v is not None for v in [request.description, request.model, request.tool_groups])
        # Use model_fields_set to distinguish "field omitted" from "explicitly set to null".
        # This is critical for skills where None means "inherit all" (not "don't change").
        fields_set = request.model_fields_set
        config_changed = bool(fields_set & {"description", "model", "tool_groups", "skills"})

        if config_changed:
            updated: dict = {
                "name": agent_cfg.name,
                "description": request.description if request.description is not None else agent_cfg.description,
                "description": request.description if "description" in fields_set else agent_cfg.description,
            }
            new_model = request.model if request.model is not None else agent_cfg.model
            new_model = request.model if "model" in fields_set else agent_cfg.model
            if new_model is not None:
                updated["model"] = new_model

            new_tool_groups = request.tool_groups if request.tool_groups is not None else agent_cfg.tool_groups
            new_tool_groups = request.tool_groups if "tool_groups" in fields_set else agent_cfg.tool_groups
            if new_tool_groups is not None:
                updated["tool_groups"] = new_tool_groups

            # skills: None = inherit all, [] = no skills, ["a","b"] = whitelist
            if "skills" in fields_set:
                new_skills = request.skills
            else:
                new_skills = agent_cfg.skills
            if new_skills is not None:
                updated["skills"] = new_skills

            config_file = agent_dir / "config.yaml"
            with open(config_file, "w", encoding="utf-8") as f:
                yaml.dump(updated, f, default_flow_style=False, allow_unicode=True)
@@ -309,12 +343,14 @@ class UserProfileUpdateRequest(BaseModel):
    summary="Get User Profile",
    description="Read the global USER.md file that is injected into all custom agents.",
)
async def get_user_profile() -> UserProfileResponse:
async def get_user_profile(app_config: AppConfig = Depends(get_config)) -> UserProfileResponse:
    """Return the current USER.md content.

    Returns:
        UserProfileResponse with content=None if USER.md does not exist yet.
    """
    _require_agents_api_enabled(app_config)

    try:
        user_md_path = get_paths().user_md_file
        if not user_md_path.exists():
@@ -332,7 +368,7 @@ async def get_user_profile() -> UserProfileResponse:
    summary="Update User Profile",
    description="Write the global USER.md file that is injected into all custom agents.",
)
async def update_user_profile(request: UserProfileUpdateRequest) -> UserProfileResponse:
async def update_user_profile(request: UserProfileUpdateRequest, app_config: AppConfig = Depends(get_config)) -> UserProfileResponse:
    """Create or overwrite the global USER.md.

    Args:
@@ -341,6 +377,8 @@ async def update_user_profile(request: UserProfileUpdateRequest) -> UserProfileR
    Returns:
        UserProfileResponse with the saved content.
    """
    _require_agents_api_enabled(app_config)

    try:
        paths = get_paths()
        paths.base_dir.mkdir(parents=True, exist_ok=True)
@@ -358,7 +396,7 @@ async def update_user_profile(request: UserProfileUpdateRequest) -> UserProfileR
    summary="Delete Custom Agent",
    description="Delete a custom agent and all its files (config, SOUL.md, memory).",
)
async def delete_agent(name: str) -> None:
async def delete_agent(name: str, app_config: AppConfig = Depends(get_config)) -> None:
    """Delete a custom agent.

    Args:
@@ -367,6 +405,7 @@ async def delete_agent(name: str) -> None:
    Raises:
        HTTPException: 404 if agent not found.
    """
    _require_agents_api_enabled(app_config)
    _validate_agent_name(name)
    name = _normalize_agent_name(name)
@@ -7,6 +7,7 @@ from urllib.parse import quote
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from fastapi.responses import FileResponse, PlainTextResponse, Response
|
||||
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.path_utils import resolve_thread_virtual_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -81,6 +82,7 @@ def _extract_file_from_skill_archive(zip_path: Path, internal_path: str) -> byte
|
||||
summary="Get Artifact File",
|
||||
description="Retrieve an artifact file generated by the AI agent. Text and binary files can be viewed inline, while active web content is always downloaded.",
|
||||
)
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def get_artifact(thread_id: str, path: str, request: Request, download: bool = False) -> Response:
|
||||
"""Get an artifact file by its path.
|
||||
|
||||
|
||||
@@ -0,0 +1,459 @@
|
||||
"""Authentication endpoints."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from ipaddress import ip_address, ip_network
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
|
||||
from fastapi.security import OAuth2PasswordRequestForm
|
||||
from pydantic import BaseModel, EmailStr, Field, field_validator
|
||||
|
||||
from app.gateway.auth import (
|
||||
UserResponse,
|
||||
create_access_token,
|
||||
)
|
||||
from app.gateway.auth.config import get_auth_config
|
||||
from app.gateway.auth.errors import AuthErrorCode, AuthErrorResponse
|
||||
from app.gateway.csrf_middleware import is_secure_request
|
||||
from app.gateway.deps import get_current_user_from_request, get_local_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/v1/auth", tags=["auth"])
|
||||
|
||||
|
||||
# ── Request/Response Models ──────────────────────────────────────────────
|
||||
|
||||
|
||||
class LoginResponse(BaseModel):
|
||||
"""Response model for login — token only lives in HttpOnly cookie."""
|
||||
|
||||
expires_in: int # seconds
|
||||
needs_setup: bool = False
|
||||
|
||||
|
||||
# Blocklist of the most common passwords, drawn from the public SecLists "10k worst
# passwords" set, lowercased + length>=8 only (shorter ones already fail
# the min_length check). Kept tight on purpose: this is the **lower bound**
# defense, not a full HIBP / passlib check, and runs in-process per request.
|
||||
_COMMON_PASSWORDS: frozenset[str] = frozenset(
|
||||
{
|
||||
"password",
|
||||
"password1",
|
||||
"password12",
|
||||
"password123",
|
||||
"password1234",
|
||||
"12345678",
|
||||
"123456789",
|
||||
"1234567890",
|
||||
"qwerty12",
|
||||
"qwertyui",
|
||||
"qwerty123",
|
||||
"abc12345",
|
||||
"abcd1234",
|
||||
"iloveyou",
|
||||
"letmein1",
|
||||
"welcome1",
|
||||
"welcome123",
|
||||
"admin123",
|
||||
"administrator",
|
||||
"passw0rd",
|
||||
"p@ssw0rd",
|
||||
"monkey12",
|
||||
"trustno1",
|
||||
"sunshine",
|
||||
"princess",
|
||||
"football",
|
||||
"baseball",
|
||||
"superman",
|
||||
"batman123",
|
||||
"starwars",
|
||||
"dragon123",
|
||||
"master123",
|
||||
"shadow12",
|
||||
"michael1",
|
||||
"jennifer",
|
||||
"computer",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _password_is_common(password: str) -> bool:
|
||||
"""Case-insensitive blocklist check.
|
||||
|
||||
Lowercases the input so trivial mutations like ``Password`` /
|
||||
``PASSWORD`` are also rejected. Does not normalize digit substitutions
|
||||
(``p@ssw0rd`` is included as a literal entry instead) — keeping the
|
||||
rule cheap and predictable.
|
||||
"""
|
||||
return password.lower() in _COMMON_PASSWORDS
|
||||
|
||||
|
||||
def _validate_strong_password(value: str) -> str:
|
||||
"""Pydantic field-validator body shared by Register + ChangePassword.
|
||||
|
||||
Constraint = function, not type-level mixin. The two request models
|
||||
have no "is-a" relationship; they only share the password-strength
|
||||
rule. Lifting it into a free function lets each model bind it via
|
||||
``@field_validator(field_name)`` without inheritance gymnastics.
|
||||
"""
|
||||
if _password_is_common(value):
|
||||
raise ValueError("Password is too common; choose a stronger password.")
|
||||
return value
|
||||
|
||||
|
||||
class RegisterRequest(BaseModel):
|
||||
"""Request model for user registration."""
|
||||
|
||||
email: EmailStr
|
||||
password: str = Field(..., min_length=8)
|
||||
|
||||
_strong_password = field_validator("password")(classmethod(lambda cls, v: _validate_strong_password(v)))
|
||||
|
||||
|
||||
class ChangePasswordRequest(BaseModel):
|
||||
"""Request model for password change (also handles setup flow)."""
|
||||
|
||||
current_password: str
|
||||
new_password: str = Field(..., min_length=8)
|
||||
new_email: EmailStr | None = None
|
||||
|
||||
_strong_password = field_validator("new_password")(classmethod(lambda cls, v: _validate_strong_password(v)))
|
||||
|
||||
|
||||
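# Usage sketch (not part of the diff): because the strength rule is a free
# function bound via field_validator on each model, RegisterRequest rejects a
# blocklisted password at validation time without sharing a base class with
# ChangePasswordRequest.
from pydantic import ValidationError

try:
    RegisterRequest(email="user@example.com", password="password123")
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # mentions "Password is too common"

RegisterRequest(email="user@example.com", password="correct-horse-battery")  # accepted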
class MessageResponse(BaseModel):
|
||||
"""Generic message response."""
|
||||
|
||||
message: str
|
||||
|
||||
|
||||
# ── Helpers ───────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _set_session_cookie(response: Response, token: str, request: Request) -> None:
|
||||
"""Set the access_token HttpOnly cookie on the response."""
|
||||
config = get_auth_config()
|
||||
is_https = is_secure_request(request)
|
||||
response.set_cookie(
|
||||
key="access_token",
|
||||
value=token,
|
||||
httponly=True,
|
||||
secure=is_https,
|
||||
samesite="lax",
|
||||
max_age=config.token_expiry_days * 24 * 3600 if is_https else None,
|
||||
)
|
||||
|
||||
|
||||
# ── Rate Limiting ────────────────────────────────────────────────────────
|
||||
# In-process dict — not shared across workers. Sufficient for single-worker deployments.
|
||||
|
||||
_MAX_LOGIN_ATTEMPTS = 5
|
||||
_LOCKOUT_SECONDS = 300 # 5 minutes
|
||||
|
||||
# ip → (fail_count, lock_until_timestamp)
|
||||
_login_attempts: dict[str, tuple[int, float]] = {}
|
||||
|
||||
|
||||
def _trusted_proxies() -> list:
|
||||
"""Parse ``AUTH_TRUSTED_PROXIES`` env var into a list of ip_network objects.
|
||||
|
||||
Comma-separated CIDR or single-IP entries. Empty / unset = no proxy is
|
||||
trusted (direct mode). Invalid entries are skipped with a logger warning.
|
||||
Read live so env-var overrides take effect immediately and tests can
|
||||
``monkeypatch.setenv`` without poking a module-level cache.
|
||||
"""
|
||||
raw = os.getenv("AUTH_TRUSTED_PROXIES", "").strip()
|
||||
if not raw:
|
||||
return []
|
||||
nets = []
|
||||
for entry in raw.split(","):
|
||||
entry = entry.strip()
|
||||
if not entry:
|
||||
continue
|
||||
try:
|
||||
nets.append(ip_network(entry, strict=False))
|
||||
except ValueError:
|
||||
logger.warning("AUTH_TRUSTED_PROXIES: ignoring invalid entry %r", entry)
|
||||
return nets
|
||||
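# Parse sketch: AUTH_TRUSTED_PROXIES takes a comma-separated mix of CIDR ranges
# and single IPs; invalid entries are dropped with a warning instead of failing
# the whole list.
import os

os.environ["AUTH_TRUSTED_PROXIES"] = "10.0.0.0/8, 192.0.2.10, not-an-ip"
print(_trusted_proxies())  # [IPv4Network('10.0.0.0/8'), IPv4Network('192.0.2.10/32')]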
|
||||
|
||||
def _get_client_ip(request: Request) -> str:
|
||||
"""Extract the real client IP for rate limiting.
|
||||
|
||||
Trust model:
|
||||
|
||||
- The TCP peer (``request.client.host``) is always the baseline. It is
|
||||
whatever the kernel reports as the connecting socket — unforgeable
|
||||
by the client itself.
|
||||
- ``X-Real-IP`` is **only** honored if the TCP peer is in the
|
||||
``AUTH_TRUSTED_PROXIES`` allowlist (set via env var, comma-separated
|
||||
CIDR or single IPs). When set, the gateway is assumed to be behind a
|
||||
reverse proxy (nginx, Cloudflare, ALB, …) that overwrites
|
||||
``X-Real-IP`` with the original client address.
|
||||
- With no ``AUTH_TRUSTED_PROXIES`` set, ``X-Real-IP`` is silently
|
||||
ignored — closing the bypass where any client could rotate the
|
||||
header to dodge per-IP rate limits in dev / direct-gateway mode.
|
||||
|
||||
``X-Forwarded-For`` is intentionally NOT used because it is naturally
|
||||
client-controlled at the *first* hop and the trust chain is harder to
|
||||
audit per-request.
|
||||
"""
|
||||
peer_host = request.client.host if request.client else None
|
||||
|
||||
trusted = _trusted_proxies()
|
||||
if trusted and peer_host:
|
||||
try:
|
||||
peer_ip = ip_address(peer_host)
|
||||
if any(peer_ip in net for net in trusted):
|
||||
real_ip = request.headers.get("x-real-ip", "").strip()
|
||||
if real_ip:
|
||||
return real_ip
|
||||
except ValueError:
|
||||
# peer_host wasn't a parseable IP (e.g. "unknown") — fall through
|
||||
pass
|
||||
|
||||
return peer_host or "unknown"
|
||||
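# Behaviour sketch (exercising _get_client_ip above with a mocked request): the
# X-Real-IP header only wins when the TCP peer itself sits inside
# AUTH_TRUSTED_PROXIES; otherwise the socket peer is used and the header is a no-op.
import os
from unittest.mock import Mock

req = Mock()
req.client.host = "10.0.0.5"
req.headers = {"x-real-ip": "203.0.113.7"}

os.environ.pop("AUTH_TRUSTED_PROXIES", None)
assert _get_client_ip(req) == "10.0.0.5"     # direct mode: header ignored

os.environ["AUTH_TRUSTED_PROXIES"] = "10.0.0.0/8"
assert _get_client_ip(req) == "203.0.113.7"  # trusted proxy: header honored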
|
||||
|
||||
def _check_rate_limit(ip: str) -> None:
|
||||
"""Raise 429 if the IP is currently locked out."""
|
||||
record = _login_attempts.get(ip)
|
||||
if record is None:
|
||||
return
|
||||
fail_count, lock_until = record
|
||||
if fail_count >= _MAX_LOGIN_ATTEMPTS:
|
||||
if time.time() < lock_until:
|
||||
raise HTTPException(
|
||||
status_code=429,
|
||||
detail="Too many login attempts. Try again later.",
|
||||
)
|
||||
del _login_attempts[ip]
|
||||
|
||||
|
||||
_MAX_TRACKED_IPS = 10000
|
||||
|
||||
|
||||
def _record_login_failure(ip: str) -> None:
|
||||
"""Record a failed login attempt for the given IP."""
|
||||
# Evict expired lockouts when dict grows too large
|
||||
if len(_login_attempts) >= _MAX_TRACKED_IPS:
|
||||
now = time.time()
|
||||
expired = [k for k, (c, t) in _login_attempts.items() if c >= _MAX_LOGIN_ATTEMPTS and now >= t]
|
||||
for k in expired:
|
||||
del _login_attempts[k]
|
||||
# If still too large, evict cheapest-to-lose half: below-threshold
|
||||
# IPs (lock_until=0.0) sort first, then earliest-expiring lockouts.
|
||||
if len(_login_attempts) >= _MAX_TRACKED_IPS:
|
||||
by_time = sorted(_login_attempts.items(), key=lambda kv: kv[1][1])
|
||||
for k, _ in by_time[: len(by_time) // 2]:
|
||||
del _login_attempts[k]
|
||||
|
||||
record = _login_attempts.get(ip)
|
||||
if record is None:
|
||||
_login_attempts[ip] = (1, 0.0)
|
||||
else:
|
||||
new_count = record[0] + 1
|
||||
lock_until = time.time() + _LOCKOUT_SECONDS if new_count >= _MAX_LOGIN_ATTEMPTS else 0.0
|
||||
_login_attempts[ip] = (new_count, lock_until)
|
||||
|
||||
|
||||
def _record_login_success(ip: str) -> None:
|
||||
"""Clear failure counter for the given IP on successful login."""
|
||||
_login_attempts.pop(ip, None)
|
||||
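# Lockout lifecycle sketch (exercising the in-process helpers above): five
# failures lock an IP for _LOCKOUT_SECONDS, _check_rate_limit raises 429 while
# the window is open, and a successful login clears the counter entirely.
ip = "198.51.100.9"
for _ in range(_MAX_LOGIN_ATTEMPTS):
    _record_login_failure(ip)

try:
    _check_rate_limit(ip)
except HTTPException as exc:
    assert exc.status_code == 429

_record_login_success(ip)
_check_rate_limit(ip)  # no exception: the failure record was dropped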
|
||||
|
||||
# ── Endpoints ─────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@router.post("/login/local", response_model=LoginResponse)
|
||||
async def login_local(
|
||||
request: Request,
|
||||
response: Response,
|
||||
form_data: OAuth2PasswordRequestForm = Depends(),
|
||||
):
|
||||
"""Local email/password login."""
|
||||
client_ip = _get_client_ip(request)
|
||||
_check_rate_limit(client_ip)
|
||||
|
||||
user = await get_local_provider().authenticate({"email": form_data.username, "password": form_data.password})
|
||||
|
||||
if user is None:
|
||||
_record_login_failure(client_ip)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="Incorrect email or password").model_dump(),
|
||||
)
|
||||
|
||||
_record_login_success(client_ip)
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return LoginResponse(
|
||||
expires_in=get_auth_config().token_expiry_days * 24 * 3600,
|
||||
needs_setup=user.needs_setup,
|
||||
)
|
||||
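# Client-side sketch (host and credentials are placeholders): the endpoint
# consumes OAuth2PasswordRequestForm, so credentials travel as form fields named
# "username"/"password" even though the username is an email, and the token is
# returned only as the HttpOnly cookie, never in the JSON body.
import httpx

with httpx.Client(base_url="http://localhost:8000") as client:
    resp = client.post(
        "/api/v1/auth/login/local",
        data={"username": "admin@example.com", "password": "correct-horse-battery"},
    )
    resp.raise_for_status()
    print(resp.json())                      # {"expires_in": ..., "needs_setup": ...}
    print("access_token" in resp.cookies)   # True — the session lives in the cookie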
|
||||
|
||||
@router.post("/register", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
|
||||
async def register(request: Request, response: Response, body: RegisterRequest):
|
||||
"""Register a new user account (always 'user' role).
|
||||
|
||||
Admin is auto-created on first boot. This endpoint creates regular users.
|
||||
Auto-login by setting the session cookie.
|
||||
"""
|
||||
try:
|
||||
user = await get_local_provider().create_user(email=body.email, password=body.password, system_role="user")
|
||||
except ValueError:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.EMAIL_ALREADY_EXISTS, message="Email already registered").model_dump(),
|
||||
)
|
||||
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role)
|
||||
|
||||
|
||||
@router.post("/logout", response_model=MessageResponse)
|
||||
async def logout(request: Request, response: Response):
|
||||
"""Logout current user by clearing the cookie."""
|
||||
response.delete_cookie(key="access_token", secure=is_secure_request(request), samesite="lax")
|
||||
return MessageResponse(message="Successfully logged out")
|
||||
|
||||
|
||||
@router.post("/change-password", response_model=MessageResponse)
|
||||
async def change_password(request: Request, response: Response, body: ChangePasswordRequest):
|
||||
"""Change password for the currently authenticated user.
|
||||
|
||||
Also handles the first-boot setup flow:
|
||||
- If new_email is provided, updates email (checks uniqueness)
|
||||
- If user.needs_setup is True and new_email is given, clears needs_setup
|
||||
- Always increments token_version to invalidate old sessions
|
||||
- Re-issues session cookie with new token_version
|
||||
"""
|
||||
from app.gateway.auth.password import hash_password_async, verify_password_async
|
||||
|
||||
user = await get_current_user_from_request(request)
|
||||
|
||||
if user.password_hash is None:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="OAuth users cannot change password").model_dump())
|
||||
|
||||
if not await verify_password_async(body.current_password, user.password_hash):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.INVALID_CREDENTIALS, message="Current password is incorrect").model_dump())
|
||||
|
||||
provider = get_local_provider()
|
||||
|
||||
# Update email if provided
|
||||
if body.new_email is not None:
|
||||
existing = await provider.get_user_by_email(body.new_email)
|
||||
if existing and str(existing.id) != str(user.id):
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=AuthErrorResponse(code=AuthErrorCode.EMAIL_ALREADY_EXISTS, message="Email already in use").model_dump())
|
||||
user.email = body.new_email
|
||||
|
||||
# Update password + bump version
|
||||
user.password_hash = await hash_password_async(body.new_password)
|
||||
user.token_version += 1
|
||||
|
||||
# Clear setup flag if this is the setup flow
|
||||
if user.needs_setup and body.new_email is not None:
|
||||
user.needs_setup = False
|
||||
|
||||
await provider.update_user(user)
|
||||
|
||||
# Re-issue cookie with new token_version
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return MessageResponse(message="Password changed successfully")
|
||||
|
||||
|
||||
@router.get("/me", response_model=UserResponse)
|
||||
async def get_me(request: Request):
|
||||
"""Get current authenticated user info."""
|
||||
user = await get_current_user_from_request(request)
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role, needs_setup=user.needs_setup)
|
||||
|
||||
|
||||
@router.get("/setup-status")
|
||||
async def setup_status():
|
||||
"""Check if an admin account exists. Returns needs_setup=True when no admin exists."""
|
||||
admin_count = await get_local_provider().count_admin_users()
|
||||
return {"needs_setup": admin_count == 0}
|
||||
|
||||
|
||||
class InitializeAdminRequest(BaseModel):
|
||||
"""Request model for first-boot admin account creation."""
|
||||
|
||||
email: EmailStr
|
||||
password: str = Field(..., min_length=8)
|
||||
|
||||
_strong_password = field_validator("password")(classmethod(lambda cls, v: _validate_strong_password(v)))
|
||||
|
||||
|
||||
@router.post("/initialize", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
|
||||
async def initialize_admin(request: Request, response: Response, body: InitializeAdminRequest):
|
||||
"""Create the first admin account on initial system setup.
|
||||
|
||||
Only callable when no admin exists. Returns 409 Conflict if an admin
|
||||
already exists.
|
||||
|
||||
On success, the admin account is created with ``needs_setup=False`` and
|
||||
the session cookie is set.
|
||||
"""
|
||||
admin_count = await get_local_provider().count_admin_users()
|
||||
if admin_count > 0:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.SYSTEM_ALREADY_INITIALIZED, message="System already initialized").model_dump(),
|
||||
)
|
||||
|
||||
try:
|
||||
user = await get_local_provider().create_user(email=body.email, password=body.password, system_role="admin", needs_setup=False)
|
||||
except ValueError:
|
||||
# DB unique-constraint race: another concurrent request beat us.
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail=AuthErrorResponse(code=AuthErrorCode.SYSTEM_ALREADY_INITIALIZED, message="System already initialized").model_dump(),
|
||||
)
|
||||
|
||||
token = create_access_token(str(user.id), token_version=user.token_version)
|
||||
_set_session_cookie(response, token, request)
|
||||
|
||||
return UserResponse(id=str(user.id), email=user.email, system_role=user.system_role)
|
||||
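# First-boot flow sketch (host and credentials are placeholders): probe
# setup-status, create the single admin through /initialize, and treat a 409 as
# "another request won the race" rather than an error.
import httpx

with httpx.Client(base_url="http://localhost:8000") as client:
    if client.get("/api/v1/auth/setup-status").json()["needs_setup"]:
        resp = client.post(
            "/api/v1/auth/initialize",
            json={"email": "admin@example.com", "password": "correct-horse-battery"},
        )
        if resp.status_code == 409:
            print("system was initialized by a concurrent request")
        else:
            resp.raise_for_status()  # 201: admin created, session cookie set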
|
||||
|
||||
# ── OAuth Endpoints (Future/Placeholder) ─────────────────────────────────
|
||||
|
||||
|
||||
@router.get("/oauth/{provider}")
|
||||
async def oauth_login(provider: str):
|
||||
"""Initiate OAuth login flow.
|
||||
|
||||
Redirects to the OAuth provider's authorization URL.
|
||||
Currently a placeholder; requires an OAuth provider implementation.
|
||||
"""
|
||||
if provider not in ["github", "google"]:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Unsupported OAuth provider: {provider}",
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="OAuth login not yet implemented",
|
||||
)
|
||||
|
||||
|
||||
@router.get("/callback/{provider}")
|
||||
async def oauth_callback(provider: str, code: str, state: str):
|
||||
"""OAuth callback endpoint.
|
||||
|
||||
Handles the OAuth provider's callback after user authorization.
|
||||
Currently a placeholder.
|
||||
"""
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_501_NOT_IMPLEMENTED,
|
||||
detail="OAuth callback not yet implemented",
|
||||
)
|
||||
@@ -0,0 +1,188 @@
|
||||
"""Feedback endpoints — create, list, stats, delete.
|
||||
|
||||
Allows users to submit thumbs-up/down feedback on runs,
|
||||
optionally scoped to a specific message.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_current_user, get_feedback_repo, get_run_store
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api/threads", tags=["feedback"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Request / response models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class FeedbackCreateRequest(BaseModel):
|
||||
rating: int = Field(..., description="Feedback rating: +1 (positive) or -1 (negative)")
|
||||
comment: str | None = Field(default=None, description="Optional text feedback")
|
||||
message_id: str | None = Field(default=None, description="Optional: scope feedback to a specific message")
|
||||
|
||||
|
||||
class FeedbackUpsertRequest(BaseModel):
|
||||
rating: int = Field(..., description="Feedback rating: +1 (positive) or -1 (negative)")
|
||||
comment: str | None = Field(default=None, description="Optional text feedback")
|
||||
|
||||
|
||||
class FeedbackResponse(BaseModel):
|
||||
feedback_id: str
|
||||
run_id: str
|
||||
thread_id: str
|
||||
user_id: str | None = None
|
||||
message_id: str | None = None
|
||||
rating: int
|
||||
comment: str | None = None
|
||||
created_at: str = ""
|
||||
|
||||
|
||||
class FeedbackStatsResponse(BaseModel):
|
||||
run_id: str
|
||||
total: int = 0
|
||||
positive: int = 0
|
||||
negative: int = 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Endpoints
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.put("/{thread_id}/runs/{run_id}/feedback", response_model=FeedbackResponse)
|
||||
@require_permission("threads", "write", owner_check=True, require_existing=True)
|
||||
async def upsert_feedback(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
body: FeedbackUpsertRequest,
|
||||
request: Request,
|
||||
) -> dict[str, Any]:
|
||||
"""Create or update feedback for a run (idempotent)."""
|
||||
if body.rating not in (1, -1):
|
||||
raise HTTPException(status_code=400, detail="rating must be +1 or -1")
|
||||
|
||||
user_id = await get_current_user(request)
|
||||
|
||||
run_store = get_run_store(request)
|
||||
run = await run_store.get(run_id)
|
||||
if run is None:
|
||||
raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
|
||||
if run.get("thread_id") != thread_id:
|
||||
raise HTTPException(status_code=404, detail=f"Run {run_id} not found in thread {thread_id}")
|
||||
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.upsert(
|
||||
run_id=run_id,
|
||||
thread_id=thread_id,
|
||||
rating=body.rating,
|
||||
user_id=user_id,
|
||||
comment=body.comment,
|
||||
)
|
||||
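# Usage sketch (thread/run ids are placeholders and the client is assumed to
# carry an authenticated session cookie): PUT is the idempotent path — repeating
# the call updates the caller's existing feedback instead of adding another row,
# while the POST endpoint below always creates a new record.
import httpx

with httpx.Client(base_url="http://localhost:8000") as client:
    url = "/api/threads/th_123/runs/run_456/feedback"
    client.put(url, json={"rating": 1})
    client.put(url, json={"rating": -1, "comment": "off-topic"})  # updates in place
    print(client.get(url).json())  # still a single record for this user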
|
||||
|
||||
@router.delete("/{thread_id}/runs/{run_id}/feedback")
|
||||
@require_permission("threads", "delete", owner_check=True, require_existing=True)
|
||||
async def delete_run_feedback(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
request: Request,
|
||||
) -> dict[str, bool]:
|
||||
"""Delete the current user's feedback for a run."""
|
||||
user_id = await get_current_user(request)
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
deleted = await feedback_repo.delete_by_run(
|
||||
thread_id=thread_id,
|
||||
run_id=run_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
if not deleted:
|
||||
raise HTTPException(status_code=404, detail="No feedback found for this run")
|
||||
return {"success": True}
|
||||
|
||||
|
||||
@router.post("/{thread_id}/runs/{run_id}/feedback", response_model=FeedbackResponse)
|
||||
@require_permission("threads", "write", owner_check=True, require_existing=True)
|
||||
async def create_feedback(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
body: FeedbackCreateRequest,
|
||||
request: Request,
|
||||
) -> dict[str, Any]:
|
||||
"""Submit feedback (thumbs-up/down) for a run."""
|
||||
if body.rating not in (1, -1):
|
||||
raise HTTPException(status_code=400, detail="rating must be +1 or -1")
|
||||
|
||||
user_id = await get_current_user(request)
|
||||
|
||||
# Validate run exists and belongs to thread
|
||||
run_store = get_run_store(request)
|
||||
run = await run_store.get(run_id)
|
||||
if run is None:
|
||||
raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
|
||||
if run.get("thread_id") != thread_id:
|
||||
raise HTTPException(status_code=404, detail=f"Run {run_id} not found in thread {thread_id}")
|
||||
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.create(
|
||||
run_id=run_id,
|
||||
thread_id=thread_id,
|
||||
rating=body.rating,
|
||||
user_id=user_id,
|
||||
message_id=body.message_id,
|
||||
comment=body.comment,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}/feedback", response_model=list[FeedbackResponse])
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def list_feedback(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
request: Request,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""List all feedback for a run."""
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.list_by_run(thread_id, run_id)
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}/feedback/stats", response_model=FeedbackStatsResponse)
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def feedback_stats(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
request: Request,
|
||||
) -> dict[str, Any]:
|
||||
"""Get aggregated feedback stats (positive/negative counts) for a run."""
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.aggregate_by_run(thread_id, run_id)
|
||||
|
||||
|
||||
@router.delete("/{thread_id}/runs/{run_id}/feedback/{feedback_id}")
|
||||
@require_permission("threads", "delete", owner_check=True, require_existing=True)
|
||||
async def delete_feedback(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
feedback_id: str,
|
||||
request: Request,
|
||||
) -> dict[str, bool]:
|
||||
"""Delete a feedback record."""
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
# Verify feedback belongs to the specified thread/run before deleting
|
||||
existing = await feedback_repo.get(feedback_id)
|
||||
if existing is None:
|
||||
raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found")
|
||||
if existing.get("thread_id") != thread_id or existing.get("run_id") != run_id:
|
||||
raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found in run {run_id}")
|
||||
deleted = await feedback_repo.delete(feedback_id)
|
||||
if not deleted:
|
||||
raise HTTPException(status_code=404, detail=f"Feedback {feedback_id} not found")
|
||||
return {"success": True}
|
||||
@@ -3,10 +3,12 @@ import logging
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from deerflow.config.extensions_config import ExtensionsConfig, get_extensions_config, reload_extensions_config
|
||||
from app.gateway.deps import get_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.config.extensions_config import ExtensionsConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api", tags=["mcp"])
|
||||
@@ -69,7 +71,7 @@ class McpConfigUpdateRequest(BaseModel):
|
||||
summary="Get MCP Configuration",
|
||||
description="Retrieve the current Model Context Protocol (MCP) server configurations.",
|
||||
)
|
||||
async def get_mcp_configuration() -> McpConfigResponse:
|
||||
async def get_mcp_configuration(config: AppConfig = Depends(get_config)) -> McpConfigResponse:
|
||||
"""Get the current MCP configuration.
|
||||
|
||||
Returns:
|
||||
@@ -90,9 +92,9 @@ async def get_mcp_configuration() -> McpConfigResponse:
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_extensions_config()
|
||||
ext = config.extensions
|
||||
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in config.mcp_servers.items()})
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in ext.mcp_servers.items()})
|
||||
|
||||
|
||||
@router.put(
|
||||
@@ -101,7 +103,11 @@ async def get_mcp_configuration() -> McpConfigResponse:
|
||||
summary="Update MCP Configuration",
|
||||
description="Update Model Context Protocol (MCP) server configurations and save to file.",
|
||||
)
|
||||
async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfigResponse:
|
||||
async def update_mcp_configuration(
|
||||
request: McpConfigUpdateRequest,
|
||||
http_request: Request,
|
||||
config: AppConfig = Depends(get_config),
|
||||
) -> McpConfigResponse:
|
||||
"""Update the MCP configuration.
|
||||
|
||||
This will:
|
||||
@@ -142,13 +148,13 @@ async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfig
|
||||
config_path = Path.cwd().parent / "extensions_config.json"
|
||||
logger.info(f"No existing extensions config found. Creating new config at: {config_path}")
|
||||
|
||||
# Load current config to preserve skills configuration
|
||||
current_config = get_extensions_config()
|
||||
# Use injected config to preserve skills configuration
|
||||
current_ext = config.extensions
|
||||
|
||||
# Convert request to dict format for JSON serialization
|
||||
config_data = {
|
||||
"mcpServers": {name: server.model_dump() for name, server in request.mcp_servers.items()},
|
||||
"skills": {name: {"enabled": skill.enabled} for name, skill in current_config.skills.items()},
|
||||
"skills": {name: {"enabled": skill.enabled} for name, skill in current_ext.skills.items()},
|
||||
}
|
||||
|
||||
# Write the configuration to file
|
||||
@@ -160,9 +166,11 @@ async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfig
|
||||
# NOTE: No need to reload/reset cache here - LangGraph Server (separate process)
|
||||
# will detect config file changes via mtime and reinitialize MCP tools automatically
|
||||
|
||||
# Reload the configuration and update the global cache
|
||||
reloaded_config = reload_extensions_config()
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in reloaded_config.mcp_servers.items()})
|
||||
# Reload the configuration and swap ``app.state.config`` so subsequent
|
||||
# ``Depends(get_config)`` calls see the refreshed value.
|
||||
reloaded = AppConfig.from_file()
|
||||
http_request.app.state.config = reloaded
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in reloaded.extensions.mcp_servers.items()})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update MCP configuration: {e}", exc_info=True)
|
||||
|
||||
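# Sketch of the dependency this relies on (an assumption — get_config lives in
# app.gateway.deps and its body is not shown in this diff): reading the config
# off request.app.state is what makes the swap above visible to later requests
# without a module-level cache to invalidate.
from fastapi import Request
from deerflow.config.app_config import AppConfig

def get_config(request: Request) -> AppConfig:
    return request.app.state.config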
@@ -1,8 +1,9 @@
|
||||
"""Memory API router for retrieving and managing global memory data."""
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.gateway.deps import get_config
|
||||
from deerflow.agents.memory.updater import (
|
||||
clear_memory_data,
|
||||
create_memory_fact,
|
||||
@@ -12,7 +13,8 @@ from deerflow.agents.memory.updater import (
|
||||
reload_memory_data,
|
||||
update_memory_fact,
|
||||
)
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
|
||||
router = APIRouter(prefix="/api", tags=["memory"])
|
||||
|
||||
@@ -49,6 +51,7 @@ class Fact(BaseModel):
|
||||
confidence: float = Field(default=0.5, description="Confidence score (0-1)")
|
||||
createdAt: str = Field(default="", description="Creation timestamp")
|
||||
source: str = Field(default="unknown", description="Source thread ID")
|
||||
sourceError: str | None = Field(default=None, description="Optional description of the prior mistake or wrong approach")
|
||||
|
||||
|
||||
class MemoryResponse(BaseModel):
|
||||
@@ -108,10 +111,11 @@ class MemoryStatusResponse(BaseModel):
|
||||
@router.get(
|
||||
"/memory",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Get Memory Data",
|
||||
description="Retrieve the current global memory data including user context, history, and facts.",
|
||||
)
|
||||
async def get_memory() -> MemoryResponse:
|
||||
async def get_memory(app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Get the current global memory data.
|
||||
|
||||
Returns:
|
||||
@@ -145,17 +149,18 @@ async def get_memory() -> MemoryResponse:
|
||||
}
|
||||
```
|
||||
"""
|
||||
memory_data = get_memory_data()
|
||||
memory_data = get_memory_data(app_config.memory, user_id=get_effective_user_id())
|
||||
return MemoryResponse(**memory_data)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/memory/reload",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Reload Memory Data",
|
||||
description="Reload memory data from the storage file, refreshing the in-memory cache.",
|
||||
)
|
||||
async def reload_memory() -> MemoryResponse:
|
||||
async def reload_memory(app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Reload memory data from file.
|
||||
|
||||
This forces a reload of the memory data from the storage file,
|
||||
@@ -164,20 +169,21 @@ async def reload_memory() -> MemoryResponse:
|
||||
Returns:
|
||||
The reloaded memory data.
|
||||
"""
|
||||
memory_data = reload_memory_data()
|
||||
memory_data = reload_memory_data(app_config.memory, user_id=get_effective_user_id())
|
||||
return MemoryResponse(**memory_data)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/memory",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Clear All Memory Data",
|
||||
description="Delete all saved memory data and reset the memory structure to an empty state.",
|
||||
)
|
||||
async def clear_memory() -> MemoryResponse:
|
||||
async def clear_memory(app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Clear all persisted memory data."""
|
||||
try:
|
||||
memory_data = clear_memory_data()
|
||||
memory_data = clear_memory_data(app_config.memory, user_id=get_effective_user_id())
|
||||
except OSError as exc:
|
||||
raise HTTPException(status_code=500, detail="Failed to clear memory data.") from exc
|
||||
|
||||
@@ -187,16 +193,19 @@ async def clear_memory() -> MemoryResponse:
|
||||
@router.post(
|
||||
"/memory/facts",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Create Memory Fact",
|
||||
description="Create a single saved memory fact manually.",
|
||||
)
|
||||
async def create_memory_fact_endpoint(request: FactCreateRequest) -> MemoryResponse:
|
||||
async def create_memory_fact_endpoint(request: FactCreateRequest, app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Create a single fact manually."""
|
||||
try:
|
||||
memory_data = create_memory_fact(
|
||||
app_config.memory,
|
||||
content=request.content,
|
||||
category=request.category,
|
||||
confidence=request.confidence,
|
||||
user_id=get_effective_user_id(),
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise _map_memory_fact_value_error(exc) from exc
|
||||
@@ -209,13 +218,14 @@ async def create_memory_fact_endpoint(request: FactCreateRequest) -> MemoryRespo
|
||||
@router.delete(
|
||||
"/memory/facts/{fact_id}",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Delete Memory Fact",
|
||||
description="Delete a single saved memory fact by its fact id.",
|
||||
)
|
||||
async def delete_memory_fact_endpoint(fact_id: str) -> MemoryResponse:
|
||||
async def delete_memory_fact_endpoint(fact_id: str, app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Delete a single fact from memory by fact id."""
|
||||
try:
|
||||
memory_data = delete_memory_fact(fact_id)
|
||||
memory_data = delete_memory_fact(app_config.memory, fact_id, user_id=get_effective_user_id())
|
||||
except KeyError as exc:
|
||||
raise HTTPException(status_code=404, detail=f"Memory fact '{fact_id}' not found.") from exc
|
||||
except OSError as exc:
|
||||
@@ -227,17 +237,20 @@ async def delete_memory_fact_endpoint(fact_id: str) -> MemoryResponse:
|
||||
@router.patch(
|
||||
"/memory/facts/{fact_id}",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Patch Memory Fact",
|
||||
description="Partially update a single saved memory fact by its fact id while preserving omitted fields.",
|
||||
)
|
||||
async def update_memory_fact_endpoint(fact_id: str, request: FactPatchRequest) -> MemoryResponse:
|
||||
async def update_memory_fact_endpoint(fact_id: str, request: FactPatchRequest, app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Partially update a single fact manually."""
|
||||
try:
|
||||
memory_data = update_memory_fact(
|
||||
app_config.memory,
|
||||
fact_id=fact_id,
|
||||
content=request.content,
|
||||
category=request.category,
|
||||
confidence=request.confidence,
|
||||
user_id=get_effective_user_id(),
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise _map_memory_fact_value_error(exc) from exc
|
||||
@@ -252,25 +265,27 @@ async def update_memory_fact_endpoint(fact_id: str, request: FactPatchRequest) -
|
||||
@router.get(
|
||||
"/memory/export",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Export Memory Data",
|
||||
description="Export the current global memory data as JSON for backup or transfer.",
|
||||
)
|
||||
async def export_memory() -> MemoryResponse:
|
||||
async def export_memory(app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Export the current memory data."""
|
||||
memory_data = get_memory_data()
|
||||
memory_data = get_memory_data(app_config.memory, user_id=get_effective_user_id())
|
||||
return MemoryResponse(**memory_data)
|
||||
|
||||
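# Round-trip sketch (host is a placeholder, authenticated client assumed): the
# export endpoint returns the same MemoryResponse shape that the import endpoint
# below accepts, so backup/restore is a plain GET followed by a POST.
import httpx

with httpx.Client(base_url="http://localhost:8000") as client:
    backup = client.get("/api/memory/export").json()
    # ... later, after a wipe or on another instance ...
    client.post("/api/memory/import", json=backup).raise_for_status()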
|
||||
@router.post(
|
||||
"/memory/import",
|
||||
response_model=MemoryResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Import Memory Data",
|
||||
description="Import and overwrite the current global memory data from a JSON payload.",
|
||||
)
|
||||
async def import_memory(request: MemoryResponse) -> MemoryResponse:
|
||||
async def import_memory(request: MemoryResponse, app_config: AppConfig = Depends(get_config)) -> MemoryResponse:
|
||||
"""Import and persist memory data."""
|
||||
try:
|
||||
memory_data = import_memory_data(request.model_dump())
|
||||
memory_data = import_memory_data(app_config.memory, request.model_dump(), user_id=get_effective_user_id())
|
||||
except OSError as exc:
|
||||
raise HTTPException(status_code=500, detail="Failed to import memory data.") from exc
|
||||
|
||||
@@ -283,7 +298,9 @@ async def import_memory(request: MemoryResponse) -> MemoryResponse:
|
||||
summary="Get Memory Configuration",
|
||||
description="Retrieve the current memory system configuration.",
|
||||
)
|
||||
async def get_memory_config_endpoint() -> MemoryConfigResponse:
|
||||
async def get_memory_config_endpoint(
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> MemoryConfigResponse:
|
||||
"""Get the memory system configuration.
|
||||
|
||||
Returns:
|
||||
@@ -302,7 +319,7 @@ async def get_memory_config_endpoint() -> MemoryConfigResponse:
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_memory_config()
|
||||
config = app_config.memory
|
||||
return MemoryConfigResponse(
|
||||
enabled=config.enabled,
|
||||
storage_path=config.storage_path,
|
||||
@@ -317,17 +334,20 @@ async def get_memory_config_endpoint() -> MemoryConfigResponse:
|
||||
@router.get(
|
||||
"/memory/status",
|
||||
response_model=MemoryStatusResponse,
|
||||
response_model_exclude_none=True,
|
||||
summary="Get Memory Status",
|
||||
description="Retrieve both memory configuration and current data in a single request.",
|
||||
)
|
||||
async def get_memory_status() -> MemoryStatusResponse:
|
||||
async def get_memory_status(
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> MemoryStatusResponse:
|
||||
"""Get the memory system status including configuration and data.
|
||||
|
||||
Returns:
|
||||
Combined memory configuration and current data.
|
||||
"""
|
||||
config = get_memory_config()
|
||||
memory_data = get_memory_data()
|
||||
config = app_config.memory
|
||||
memory_data = get_memory_data(config, user_id=get_effective_user_id())
|
||||
|
||||
return MemoryStatusResponse(
|
||||
config=MemoryConfigResponse(
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from deerflow.config import get_app_config
|
||||
from app.gateway.deps import get_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
router = APIRouter(prefix="/api", tags=["models"])
|
||||
|
||||
@@ -17,10 +18,17 @@ class ModelResponse(BaseModel):
|
||||
supports_reasoning_effort: bool = Field(default=False, description="Whether model supports reasoning effort")
|
||||
|
||||
|
||||
class TokenUsageResponse(BaseModel):
|
||||
"""Token usage display configuration."""
|
||||
|
||||
enabled: bool = Field(default=False, description="Whether token usage display is enabled")
|
||||
|
||||
|
||||
class ModelsListResponse(BaseModel):
|
||||
"""Response model for listing all models."""
|
||||
|
||||
models: list[ModelResponse]
|
||||
token_usage: TokenUsageResponse
|
||||
|
||||
|
||||
@router.get(
|
||||
@@ -29,14 +37,14 @@ class ModelsListResponse(BaseModel):
|
||||
summary="List All Models",
|
||||
description="Retrieve a list of all available AI models configured in the system.",
|
||||
)
|
||||
async def list_models() -> ModelsListResponse:
|
||||
async def list_models(config: AppConfig = Depends(get_config)) -> ModelsListResponse:
|
||||
"""List all available models from configuration.
|
||||
|
||||
Returns model information suitable for frontend display,
|
||||
excluding sensitive fields like API keys and internal configuration.
|
||||
|
||||
Returns:
|
||||
A list of all configured models with their metadata.
|
||||
A list of all configured models with their metadata and token usage display settings.
|
||||
|
||||
Example Response:
|
||||
```json
|
||||
@@ -44,21 +52,27 @@ async def list_models() -> ModelsListResponse:
|
||||
"models": [
|
||||
{
|
||||
"name": "gpt-4",
|
||||
"model": "gpt-4",
|
||||
"display_name": "GPT-4",
|
||||
"description": "OpenAI GPT-4 model",
|
||||
"supports_thinking": false
|
||||
"supports_thinking": false,
|
||||
"supports_reasoning_effort": false
|
||||
},
|
||||
{
|
||||
"name": "claude-3-opus",
|
||||
"model": "claude-3-opus",
|
||||
"display_name": "Claude 3 Opus",
|
||||
"description": "Anthropic Claude 3 Opus model",
|
||||
"supports_thinking": true
|
||||
"supports_thinking": true,
|
||||
"supports_reasoning_effort": false
|
||||
}
|
||||
]
|
||||
],
|
||||
"token_usage": {
|
||||
"enabled": true
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_app_config()
|
||||
models = [
|
||||
ModelResponse(
|
||||
name=model.name,
|
||||
@@ -70,7 +84,10 @@ async def list_models() -> ModelsListResponse:
|
||||
)
|
||||
for model in config.models
|
||||
]
|
||||
return ModelsListResponse(models=models)
|
||||
return ModelsListResponse(
|
||||
models=models,
|
||||
token_usage=TokenUsageResponse(enabled=config.token_usage.enabled),
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
@@ -79,7 +96,7 @@ async def list_models() -> ModelsListResponse:
|
||||
summary="Get Model Details",
|
||||
description="Retrieve detailed information about a specific AI model by its name.",
|
||||
)
|
||||
async def get_model(model_name: str) -> ModelResponse:
|
||||
async def get_model(model_name: str, config: AppConfig = Depends(get_config)) -> ModelResponse:
|
||||
"""Get a specific model by name.
|
||||
|
||||
Args:
|
||||
@@ -101,7 +118,6 @@ async def get_model(model_name: str) -> ModelResponse:
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_app_config()
|
||||
model = config.get_model_config(model_name)
|
||||
if model is None:
|
||||
raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
|
||||
|
||||
@@ -11,10 +11,11 @@ import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
from fastapi import APIRouter, Request
|
||||
from fastapi import APIRouter, HTTPException, Query, Request
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from app.gateway.deps import get_checkpointer, get_run_manager, get_stream_bridge
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_checkpointer, get_feedback_repo, get_run_event_store, get_run_manager, get_run_store, get_stream_bridge
|
||||
from app.gateway.routers.thread_runs import RunCreateRequest
|
||||
from app.gateway.services import sse_consumer, start_run
|
||||
from deerflow.runtime import serialize_channel_values
|
||||
@@ -51,6 +52,7 @@ async def stateless_stream(body: RunCreateRequest, request: Request) -> Streamin
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
"Content-Location": f"/api/threads/{thread_id}/runs/{record.run_id}",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -84,3 +86,57 @@ async def stateless_wait(body: RunCreateRequest, request: Request) -> dict:
|
||||
logger.exception("Failed to fetch final state for run %s", record.run_id)
|
||||
|
||||
return {"status": record.status.value, "error": record.error}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Run-scoped read endpoints
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _resolve_run(run_id: str, request: Request) -> dict:
|
||||
"""Fetch run by run_id with user ownership check. Raises 404 if not found."""
|
||||
run_store = get_run_store(request)
|
||||
record = await run_store.get(run_id) # user_id=AUTO filters by contextvar
|
||||
if record is None:
|
||||
raise HTTPException(status_code=404, detail=f"Run {run_id} not found")
|
||||
return record
|
||||
|
||||
|
||||
@router.get("/{run_id}/messages")
|
||||
@require_permission("runs", "read")
|
||||
async def run_messages(
|
||||
run_id: str,
|
||||
request: Request,
|
||||
limit: int = Query(default=50, le=200, ge=1),
|
||||
before_seq: int | None = Query(default=None),
|
||||
after_seq: int | None = Query(default=None),
|
||||
) -> dict:
|
||||
"""Return paginated messages for a run (cursor-based).
|
||||
|
||||
Pagination:
|
||||
- after_seq: messages with seq > after_seq (forward)
|
||||
- before_seq: messages with seq < before_seq (backward)
|
||||
- neither: latest messages
|
||||
|
||||
Response: { data: [...], has_more: bool }
|
||||
"""
|
||||
run = await _resolve_run(run_id, request)
|
||||
event_store = get_run_event_store(request)
|
||||
rows = await event_store.list_messages_by_run(
|
||||
run["thread_id"], run_id,
|
||||
limit=limit + 1,
|
||||
before_seq=before_seq,
|
||||
after_seq=after_seq,
|
||||
)
|
||||
has_more = len(rows) > limit
|
||||
data = rows[:limit] if has_more else rows
|
||||
return {"data": data, "has_more": has_more}
|
||||
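# Client-side pagination sketch (the /api/runs prefix and the "seq" field on the
# returned rows are assumptions; adjust to the real router prefix): walk forward
# with after_seq until has_more is False. The handler fetches limit+1 rows and
# trims, so has_more is known without a separate count query.
import httpx

def iter_run_messages(client: httpx.Client, run_id: str, page_size: int = 50):
    after_seq = None
    while True:
        params = {"limit": page_size}
        if after_seq is not None:
            params["after_seq"] = after_seq
        page = client.get(f"/api/runs/{run_id}/messages", params=params).json()
        yield from page["data"]
        if not page["has_more"] or not page["data"]:
            break
        after_seq = page["data"][-1]["seq"]  # assumes ascending seq order in forward mode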
|
||||
|
||||
@router.get("/{run_id}/feedback")
|
||||
@require_permission("runs", "read")
|
||||
async def run_feedback(run_id: str, request: Request) -> list[dict]:
|
||||
"""Return all feedback for a run."""
|
||||
run = await _resolve_run(run_id, request)
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
return await feedback_repo.list_by_run(run["thread_id"], run_id)
|
||||
|
||||
@@ -1,14 +1,32 @@
|
||||
import errno
|
||||
import json
|
||||
import logging
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.gateway.deps import get_config
|
||||
from app.gateway.path_utils import resolve_thread_virtual_path
|
||||
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config
|
||||
from deerflow.agents.lead_agent.prompt import refresh_skills_system_prompt_cache_async
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.config.extensions_config import ExtensionsConfig
|
||||
from deerflow.skills import Skill, load_skills
|
||||
from deerflow.skills.installer import SkillAlreadyExistsError, install_skill_from_archive
|
||||
from deerflow.skills.manager import (
|
||||
append_history,
|
||||
atomic_write,
|
||||
custom_skill_exists,
|
||||
ensure_custom_skill_is_editable,
|
||||
get_custom_skill_dir,
|
||||
get_custom_skill_file,
|
||||
get_skill_history_file,
|
||||
read_custom_skill_content,
|
||||
read_history,
|
||||
validate_skill_markdown_content,
|
||||
)
|
||||
from deerflow.skills.security_scanner import scan_skill_content
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -52,6 +70,22 @@ class SkillInstallResponse(BaseModel):
|
||||
message: str = Field(..., description="Installation result message")
|
||||
|
||||
|
||||
class CustomSkillContentResponse(SkillResponse):
|
||||
content: str = Field(..., description="Raw SKILL.md content")
|
||||
|
||||
|
||||
class CustomSkillUpdateRequest(BaseModel):
|
||||
content: str = Field(..., description="Replacement SKILL.md content")
|
||||
|
||||
|
||||
class CustomSkillHistoryResponse(BaseModel):
|
||||
history: list[dict]
|
||||
|
||||
|
||||
class SkillRollbackRequest(BaseModel):
|
||||
history_index: int = Field(default=-1, description="History entry index to restore from, defaulting to the latest change.")
|
||||
|
||||
|
||||
def _skill_to_response(skill: Skill) -> SkillResponse:
|
||||
"""Convert a Skill object to a SkillResponse."""
|
||||
return SkillResponse(
|
||||
@@ -69,24 +103,214 @@ def _skill_to_response(skill: Skill) -> SkillResponse:
|
||||
summary="List All Skills",
|
||||
description="Retrieve a list of all available skills from both public and custom directories.",
|
||||
)
|
||||
async def list_skills() -> SkillsListResponse:
|
||||
async def list_skills(app_config: AppConfig = Depends(get_config)) -> SkillsListResponse:
|
||||
try:
|
||||
skills = load_skills(enabled_only=False)
|
||||
skills = load_skills(app_config, enabled_only=False)
|
||||
return SkillsListResponse(skills=[_skill_to_response(skill) for skill in skills])
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load skills: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to load skills: {str(e)}")
|
||||
|
||||
|
||||
@router.post(
|
||||
"/skills/install",
|
||||
response_model=SkillInstallResponse,
|
||||
summary="Install Skill",
|
||||
description="Install a skill from a .skill file (ZIP archive) located in the thread's user-data directory.",
|
||||
)
|
||||
async def install_skill(request: SkillInstallRequest, app_config: AppConfig = Depends(get_config)) -> SkillInstallResponse:
|
||||
try:
|
||||
skill_file_path = resolve_thread_virtual_path(request.thread_id, request.path)
|
||||
result = install_skill_from_archive(skill_file_path)
|
||||
await refresh_skills_system_prompt_cache_async(app_config)
|
||||
return SkillInstallResponse(**result)
|
||||
except FileNotFoundError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SkillAlreadyExistsError as e:
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to install skill: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to install skill: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/skills/custom", response_model=SkillsListResponse, summary="List Custom Skills")
|
||||
async def list_custom_skills(app_config: AppConfig = Depends(get_config)) -> SkillsListResponse:
|
||||
try:
|
||||
skills = [skill for skill in load_skills(app_config, enabled_only=False) if skill.category == "custom"]
|
||||
return SkillsListResponse(skills=[_skill_to_response(skill) for skill in skills])
|
||||
except Exception as e:
|
||||
logger.error("Failed to list custom skills: %s", e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to list custom skills: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/skills/custom/{skill_name}", response_model=CustomSkillContentResponse, summary="Get Custom Skill Content")
|
||||
async def get_custom_skill(skill_name: str, app_config: AppConfig = Depends(get_config)) -> CustomSkillContentResponse:
|
||||
try:
|
||||
skills = load_skills(app_config, enabled_only=False)
|
||||
skill = next((s for s in skills if s.name == skill_name and s.category == "custom"), None)
|
||||
if skill is None:
|
||||
raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
|
||||
return CustomSkillContentResponse(**_skill_to_response(skill).model_dump(), content=read_custom_skill_content(skill_name, app_config))
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Failed to get custom skill %s: %s", skill_name, e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get custom skill: {str(e)}")
|
||||
|
||||
|
||||
@router.put("/skills/custom/{skill_name}", response_model=CustomSkillContentResponse, summary="Edit Custom Skill")
|
||||
async def update_custom_skill(
|
||||
skill_name: str,
|
||||
request: CustomSkillUpdateRequest,
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> CustomSkillContentResponse:
|
||||
try:
|
||||
ensure_custom_skill_is_editable(skill_name, app_config)
|
||||
validate_skill_markdown_content(skill_name, request.content)
|
||||
scan = await scan_skill_content(app_config, request.content, executable=False, location=f"{skill_name}/SKILL.md")
|
||||
if scan.decision == "block":
|
||||
raise HTTPException(status_code=400, detail=f"Security scan blocked the edit: {scan.reason}")
|
||||
skill_file = get_custom_skill_dir(skill_name, app_config) / "SKILL.md"
|
||||
prev_content = skill_file.read_text(encoding="utf-8")
|
||||
atomic_write(skill_file, request.content)
|
||||
append_history(
|
||||
skill_name,
|
||||
{
|
||||
"action": "human_edit",
|
||||
"author": "human",
|
||||
"thread_id": None,
|
||||
"file_path": "SKILL.md",
|
||||
"prev_content": prev_content,
|
||||
"new_content": request.content,
|
||||
"scanner": {"decision": scan.decision, "reason": scan.reason},
|
||||
},
|
||||
app_config,
|
||||
)
|
||||
await refresh_skills_system_prompt_cache_async(app_config)
|
||||
return await get_custom_skill(skill_name, app_config)
|
||||
except HTTPException:
|
||||
raise
|
||||
except FileNotFoundError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Failed to update custom skill %s: %s", skill_name, e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update custom skill: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/skills/custom/{skill_name}", summary="Delete Custom Skill")
|
||||
async def delete_custom_skill(skill_name: str, app_config: AppConfig = Depends(get_config)) -> dict[str, bool]:
|
||||
try:
|
||||
ensure_custom_skill_is_editable(skill_name, app_config)
|
||||
skill_dir = get_custom_skill_dir(skill_name, app_config)
|
||||
prev_content = read_custom_skill_content(skill_name, app_config)
|
||||
try:
|
||||
append_history(
|
||||
skill_name,
|
||||
{
|
||||
"action": "human_delete",
|
||||
"author": "human",
|
||||
"thread_id": None,
|
||||
"file_path": "SKILL.md",
|
||||
"prev_content": prev_content,
|
||||
"new_content": None,
|
||||
"scanner": {"decision": "allow", "reason": "Deletion requested."},
|
||||
},
|
||||
app_config,
|
||||
)
|
||||
except OSError as e:
|
||||
if not isinstance(e, PermissionError) and e.errno not in {errno.EACCES, errno.EPERM, errno.EROFS}:
|
||||
raise
|
||||
logger.warning("Skipping delete history write for custom skill %s due to readonly/permission failure; continuing with skill directory removal: %s", skill_name, e)
|
||||
shutil.rmtree(skill_dir)
|
||||
await refresh_skills_system_prompt_cache_async(app_config)
|
||||
return {"success": True}
|
||||
except FileNotFoundError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Failed to delete custom skill %s: %s", skill_name, e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to delete custom skill: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/skills/custom/{skill_name}/history", response_model=CustomSkillHistoryResponse, summary="Get Custom Skill History")
|
||||
async def get_custom_skill_history(skill_name: str, app_config: AppConfig = Depends(get_config)) -> CustomSkillHistoryResponse:
|
||||
try:
|
||||
if not custom_skill_exists(skill_name, app_config) and not get_skill_history_file(skill_name, app_config).exists():
|
||||
raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
|
||||
return CustomSkillHistoryResponse(history=read_history(skill_name, app_config))
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Failed to read history for %s: %s", skill_name, e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to read history: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/skills/custom/{skill_name}/rollback", response_model=CustomSkillContentResponse, summary="Rollback Custom Skill")
|
||||
async def rollback_custom_skill(
|
||||
skill_name: str,
|
||||
request: SkillRollbackRequest,
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> CustomSkillContentResponse:
|
||||
try:
|
||||
if not custom_skill_exists(skill_name, app_config) and not get_skill_history_file(skill_name, app_config).exists():
|
||||
raise HTTPException(status_code=404, detail=f"Custom skill '{skill_name}' not found")
|
||||
history = read_history(skill_name, app_config)
|
||||
if not history:
|
||||
raise HTTPException(status_code=400, detail=f"Custom skill '{skill_name}' has no history")
|
||||
record = history[request.history_index]
|
||||
target_content = record.get("prev_content")
|
||||
if target_content is None:
|
||||
raise HTTPException(status_code=400, detail="Selected history entry has no previous content to roll back to")
|
||||
validate_skill_markdown_content(skill_name, target_content)
|
||||
scan = await scan_skill_content(app_config, target_content, executable=False, location=f"{skill_name}/SKILL.md")
|
||||
skill_file = get_custom_skill_file(skill_name, app_config)
|
||||
current_content = skill_file.read_text(encoding="utf-8") if skill_file.exists() else None
|
||||
history_entry = {
|
||||
"action": "rollback",
|
||||
"author": "human",
|
||||
"thread_id": None,
|
||||
"file_path": "SKILL.md",
|
||||
"prev_content": current_content,
|
||||
"new_content": target_content,
|
||||
"rollback_from_ts": record.get("ts"),
|
||||
"scanner": {"decision": scan.decision, "reason": scan.reason},
|
||||
}
|
||||
if scan.decision == "block":
|
||||
append_history(skill_name, history_entry, app_config)
|
||||
raise HTTPException(status_code=400, detail=f"Rollback blocked by security scanner: {scan.reason}")
|
||||
atomic_write(skill_file, target_content)
|
||||
append_history(skill_name, history_entry, app_config)
|
||||
await refresh_skills_system_prompt_cache_async(app_config)
|
||||
return await get_custom_skill(skill_name, app_config)
|
||||
except HTTPException:
|
||||
raise
|
||||
except IndexError:
|
||||
raise HTTPException(status_code=400, detail="history_index is out of range")
|
||||
except FileNotFoundError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
logger.error("Failed to roll back custom skill %s: %s", skill_name, e, exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to roll back custom skill: {str(e)}")
|
||||
|
||||
|
||||
@router.get(
|
||||
"/skills/{skill_name}",
|
||||
response_model=SkillResponse,
|
||||
summary="Get Skill Details",
|
||||
description="Retrieve detailed information about a specific skill by its name.",
|
||||
)
|
||||
async def get_skill(skill_name: str) -> SkillResponse:
|
||||
async def get_skill(skill_name: str, app_config: AppConfig = Depends(get_config)) -> SkillResponse:
|
||||
try:
|
||||
skills = load_skills(enabled_only=False)
|
||||
skills = load_skills(app_config, enabled_only=False)
|
||||
skill = next((s for s in skills if s.name == skill_name), None)
|
||||
|
||||
if skill is None:
|
||||
@@ -106,9 +330,14 @@ async def get_skill(skill_name: str) -> SkillResponse:
|
||||
summary="Update Skill",
|
||||
description="Update a skill's enabled status by modifying the extensions_config.json file.",
|
||||
)
|
||||
async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillResponse:
|
||||
async def update_skill(
|
||||
skill_name: str,
|
||||
request: SkillUpdateRequest,
|
||||
http_request: Request,
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> SkillResponse:
|
||||
try:
|
||||
skills = load_skills(enabled_only=False)
|
||||
skills = load_skills(app_config, enabled_only=False)
|
||||
skill = next((s for s in skills if s.name == skill_name), None)
|
||||
|
||||
if skill is None:
|
||||
@@ -119,21 +348,29 @@ async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillRes
|
||||
config_path = Path.cwd().parent / "extensions_config.json"
|
||||
logger.info(f"No existing extensions config found. Creating new config at: {config_path}")
|
||||
|
||||
extensions_config = get_extensions_config()
|
||||
extensions_config.skills[skill_name] = SkillStateConfig(enabled=request.enabled)
|
||||
# Do not mutate the frozen AppConfig in place. Compose the new skills
|
||||
# state in a fresh dict, write to disk, and reload AppConfig below so
|
||||
# every subsequent Depends(get_config) sees the refreshed snapshot.
|
||||
ext = app_config.extensions
|
||||
updated_skills = {name: {"enabled": skill_config.enabled} for name, skill_config in ext.skills.items()}
|
||||
updated_skills[skill_name] = {"enabled": request.enabled}
|
||||
|
||||
config_data = {
|
||||
"mcpServers": {name: server.model_dump() for name, server in extensions_config.mcp_servers.items()},
|
||||
"skills": {name: {"enabled": skill_config.enabled} for name, skill_config in extensions_config.skills.items()},
|
||||
"mcpServers": {name: server.model_dump() for name, server in ext.mcp_servers.items()},
|
||||
"skills": updated_skills,
|
||||
}
|
||||
|
||||
with open(config_path, "w", encoding="utf-8") as f:
|
||||
json.dump(config_data, f, indent=2)
|
||||
|
||||
logger.info(f"Skills configuration updated and saved to: {config_path}")
|
||||
reload_extensions_config()
|
||||
# Reload AppConfig and swap ``app.state.config`` so subsequent
|
||||
# ``Depends(get_config)`` sees the refreshed value.
|
||||
reloaded = AppConfig.from_file()
|
||||
http_request.app.state.config = reloaded
|
||||
await refresh_skills_system_prompt_cache_async(reloaded)
|
||||
|
||||
skills = load_skills(enabled_only=False)
|
||||
skills = load_skills(reloaded, enabled_only=False)
|
||||
updated_skill = next((s for s in skills if s.name == skill_name), None)
|
||||
|
||||
if updated_skill is None:
|
||||
@@ -147,27 +384,3 @@ async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillRes
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update skill {skill_name}: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update skill: {str(e)}")
|
||||
|
||||
|
||||
@router.post(
|
||||
"/skills/install",
|
||||
response_model=SkillInstallResponse,
|
||||
summary="Install Skill",
|
||||
description="Install a skill from a .skill file (ZIP archive) located in the thread's user-data directory.",
|
||||
)
|
||||
async def install_skill(request: SkillInstallRequest) -> SkillInstallResponse:
|
||||
try:
|
||||
skill_file_path = resolve_thread_virtual_path(request.thread_id, request.path)
|
||||
result = install_skill_from_archive(skill_file_path)
|
||||
return SkillInstallResponse(**result)
|
||||
except FileNotFoundError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except SkillAlreadyExistsError as e:
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to install skill: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to install skill: {str(e)}")
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.models import create_chat_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -97,31 +101,31 @@ def _format_conversation(messages: list[SuggestionMessage]) -> str:
|
||||
summary="Generate Follow-up Questions",
|
||||
description="Generate short follow-up questions a user might ask next, based on recent conversation context.",
|
||||
)
|
||||
async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> SuggestionsResponse:
|
||||
if not request.messages:
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def generate_suggestions(thread_id: str, body: SuggestionsRequest, request: Request, app_config: AppConfig = Depends(get_config)) -> SuggestionsResponse:
|
||||
if not body.messages:
|
||||
return SuggestionsResponse(suggestions=[])
|
||||
|
||||
n = request.n
|
||||
conversation = _format_conversation(request.messages)
|
||||
n = body.n
|
||||
conversation = _format_conversation(body.messages)
|
||||
if not conversation:
|
||||
return SuggestionsResponse(suggestions=[])
|
||||
|
||||
prompt = (
|
||||
system_instruction = (
|
||||
"You are generating follow-up questions to help the user continue the conversation.\n"
|
||||
f"Based on the conversation below, produce EXACTLY {n} short questions the user might ask next.\n"
|
||||
"Requirements:\n"
|
||||
"- Questions must be relevant to the conversation.\n"
|
||||
"- Questions must be relevant to the preceding conversation.\n"
|
||||
"- Questions must be written in the same language as the user.\n"
|
||||
"- Keep each question concise (ideally <= 20 words / <= 40 Chinese characters).\n"
|
||||
"- Do NOT include numbering, markdown, or any extra text.\n"
|
||||
"- Output MUST be a JSON array of strings only.\n\n"
|
||||
"Conversation:\n"
|
||||
f"{conversation}\n"
|
||||
"- Output MUST be a JSON array of strings only.\n"
|
||||
)
|
||||
user_content = f"Conversation Context:\n{conversation}\n\nGenerate {n} follow-up questions"
|
||||
|
||||
try:
|
||||
model = create_chat_model(name=request.model_name, thinking_enabled=False)
|
||||
response = model.invoke(prompt)
|
||||
model = create_chat_model(name=body.model_name, thinking_enabled=False, app_config=app_config)
|
||||
response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)], config={"run_name": "suggest_agent"})
|
||||
raw = _extract_response_text(response.content)
|
||||
suggestions = _parse_json_string_list(raw) or []
|
||||
cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()]
|
||||
|
||||
@@ -19,7 +19,8 @@ from fastapi import APIRouter, HTTPException, Query, Request
|
||||
from fastapi.responses import Response, StreamingResponse
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.gateway.deps import get_checkpointer, get_run_manager, get_stream_bridge
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_checkpointer, get_current_user, get_feedback_repo, get_run_event_store, get_run_manager, get_run_store, get_stream_bridge
|
||||
from app.gateway.services import sse_consumer, start_run
|
||||
from deerflow.runtime import RunRecord, serialize_channel_values
|
||||
|
||||
@@ -38,6 +39,7 @@ class RunCreateRequest(BaseModel):
|
||||
command: dict[str, Any] | None = Field(default=None, description="LangGraph Command")
|
||||
metadata: dict[str, Any] | None = Field(default=None, description="Run metadata")
|
||||
config: dict[str, Any] | None = Field(default=None, description="RunnableConfig overrides")
|
||||
context: dict[str, Any] | None = Field(default=None, description="DeerFlow context overrides (model_name, thinking_enabled, etc.)")
|
||||
webhook: str | None = Field(default=None, description="Completion callback URL")
|
||||
checkpoint_id: str | None = Field(default=None, description="Resume from checkpoint")
|
||||
checkpoint: dict[str, Any] | None = Field(default=None, description="Full checkpoint object")
|
||||
@@ -52,6 +54,7 @@ class RunCreateRequest(BaseModel):
|
||||
after_seconds: float | None = Field(default=None, description="Delayed execution")
|
||||
if_not_exists: Literal["reject", "create"] = Field(default="create", description="Thread creation policy")
|
||||
feedback_keys: list[str] | None = Field(default=None, description="LangSmith feedback keys")
|
||||
follow_up_to_run_id: str | None = Field(default=None, description="Run ID this message follows up on. Auto-detected from latest successful run if not provided.")
|
||||
|
||||
|
||||
class RunResponse(BaseModel):
|
||||
@@ -91,6 +94,7 @@ def _record_to_response(record: RunRecord) -> RunResponse:
|
||||
|
||||
|
||||
@router.post("/{thread_id}/runs", response_model=RunResponse)
|
||||
@require_permission("runs", "create", owner_check=True, require_existing=True)
|
||||
async def create_run(thread_id: str, body: RunCreateRequest, request: Request) -> RunResponse:
|
||||
"""Create a background run (returns immediately)."""
|
||||
record = await start_run(body, thread_id, request)
|
||||
@@ -98,6 +102,7 @@ async def create_run(thread_id: str, body: RunCreateRequest, request: Request) -
|
||||
|
||||
|
||||
@router.post("/{thread_id}/runs/stream")
|
||||
@require_permission("runs", "create", owner_check=True, require_existing=True)
|
||||
async def stream_run(thread_id: str, body: RunCreateRequest, request: Request) -> StreamingResponse:
|
||||
"""Create a run and stream events via SSE.
|
||||
|
||||
@@ -117,13 +122,15 @@ async def stream_run(thread_id: str, body: RunCreateRequest, request: Request) -
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
# LangGraph Platform includes run metadata in this header.
|
||||
# The SDK's _get_run_metadata_from_response() parses it.
|
||||
"Content-Location": (f"/api/threads/{thread_id}/runs/{record.run_id}/stream?thread_id={thread_id}&run_id={record.run_id}"),
|
||||
# The SDK uses a greedy regex to extract the run id from this path,
|
||||
# so it must point at the canonical run resource without extra suffixes.
|
||||
"Content-Location": f"/api/threads/{thread_id}/runs/{record.run_id}",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.post("/{thread_id}/runs/wait", response_model=dict)
|
||||
@require_permission("runs", "create", owner_check=True, require_existing=True)
|
||||
async def wait_run(thread_id: str, body: RunCreateRequest, request: Request) -> dict:
|
||||
"""Create a run and block until it completes, returning the final state."""
|
||||
record = await start_run(body, thread_id, request)
|
||||
@@ -149,6 +156,7 @@ async def wait_run(thread_id: str, body: RunCreateRequest, request: Request) ->
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs", response_model=list[RunResponse])
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def list_runs(thread_id: str, request: Request) -> list[RunResponse]:
|
||||
"""List all runs for a thread."""
|
||||
run_mgr = get_run_manager(request)
|
||||
@@ -157,6 +165,7 @@ async def list_runs(thread_id: str, request: Request) -> list[RunResponse]:
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}", response_model=RunResponse)
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def get_run(thread_id: str, run_id: str, request: Request) -> RunResponse:
|
||||
"""Get details of a specific run."""
|
||||
run_mgr = get_run_manager(request)
|
||||
@@ -167,6 +176,7 @@ async def get_run(thread_id: str, run_id: str, request: Request) -> RunResponse:
|
||||
|
||||
|
||||
@router.post("/{thread_id}/runs/{run_id}/cancel")
|
||||
@require_permission("runs", "cancel", owner_check=True, require_existing=True)
|
||||
async def cancel_run(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
@@ -204,6 +214,7 @@ async def cancel_run(
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}/join")
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def join_run(thread_id: str, run_id: str, request: Request) -> StreamingResponse:
|
||||
"""Join an existing run's SSE stream."""
|
||||
bridge = get_stream_bridge(request)
|
||||
@@ -224,6 +235,7 @@ async def join_run(thread_id: str, run_id: str, request: Request) -> StreamingRe
|
||||
|
||||
|
||||
@router.api_route("/{thread_id}/runs/{run_id}/stream", methods=["GET", "POST"], response_model=None)
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def stream_existing_run(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
@@ -263,3 +275,99 @@ async def stream_existing_run(
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Messages / Events / Token usage endpoints
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@router.get("/{thread_id}/messages")
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def list_thread_messages(
|
||||
thread_id: str,
|
||||
request: Request,
|
||||
limit: int = Query(default=50, le=200),
|
||||
before_seq: int | None = Query(default=None),
|
||||
after_seq: int | None = Query(default=None),
|
||||
) -> list[dict]:
|
||||
"""Return displayable messages for a thread (across all runs), with feedback attached."""
|
||||
event_store = get_run_event_store(request)
|
||||
messages = await event_store.list_messages(thread_id, limit=limit, before_seq=before_seq, after_seq=after_seq)
|
||||
|
||||
# Attach feedback to the last AI message of each run
|
||||
feedback_repo = get_feedback_repo(request)
|
||||
user_id = await get_current_user(request)
|
||||
feedback_map = await feedback_repo.list_by_thread_grouped(thread_id, user_id=user_id)
|
||||
|
||||
# Find the last ai_message per run_id
|
||||
last_ai_per_run: dict[str, int] = {} # run_id -> index in messages list
|
||||
for i, msg in enumerate(messages):
|
||||
if msg.get("event_type") == "ai_message":
|
||||
last_ai_per_run[msg["run_id"]] = i
|
||||
|
||||
# Attach feedback field
|
||||
last_ai_indices = set(last_ai_per_run.values())
|
||||
for i, msg in enumerate(messages):
|
||||
if i in last_ai_indices:
|
||||
run_id = msg["run_id"]
|
||||
fb = feedback_map.get(run_id)
|
||||
msg["feedback"] = {
|
||||
"feedback_id": fb["feedback_id"],
|
||||
"rating": fb["rating"],
|
||||
"comment": fb.get("comment"),
|
||||
} if fb else None
|
||||
else:
|
||||
msg["feedback"] = None
|
||||
|
||||
return messages
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}/messages")
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def list_run_messages(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
request: Request,
|
||||
limit: int = Query(default=50, le=200, ge=1),
|
||||
before_seq: int | None = Query(default=None),
|
||||
after_seq: int | None = Query(default=None),
|
||||
) -> dict:
|
||||
"""Return paginated messages for a specific run.
|
||||
|
||||
Response: { data: [...], has_more: bool }
|
||||
"""
|
||||
event_store = get_run_event_store(request)
|
||||
rows = await event_store.list_messages_by_run(
|
||||
thread_id, run_id,
|
||||
limit=limit + 1,
|
||||
before_seq=before_seq,
|
||||
after_seq=after_seq,
|
||||
)
|
||||
has_more = len(rows) > limit
|
||||
data = rows[:limit] if has_more else rows
|
||||
return {"data": data, "has_more": has_more}
|
||||
|
||||
|
||||
@router.get("/{thread_id}/runs/{run_id}/events")
|
||||
@require_permission("runs", "read", owner_check=True)
|
||||
async def list_run_events(
|
||||
thread_id: str,
|
||||
run_id: str,
|
||||
request: Request,
|
||||
event_types: str | None = Query(default=None),
|
||||
limit: int = Query(default=500, le=2000),
|
||||
) -> list[dict]:
|
||||
"""Return the full event stream for a run (debug/audit)."""
|
||||
event_store = get_run_event_store(request)
|
||||
types = event_types.split(",") if event_types else None
|
||||
return await event_store.list_events(thread_id, run_id, event_types=types, limit=limit)
|
||||
|
||||
|
||||
@router.get("/{thread_id}/token-usage")
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def thread_token_usage(thread_id: str, request: Request) -> dict:
|
||||
"""Thread-level token usage aggregation."""
|
||||
run_store = get_run_store(request)
|
||||
agg = await run_store.aggregate_tokens_by_thread(thread_id)
|
||||
return {"thread_id": thread_id, **agg}
|
||||
|
||||
@@ -13,28 +13,41 @@ matching the LangGraph Platform wire format expected by the
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
from app.gateway.deps import get_checkpointer, get_store
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_checkpointer
|
||||
from app.gateway.utils import sanitize_log_param
|
||||
from deerflow.config.paths import Paths, get_paths
|
||||
from deerflow.runtime import serialize_channel_values
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Store namespace
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
THREADS_NS: tuple[str, ...] = ("threads",)
|
||||
"""Namespace used by the Store for thread metadata records."""
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api/threads", tags=["threads"])
|
||||
|
||||
|
||||
# Metadata keys that the server controls; clients are not allowed to set
|
||||
# them. Pydantic ``@field_validator("metadata")`` strips them on every
|
||||
# inbound model below so a malicious client cannot reflect a forged
|
||||
# owner identity through the API surface. Defense-in-depth — the
|
||||
# row-level invariant is still ``threads_meta.user_id`` populated from
|
||||
# the auth contextvar; this list closes the metadata-blob echo gap.
|
||||
_SERVER_RESERVED_METADATA_KEYS: frozenset[str] = frozenset({"owner_id", "user_id"})
|
||||
|
||||
|
||||
def _strip_reserved_metadata(metadata: dict[str, Any] | None) -> dict[str, Any]:
|
||||
"""Return ``metadata`` with server-controlled keys removed."""
|
||||
if not metadata:
|
||||
return metadata or {}
|
||||
return {k: v for k, v in metadata.items() if k not in _SERVER_RESERVED_METADATA_KEYS}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Response / request models
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -63,8 +76,11 @@ class ThreadCreateRequest(BaseModel):
|
||||
"""Request body for creating a thread."""
|
||||
|
||||
thread_id: str | None = Field(default=None, description="Optional thread ID (auto-generated if omitted)")
|
||||
assistant_id: str | None = Field(default=None, description="Associate thread with an assistant")
|
||||
metadata: dict[str, Any] = Field(default_factory=dict, description="Initial metadata")
|
||||
|
||||
_strip_reserved = field_validator("metadata")(classmethod(lambda cls, v: _strip_reserved_metadata(v)))
|
||||
|
||||
|
||||
class ThreadSearchRequest(BaseModel):
|
||||
"""Request body for searching threads."""
|
||||
@@ -93,6 +109,8 @@ class ThreadPatchRequest(BaseModel):
|
||||
|
||||
metadata: dict[str, Any] = Field(default_factory=dict, description="Metadata to merge")
|
||||
|
||||
_strip_reserved = field_validator("metadata")(classmethod(lambda cls, v: _strip_reserved_metadata(v)))
|
||||
|
||||
|
||||
class ThreadStateUpdateRequest(BaseModel):
|
||||
"""Request body for updating thread state (human-in-the-loop resume)."""
|
||||
@@ -126,70 +144,25 @@ class ThreadHistoryRequest(BaseModel):
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _delete_thread_data(thread_id: str, paths: Paths | None = None) -> ThreadDeleteResponse:
|
||||
def _delete_thread_data(thread_id: str, paths: Paths | None = None, *, user_id: str | None = None) -> ThreadDeleteResponse:
|
||||
"""Delete local persisted filesystem data for a thread."""
|
||||
path_manager = paths or get_paths()
|
||||
try:
|
||||
path_manager.delete_thread_dir(thread_id)
|
||||
path_manager.delete_thread_dir(thread_id, user_id=user_id)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(status_code=422, detail=str(exc)) from exc
|
||||
except FileNotFoundError:
|
||||
# Not critical — thread data may not exist on disk
|
||||
logger.debug("No local thread data to delete for %s", thread_id)
|
||||
logger.debug("No local thread data to delete for %s", sanitize_log_param(thread_id))
|
||||
return ThreadDeleteResponse(success=True, message=f"No local data for {thread_id}")
|
||||
except Exception as exc:
|
||||
logger.exception("Failed to delete thread data for %s", thread_id)
|
||||
logger.exception("Failed to delete thread data for %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to delete local thread data.") from exc
|
||||
|
||||
logger.info("Deleted local thread data for %s", thread_id)
|
||||
logger.info("Deleted local thread data for %s", sanitize_log_param(thread_id))
|
||||
return ThreadDeleteResponse(success=True, message=f"Deleted local thread data for {thread_id}")
|
||||
|
||||
|
||||
async def _store_get(store, thread_id: str) -> dict | None:
|
||||
"""Fetch a thread record from the Store; returns ``None`` if absent."""
|
||||
item = await store.aget(THREADS_NS, thread_id)
|
||||
return item.value if item is not None else None
|
||||
|
||||
|
||||
async def _store_put(store, record: dict) -> None:
|
||||
"""Write a thread record to the Store."""
|
||||
await store.aput(THREADS_NS, record["thread_id"], record)
|
||||
|
||||
|
||||
async def _store_upsert(store, thread_id: str, *, metadata: dict | None = None, values: dict | None = None) -> None:
|
||||
"""Create or refresh a thread record in the Store.
|
||||
|
||||
On creation the record is written with ``status="idle"``. On update only
|
||||
``updated_at`` (and optionally ``metadata`` / ``values``) are changed so
|
||||
that existing fields are preserved.
|
||||
|
||||
``values`` carries the agent-state snapshot exposed to the frontend
|
||||
(currently just ``{"title": "..."}``).
|
||||
"""
|
||||
now = time.time()
|
||||
existing = await _store_get(store, thread_id)
|
||||
if existing is None:
|
||||
await _store_put(
|
||||
store,
|
||||
{
|
||||
"thread_id": thread_id,
|
||||
"status": "idle",
|
||||
"created_at": now,
|
||||
"updated_at": now,
|
||||
"metadata": metadata or {},
|
||||
"values": values or {},
|
||||
},
|
||||
)
|
||||
else:
|
||||
val = dict(existing)
|
||||
val["updated_at"] = now
|
||||
if metadata:
|
||||
val.setdefault("metadata", {}).update(metadata)
|
||||
if values:
|
||||
val.setdefault("values", {}).update(values)
|
||||
await _store_put(store, val)
|
||||
|
||||
|
||||
def _derive_thread_status(checkpoint_tuple) -> str:
|
||||
"""Derive thread status from checkpoint metadata."""
|
||||
if checkpoint_tuple is None:
|
||||
@@ -215,22 +188,18 @@ def _derive_thread_status(checkpoint_tuple) -> str:
|
||||
|
||||
|
||||
@router.delete("/{thread_id}", response_model=ThreadDeleteResponse)
|
||||
@require_permission("threads", "delete", owner_check=True, require_existing=True)
|
||||
async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteResponse:
|
||||
"""Delete local persisted filesystem data for a thread.
|
||||
|
||||
Cleans DeerFlow-managed thread directories, removes checkpoint data,
|
||||
and removes the thread record from the Store.
|
||||
and removes the thread_meta row from the configured ThreadMetaStore
|
||||
(sqlite or memory).
|
||||
"""
|
||||
# Clean local filesystem
|
||||
response = _delete_thread_data(thread_id)
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
# Remove from Store (best-effort)
|
||||
store = get_store(request)
|
||||
if store is not None:
|
||||
try:
|
||||
await store.adelete(THREADS_NS, thread_id)
|
||||
except Exception:
|
||||
logger.debug("Could not delete store record for thread %s (not critical)", thread_id)
|
||||
# Clean local filesystem
|
||||
response = _delete_thread_data(thread_id, user_id=get_effective_user_id())
|
||||
|
||||
# Remove checkpoints (best-effort)
|
||||
checkpointer = getattr(request.app.state, "checkpointer", None)
|
||||
@@ -239,7 +208,15 @@ async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteRe
|
||||
if hasattr(checkpointer, "adelete_thread"):
|
||||
await checkpointer.adelete_thread(thread_id)
|
||||
except Exception:
|
||||
logger.debug("Could not delete checkpoints for thread %s (not critical)", thread_id)
|
||||
logger.debug("Could not delete checkpoints for thread %s (not critical)", sanitize_log_param(thread_id))
|
||||
|
||||
# Remove thread_meta row (best-effort) — required for sqlite backend
|
||||
# so the deleted thread no longer appears in /threads/search.
|
||||
try:
|
||||
thread_store = get_thread_store(request)
|
||||
await thread_store.delete(thread_id)
|
||||
except Exception:
|
||||
logger.debug("Could not delete thread_meta for %s (not critical)", sanitize_log_param(thread_id))
|
||||
|
||||
return response
|
||||
|
||||
@@ -248,43 +225,40 @@ async def delete_thread_data(thread_id: str, request: Request) -> ThreadDeleteRe
|
||||
async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadResponse:
|
||||
"""Create a new thread.
|
||||
|
||||
The thread record is written to the Store (for fast listing) and an
|
||||
empty checkpoint is written to the checkpointer (for state reads).
|
||||
Writes a thread_meta record (so the thread appears in /threads/search)
|
||||
and an empty checkpoint (so state endpoints work immediately).
|
||||
Idempotent: returns the existing record when ``thread_id`` already exists.
|
||||
"""
|
||||
store = get_store(request)
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
checkpointer = get_checkpointer(request)
|
||||
thread_store = get_thread_store(request)
|
||||
thread_id = body.thread_id or str(uuid.uuid4())
|
||||
now = time.time()
|
||||
# ``body.metadata`` is already stripped of server-reserved keys by
|
||||
# ``ThreadCreateRequest._strip_reserved`` — see the model definition.
|
||||
|
||||
# Idempotency: return existing record from Store when already present
|
||||
if store is not None:
|
||||
existing_record = await _store_get(store, thread_id)
|
||||
if existing_record is not None:
|
||||
return ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status=existing_record.get("status", "idle"),
|
||||
created_at=str(existing_record.get("created_at", "")),
|
||||
updated_at=str(existing_record.get("updated_at", "")),
|
||||
metadata=existing_record.get("metadata", {}),
|
||||
)
|
||||
# Idempotency: return existing record when already present
|
||||
existing_record = await thread_store.get(thread_id)
|
||||
if existing_record is not None:
|
||||
return ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status=existing_record.get("status", "idle"),
|
||||
created_at=str(existing_record.get("created_at", "")),
|
||||
updated_at=str(existing_record.get("updated_at", "")),
|
||||
metadata=existing_record.get("metadata", {}),
|
||||
)
|
||||
|
||||
# Write thread record to Store
|
||||
if store is not None:
|
||||
try:
|
||||
await _store_put(
|
||||
store,
|
||||
{
|
||||
"thread_id": thread_id,
|
||||
"status": "idle",
|
||||
"created_at": now,
|
||||
"updated_at": now,
|
||||
"metadata": body.metadata,
|
||||
},
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to write thread %s to store", thread_id)
|
||||
raise HTTPException(status_code=500, detail="Failed to create thread")
|
||||
# Write thread_meta so the thread appears in /threads/search immediately
|
||||
try:
|
||||
await thread_store.create(
|
||||
thread_id,
|
||||
assistant_id=getattr(body, "assistant_id", None),
|
||||
metadata=body.metadata,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to write thread_meta for %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to create thread")
|
||||
|
||||
# Write an empty checkpoint so state endpoints work immediately
|
||||
config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
|
||||
@@ -301,10 +275,10 @@ async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadRe
|
||||
}
|
||||
await checkpointer.aput(config, empty_checkpoint(), ckpt_metadata, {})
|
||||
except Exception:
|
||||
logger.exception("Failed to create checkpoint for thread %s", thread_id)
|
||||
logger.exception("Failed to create checkpoint for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to create thread")
|
||||
|
||||
logger.info("Thread created: %s", thread_id)
|
||||
logger.info("Thread created: %s", sanitize_log_param(thread_id))
|
||||
return ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status="idle",
|
||||
@@ -318,166 +292,91 @@ async def create_thread(body: ThreadCreateRequest, request: Request) -> ThreadRe
|
||||
async def search_threads(body: ThreadSearchRequest, request: Request) -> list[ThreadResponse]:
|
||||
"""Search and list threads.
|
||||
|
||||
Two-phase approach:
|
||||
|
||||
**Phase 1 — Store (fast path, O(threads))**: returns threads that were
|
||||
created or run through this Gateway. Store records are tiny metadata
|
||||
dicts so fetching all of them at once is cheap.
|
||||
|
||||
**Phase 2 — Checkpointer supplement (lazy migration)**: threads that
|
||||
were created directly by LangGraph Server (and therefore absent from the
|
||||
Store) are discovered here by iterating the shared checkpointer. Any
|
||||
newly found thread is immediately written to the Store so that the next
|
||||
search skips Phase 2 for that thread — the Store converges to a full
|
||||
index over time without a one-shot migration job.
|
||||
Delegates to the configured ThreadMetaStore implementation
|
||||
(SQL-backed for sqlite/postgres, Store-backed for memory mode).
|
||||
"""
|
||||
store = get_store(request)
|
||||
checkpointer = get_checkpointer(request)
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# Phase 1: Store
|
||||
# -----------------------------------------------------------------------
|
||||
merged: dict[str, ThreadResponse] = {}
|
||||
|
||||
if store is not None:
|
||||
try:
|
||||
items = await store.asearch(THREADS_NS, limit=10_000)
|
||||
except Exception:
|
||||
logger.warning("Store search failed — falling back to checkpointer only", exc_info=True)
|
||||
items = []
|
||||
|
||||
for item in items:
|
||||
val = item.value
|
||||
merged[val["thread_id"]] = ThreadResponse(
|
||||
thread_id=val["thread_id"],
|
||||
status=val.get("status", "idle"),
|
||||
created_at=str(val.get("created_at", "")),
|
||||
updated_at=str(val.get("updated_at", "")),
|
||||
metadata=val.get("metadata", {}),
|
||||
values=val.get("values", {}),
|
||||
)
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# Phase 2: Checkpointer supplement
|
||||
# Discovers threads not yet in the Store (e.g. created by LangGraph
|
||||
# Server) and lazily migrates them so future searches skip this phase.
|
||||
# -----------------------------------------------------------------------
|
||||
try:
|
||||
async for checkpoint_tuple in checkpointer.alist(None):
|
||||
cfg = getattr(checkpoint_tuple, "config", {})
|
||||
thread_id = cfg.get("configurable", {}).get("thread_id")
|
||||
if not thread_id or thread_id in merged:
|
||||
continue
|
||||
|
||||
# Skip sub-graph checkpoints (checkpoint_ns is non-empty for those)
|
||||
if cfg.get("configurable", {}).get("checkpoint_ns", ""):
|
||||
continue
|
||||
|
||||
ckpt_meta = getattr(checkpoint_tuple, "metadata", {}) or {}
|
||||
# Strip LangGraph internal keys from the user-visible metadata dict
|
||||
user_meta = {k: v for k, v in ckpt_meta.items() if k not in ("created_at", "updated_at", "step", "source", "writes", "parents")}
|
||||
|
||||
# Extract state values (title) from the checkpoint's channel_values
|
||||
checkpoint_data = getattr(checkpoint_tuple, "checkpoint", {}) or {}
|
||||
channel_values = checkpoint_data.get("channel_values", {})
|
||||
ckpt_values = {}
|
||||
if title := channel_values.get("title"):
|
||||
ckpt_values["title"] = title
|
||||
|
||||
thread_resp = ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status=_derive_thread_status(checkpoint_tuple),
|
||||
created_at=str(ckpt_meta.get("created_at", "")),
|
||||
updated_at=str(ckpt_meta.get("updated_at", ckpt_meta.get("created_at", ""))),
|
||||
metadata=user_meta,
|
||||
values=ckpt_values,
|
||||
)
|
||||
merged[thread_id] = thread_resp
|
||||
|
||||
# Lazy migration — write to Store so the next search finds it there
|
||||
if store is not None:
|
||||
try:
|
||||
await _store_upsert(store, thread_id, metadata=user_meta, values=ckpt_values or None)
|
||||
except Exception:
|
||||
logger.debug("Failed to migrate thread %s to store (non-fatal)", thread_id)
|
||||
except Exception:
|
||||
logger.exception("Checkpointer scan failed during thread search")
|
||||
# Don't raise — return whatever was collected from Store + partial scan
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# Phase 3: Filter → sort → paginate
|
||||
# -----------------------------------------------------------------------
|
||||
results = list(merged.values())
|
||||
|
||||
if body.metadata:
|
||||
results = [r for r in results if all(r.metadata.get(k) == v for k, v in body.metadata.items())]
|
||||
|
||||
if body.status:
|
||||
results = [r for r in results if r.status == body.status]
|
||||
|
||||
results.sort(key=lambda r: r.updated_at, reverse=True)
|
||||
return results[body.offset : body.offset + body.limit]
|
||||
repo = get_thread_store(request)
|
||||
rows = await repo.search(
|
||||
metadata=body.metadata or None,
|
||||
status=body.status,
|
||||
limit=body.limit,
|
||||
offset=body.offset,
|
||||
)
|
||||
return [
|
||||
ThreadResponse(
|
||||
thread_id=r["thread_id"],
|
||||
status=r.get("status", "idle"),
|
||||
created_at=r.get("created_at", ""),
|
||||
updated_at=r.get("updated_at", ""),
|
||||
metadata=r.get("metadata", {}),
|
||||
values={"title": r["display_name"]} if r.get("display_name") else {},
|
||||
interrupts={},
|
||||
)
|
||||
for r in rows
|
||||
]
|
||||
|
||||
|
||||
@router.patch("/{thread_id}", response_model=ThreadResponse)
|
||||
@require_permission("threads", "write", owner_check=True, require_existing=True)
|
||||
async def patch_thread(thread_id: str, body: ThreadPatchRequest, request: Request) -> ThreadResponse:
|
||||
"""Merge metadata into a thread record."""
|
||||
store = get_store(request)
|
||||
if store is None:
|
||||
raise HTTPException(status_code=503, detail="Store not available")
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
record = await _store_get(store, thread_id)
|
||||
thread_store = get_thread_store(request)
|
||||
record = await thread_store.get(thread_id)
|
||||
if record is None:
|
||||
raise HTTPException(status_code=404, detail=f"Thread {thread_id} not found")
|
||||
|
||||
now = time.time()
|
||||
updated = dict(record)
|
||||
updated.setdefault("metadata", {}).update(body.metadata)
|
||||
updated["updated_at"] = now
|
||||
|
||||
# ``body.metadata`` already stripped by ``ThreadPatchRequest._strip_reserved``.
|
||||
try:
|
||||
await _store_put(store, updated)
|
||||
await thread_store.update_metadata(thread_id, body.metadata)
|
||||
except Exception:
|
||||
logger.exception("Failed to patch thread %s", thread_id)
|
||||
logger.exception("Failed to patch thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update thread")
|
||||
|
||||
# Re-read to get the merged metadata + refreshed updated_at
|
||||
record = await thread_store.get(thread_id) or record
|
||||
return ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status=updated.get("status", "idle"),
|
||||
created_at=str(updated.get("created_at", "")),
|
||||
updated_at=str(now),
|
||||
metadata=updated.get("metadata", {}),
|
||||
status=record.get("status", "idle"),
|
||||
created_at=str(record.get("created_at", "")),
|
||||
updated_at=str(record.get("updated_at", "")),
|
||||
metadata=record.get("metadata", {}),
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{thread_id}", response_model=ThreadResponse)
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def get_thread(thread_id: str, request: Request) -> ThreadResponse:
|
||||
"""Get thread info.
|
||||
|
||||
Reads metadata from the Store and derives the accurate execution
|
||||
status from the checkpointer. Falls back to the checkpointer alone
|
||||
for threads that pre-date Store adoption (backward compat).
|
||||
Reads metadata from the ThreadMetaStore and derives the accurate
|
||||
execution status from the checkpointer. Falls back to the checkpointer
|
||||
alone for threads that pre-date ThreadMetaStore adoption (backward compat).
|
||||
"""
|
||||
store = get_store(request)
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
thread_store = get_thread_store(request)
|
||||
checkpointer = get_checkpointer(request)
|
||||
|
||||
record: dict | None = None
|
||||
if store is not None:
|
||||
record = await _store_get(store, thread_id)
|
||||
record: dict | None = await thread_store.get(thread_id)
|
||||
|
||||
# Derive accurate status from the checkpointer
|
||||
config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
|
||||
try:
|
||||
checkpoint_tuple = await checkpointer.aget_tuple(config)
|
||||
except Exception:
|
||||
logger.exception("Failed to get checkpoint for thread %s", thread_id)
|
||||
logger.exception("Failed to get checkpoint for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get thread")
|
||||
|
||||
if record is None and checkpoint_tuple is None:
|
||||
raise HTTPException(status_code=404, detail=f"Thread {thread_id} not found")
|
||||
|
||||
# If the thread exists in the checkpointer but not the store (e.g. legacy
|
||||
# data), synthesize a minimal store record from the checkpoint metadata.
|
||||
# If the thread exists in the checkpointer but not in thread_meta (e.g.
|
||||
# legacy data created before thread_meta adoption), synthesize a minimal
|
||||
# record from the checkpoint metadata.
|
||||
if record is None and checkpoint_tuple is not None:
|
||||
ckpt_meta = getattr(checkpoint_tuple, "metadata", {}) or {}
|
||||
record = {
|
||||
@@ -488,21 +387,26 @@ async def get_thread(thread_id: str, request: Request) -> ThreadResponse:
|
||||
"metadata": {k: v for k, v in ckpt_meta.items() if k not in ("created_at", "updated_at", "step", "source", "writes", "parents")},
|
||||
}
|
||||
|
||||
status = _derive_thread_status(checkpoint_tuple) if checkpoint_tuple is not None else record.get("status", "idle") # type: ignore[union-attr]
|
||||
if record is None:
|
||||
raise HTTPException(status_code=404, detail=f"Thread {thread_id} not found")
|
||||
|
||||
status = _derive_thread_status(checkpoint_tuple) if checkpoint_tuple is not None else record.get("status", "idle")
|
||||
checkpoint = getattr(checkpoint_tuple, "checkpoint", {}) or {} if checkpoint_tuple is not None else {}
|
||||
channel_values = checkpoint.get("channel_values", {})
|
||||
|
||||
return ThreadResponse(
|
||||
thread_id=thread_id,
|
||||
status=status,
|
||||
created_at=str(record.get("created_at", "")), # type: ignore[union-attr]
|
||||
updated_at=str(record.get("updated_at", "")), # type: ignore[union-attr]
|
||||
metadata=record.get("metadata", {}), # type: ignore[union-attr]
|
||||
created_at=str(record.get("created_at", "")),
|
||||
updated_at=str(record.get("updated_at", "")),
|
||||
metadata=record.get("metadata", {}),
|
||||
values=serialize_channel_values(channel_values),
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@router.get("/{thread_id}/state", response_model=ThreadStateResponse)
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def get_thread_state(thread_id: str, request: Request) -> ThreadStateResponse:
|
||||
"""Get the latest state snapshot for a thread.
|
||||
|
||||
@@ -515,7 +419,7 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo
|
||||
try:
|
||||
checkpoint_tuple = await checkpointer.aget_tuple(config)
|
||||
except Exception:
|
||||
logger.exception("Failed to get state for thread %s", thread_id)
|
||||
logger.exception("Failed to get state for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get thread state")
|
||||
|
||||
if checkpoint_tuple is None:
|
||||
@@ -539,8 +443,10 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo
|
||||
next_tasks = [t.name for t in tasks_raw if hasattr(t, "name")]
|
||||
tasks = [{"id": getattr(t, "id", ""), "name": getattr(t, "name", "")} for t in tasks_raw]
|
||||
|
||||
values = serialize_channel_values(channel_values)
|
||||
|
||||
return ThreadStateResponse(
|
||||
values=serialize_channel_values(channel_values),
|
||||
values=values,
|
||||
next=next_tasks,
|
||||
metadata=metadata,
|
||||
checkpoint={"id": checkpoint_id, "ts": str(metadata.get("created_at", ""))},
|
||||
@@ -552,15 +458,19 @@ async def get_thread_state(thread_id: str, request: Request) -> ThreadStateRespo
|
||||
|
||||
|
||||
@router.post("/{thread_id}/state", response_model=ThreadStateResponse)
|
||||
@require_permission("threads", "write", owner_check=True, require_existing=True)
|
||||
async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, request: Request) -> ThreadStateResponse:
|
||||
"""Update thread state (e.g. for human-in-the-loop resume or title rename).
|
||||
|
||||
Writes a new checkpoint that merges *body.values* into the latest
|
||||
channel values, then syncs any updated ``title`` field back to the Store
|
||||
so that ``/threads/search`` reflects the change immediately.
|
||||
channel values, then syncs any updated ``title`` field through the
|
||||
ThreadMetaStore abstraction so that ``/threads/search`` reflects the
|
||||
change immediately in both sqlite and memory backends.
|
||||
"""
|
||||
from app.gateway.deps import get_thread_store
|
||||
|
||||
checkpointer = get_checkpointer(request)
|
||||
store = get_store(request)
|
||||
thread_store = get_thread_store(request)
|
||||
|
||||
# checkpoint_ns must be present in the config for aput — default to ""
|
||||
# (the root graph namespace). checkpoint_id is optional; omitting it
|
||||
@@ -577,7 +487,7 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re
|
||||
try:
|
||||
checkpoint_tuple = await checkpointer.aget_tuple(read_config)
|
||||
except Exception:
|
||||
logger.exception("Failed to get state for thread %s", thread_id)
|
||||
logger.exception("Failed to get state for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get thread state")
|
||||
|
||||
if checkpoint_tuple is None:
|
||||
@@ -611,19 +521,22 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re
|
||||
try:
|
||||
new_config = await checkpointer.aput(write_config, checkpoint, metadata, {})
|
||||
except Exception:
|
||||
logger.exception("Failed to update state for thread %s", thread_id)
|
||||
logger.exception("Failed to update state for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to update thread state")
|
||||
|
||||
new_checkpoint_id: str | None = None
|
||||
if isinstance(new_config, dict):
|
||||
new_checkpoint_id = new_config.get("configurable", {}).get("checkpoint_id")
|
||||
|
||||
# Sync title changes to the Store so /threads/search reflects them immediately.
|
||||
if store is not None and body.values and "title" in body.values:
|
||||
try:
|
||||
await _store_upsert(store, thread_id, values={"title": body.values["title"]})
|
||||
except Exception:
|
||||
logger.debug("Failed to sync title to store for thread %s (non-fatal)", thread_id)
|
||||
# Sync title changes through the ThreadMetaStore abstraction so /threads/search
|
||||
# reflects them immediately in both sqlite and memory backends.
|
||||
if body.values and "title" in body.values:
|
||||
new_title = body.values["title"]
|
||||
if new_title: # Skip empty strings and None
|
||||
try:
|
||||
await thread_store.update_display_name(thread_id, new_title)
|
||||
except Exception:
|
||||
logger.debug("Failed to sync title to thread_meta for %s (non-fatal)", sanitize_log_param(thread_id))
|
||||
|
||||
return ThreadStateResponse(
|
||||
values=serialize_channel_values(channel_values),
|
||||
@@ -635,8 +548,16 @@ async def update_thread_state(thread_id: str, body: ThreadStateUpdateRequest, re
|
||||
|
||||
|
||||
@router.post("/{thread_id}/history", response_model=list[HistoryEntry])
|
||||
@require_permission("threads", "read", owner_check=True)
|
||||
async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request: Request) -> list[HistoryEntry]:
|
||||
"""Get checkpoint history for a thread."""
|
||||
"""Get checkpoint history for a thread.
|
||||
|
||||
Messages are read from the checkpointer's channel values (the
|
||||
authoritative source) and serialized via
|
||||
:func:`~deerflow.runtime.serialization.serialize_channel_values`.
|
||||
Only the latest (first) checkpoint carries the ``messages`` key to
|
||||
avoid duplicating them across every entry.
|
||||
"""
|
||||
checkpointer = get_checkpointer(request)
|
||||
|
||||
config: dict[str, Any] = {"configurable": {"thread_id": thread_id}}
|
||||
@@ -644,6 +565,7 @@ async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request
|
||||
config["configurable"]["checkpoint_id"] = body.before
|
||||
|
||||
entries: list[HistoryEntry] = []
|
||||
is_latest_checkpoint = True
|
||||
try:
|
||||
async for checkpoint_tuple in checkpointer.alist(config, limit=body.limit):
|
||||
ckpt_config = getattr(checkpoint_tuple, "config", {})
|
||||
@@ -658,22 +580,42 @@ async def get_thread_history(thread_id: str, body: ThreadHistoryRequest, request
|
||||
|
||||
channel_values = checkpoint.get("channel_values", {})
|
||||
|
||||
# Build values from checkpoint channel_values
|
||||
values: dict[str, Any] = {}
|
||||
if title := channel_values.get("title"):
|
||||
values["title"] = title
|
||||
if thread_data := channel_values.get("thread_data"):
|
||||
values["thread_data"] = thread_data
|
||||
|
||||
# Attach messages only to the latest checkpoint entry.
|
||||
if is_latest_checkpoint:
|
||||
messages = channel_values.get("messages")
|
||||
if messages:
|
||||
values["messages"] = serialize_channel_values({"messages": messages}).get("messages", [])
|
||||
is_latest_checkpoint = False
|
||||
|
||||
# Derive next tasks
|
||||
tasks_raw = getattr(checkpoint_tuple, "tasks", []) or []
|
||||
next_tasks = [t.name for t in tasks_raw if hasattr(t, "name")]
|
||||
|
||||
# Strip LangGraph internal keys from metadata
|
||||
user_meta = {k: v for k, v in metadata.items() if k not in ("created_at", "updated_at", "step", "source", "writes", "parents")}
|
||||
# Keep step for ordering context
|
||||
if "step" in metadata:
|
||||
user_meta["step"] = metadata["step"]
|
||||
|
||||
entries.append(
|
||||
HistoryEntry(
|
||||
checkpoint_id=checkpoint_id,
|
||||
parent_checkpoint_id=parent_id,
|
||||
metadata=metadata,
|
||||
values=serialize_channel_values(channel_values),
|
||||
metadata=user_meta,
|
||||
values=values,
|
||||
created_at=str(metadata.get("created_at", "")),
|
||||
next=next_tasks,
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to get history for thread %s", thread_id)
|
||||
logger.exception("Failed to get history for thread %s", sanitize_log_param(thread_id))
|
||||
raise HTTPException(status_code=500, detail="Failed to get thread history")
|
||||
|
||||
return entries
|
||||
|
||||
@@ -4,11 +4,15 @@ import logging
|
||||
import os
|
||||
import stat
|
||||
|
||||
from fastapi import APIRouter, File, HTTPException, UploadFile
|
||||
from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.gateway.authz import require_permission
|
||||
from app.gateway.deps import get_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.config.paths import get_paths
|
||||
from deerflow.sandbox.sandbox_provider import get_sandbox_provider
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
from deerflow.sandbox.sandbox_provider import SandboxProvider, get_sandbox_provider
|
||||
from deerflow.uploads.manager import (
|
||||
PathTraversalError,
|
||||
delete_file_safe,
|
||||
@@ -53,10 +57,40 @@ def _make_file_sandbox_writable(file_path: os.PathLike[str] | str) -> None:
|
||||
os.chmod(file_path, writable_mode, **chmod_kwargs)
|
||||
|
||||
|
||||
def _uses_thread_data_mounts(sandbox_provider: SandboxProvider) -> bool:
|
||||
return bool(getattr(sandbox_provider, "uses_thread_data_mounts", False))
|
||||
|
||||
|
||||
def _get_uploads_config_value(app_config: AppConfig, key: str, default: object) -> object:
|
||||
"""Read a value from the uploads config, supporting dict and attribute access."""
|
||||
uploads_cfg = getattr(app_config, "uploads", None)
|
||||
if isinstance(uploads_cfg, dict):
|
||||
return uploads_cfg.get(key, default)
|
||||
return getattr(uploads_cfg, key, default)
|
||||
|
||||
|
||||
def _auto_convert_documents_enabled(app_config: AppConfig) -> bool:
|
||||
"""Return whether automatic host-side document conversion is enabled.
|
||||
|
||||
The secure default is disabled unless an operator explicitly opts in via
|
||||
uploads.auto_convert_documents in config.yaml.
|
||||
"""
|
||||
try:
|
||||
raw = _get_uploads_config_value(app_config, "auto_convert_documents", False)
|
||||
if isinstance(raw, str):
|
||||
return raw.strip().lower() in {"1", "true", "yes", "on"}
|
||||
return bool(raw)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
@router.post("", response_model=UploadResponse)
|
||||
@require_permission("threads", "write", owner_check=True, require_existing=True)
|
||||
async def upload_files(
|
||||
thread_id: str,
|
||||
request: Request,
|
||||
files: list[UploadFile] = File(...),
|
||||
app_config: AppConfig = Depends(get_config),
|
||||
) -> UploadResponse:
|
||||
"""Upload multiple files to a thread's uploads directory."""
|
||||
if not files:
|
||||
@@ -66,12 +100,16 @@ async def upload_files(
|
||||
uploads_dir = ensure_uploads_dir(thread_id)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
-    sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id)
+    sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id, user_id=get_effective_user_id())
|
||||
uploaded_files = []
|
||||
|
||||
-    sandbox_provider = get_sandbox_provider()
-    sandbox_id = sandbox_provider.acquire(thread_id)
-    sandbox = sandbox_provider.get(sandbox_id)
+    sandbox_provider = get_sandbox_provider(app_config)
+    sync_to_sandbox = not _uses_thread_data_mounts(sandbox_provider)
+    sandbox = None
+    if sync_to_sandbox:
+        sandbox_id = sandbox_provider.acquire(thread_id)
+        sandbox = sandbox_provider.get(sandbox_id)
+    auto_convert_documents = _auto_convert_documents_enabled(app_config)
|
||||
|
||||
for file in files:
|
||||
if not file.filename:
|
||||
@@ -90,7 +128,7 @@ async def upload_files(
|
||||
|
||||
virtual_path = upload_virtual_path(safe_filename)
|
||||
|
||||
-        if sandbox_id != "local":
+        if sync_to_sandbox and sandbox is not None:
|
||||
_make_file_sandbox_writable(file_path)
|
||||
sandbox.update_file(virtual_path, content)
|
||||
|
||||
@@ -105,12 +143,12 @@ async def upload_files(
|
||||
logger.info(f"Saved file: {safe_filename} ({len(content)} bytes) to {file_info['path']}")
|
||||
|
||||
file_ext = file_path.suffix.lower()
|
||||
-        if file_ext in CONVERTIBLE_EXTENSIONS:
+        if auto_convert_documents and file_ext in CONVERTIBLE_EXTENSIONS:
|
||||
md_path = await convert_file_to_markdown(file_path)
|
||||
if md_path:
|
||||
md_virtual_path = upload_virtual_path(md_path.name)
|
||||
|
||||
-                if sandbox_id != "local":
+                if sync_to_sandbox and sandbox is not None:
|
||||
_make_file_sandbox_writable(md_path)
|
||||
sandbox.update_file(md_virtual_path, md_path.read_bytes())
|
||||
|
||||
@@ -133,7 +171,8 @@ async def upload_files(
|
||||
|
||||
|
||||
@router.get("/list", response_model=dict)
|
||||
-async def list_uploaded_files(thread_id: str) -> dict:
+@require_permission("threads", "read", owner_check=True)
+async def list_uploaded_files(thread_id: str, request: Request) -> dict:
|
||||
"""List all files in a thread's uploads directory."""
|
||||
try:
|
||||
uploads_dir = get_uploads_dir(thread_id)
|
||||
@@ -143,7 +182,7 @@ async def list_uploaded_files(thread_id: str) -> dict:
|
||||
enrich_file_listing(result, thread_id)
|
||||
|
||||
# Gateway additionally includes the sandbox-relative path.
|
||||
-    sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id)
+    sandbox_uploads = get_paths().sandbox_uploads_dir(thread_id, user_id=get_effective_user_id())
|
||||
for f in result["files"]:
|
||||
f["path"] = str(sandbox_uploads / f["filename"])
|
||||
|
||||
@@ -151,7 +190,8 @@ async def list_uploaded_files(thread_id: str) -> dict:
|
||||
|
||||
|
||||
@router.delete("/{filename}")
|
||||
-async def delete_uploaded_file(thread_id: str, filename: str) -> dict:
+@require_permission("threads", "delete", owner_check=True, require_existing=True)
+async def delete_uploaded_file(thread_id: str, filename: str, request: Request) -> dict:
|
||||
"""Delete a file from a thread's uploads directory."""
|
||||
try:
|
||||
uploads_dir = get_uploads_dir(thread_id)
|
||||
|
||||
+141
-92
@@ -8,15 +8,19 @@ frames, and consuming stream bridge events. Router modules
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import dataclasses
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
from typing import Any
|
||||
|
||||
from fastapi import HTTPException, Request
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
-from app.gateway.deps import get_checkpointer, get_run_manager, get_store, get_stream_bridge
+from app.gateway.deps import get_run_context, get_run_manager, get_run_store, get_stream_bridge
|
||||
from app.gateway.utils import sanitize_log_param
|
||||
from deerflow.runtime import (
|
||||
END_SENTINEL,
|
||||
HEARTBEAT_SENTINEL,
|
||||
@@ -93,25 +97,89 @@ def normalize_input(raw_input: dict[str, Any] | None) -> dict[str, Any]:
|
||||
return raw_input
|
||||
|
||||
|
||||
_DEFAULT_ASSISTANT_ID = "lead_agent"
|
||||
|
||||
|
||||
def resolve_agent_factory(assistant_id: str | None):
|
||||
"""Resolve the agent factory callable from config."""
|
||||
"""Resolve the agent factory callable from config.
|
||||
|
||||
Custom agents are implemented as ``lead_agent`` + an ``agent_name``
|
||||
injected into ``configurable`` or ``context`` — see
|
||||
:func:`build_run_config`. All ``assistant_id`` values therefore map to the
|
||||
same factory; the routing happens inside ``make_lead_agent`` when it reads
|
||||
``cfg["agent_name"]``.
|
||||
"""
|
||||
from deerflow.agents.lead_agent.agent import make_lead_agent
|
||||
|
||||
if assistant_id and assistant_id != "lead_agent":
|
||||
logger.info("assistant_id=%s requested; falling back to lead_agent", assistant_id)
|
||||
return make_lead_agent
|
||||
|
||||
|
||||
-def build_run_config(thread_id: str, request_config: dict[str, Any] | None, metadata: dict[str, Any] | None) -> dict[str, Any]:
-    """Build a RunnableConfig dict for the agent."""
-    configurable = {"thread_id": thread_id}
-    if request_config:
-        configurable.update(request_config.get("configurable", {}))
-    config: dict[str, Any] = {"configurable": configurable, "recursion_limit": 100}
|
||||
def build_run_config(
|
||||
thread_id: str,
|
||||
request_config: dict[str, Any] | None,
|
||||
metadata: dict[str, Any] | None,
|
||||
*,
|
||||
assistant_id: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Build a RunnableConfig dict for the agent.
|
||||
|
||||
When *assistant_id* refers to a custom agent (anything other than
|
||||
``"lead_agent"`` / ``None``), the name is forwarded as ``agent_name`` in
|
||||
whichever runtime options container is active: ``context`` for
|
||||
LangGraph >= 0.6.0 requests, otherwise ``configurable``.
|
||||
``make_lead_agent`` reads this key to load the matching
|
||||
``agents/<name>/SOUL.md`` and per-agent config — without it the agent
|
||||
silently runs as the default lead agent.
|
||||
|
||||
This mirrors the channel manager's ``_resolve_run_params`` logic so that
|
||||
the LangGraph Platform-compatible HTTP API and the IM channel path behave
|
||||
identically.
|
||||
"""
|
||||
config: dict[str, Any] = {"recursion_limit": 100}
|
||||
if request_config:
|
||||
# LangGraph >= 0.6.0 introduced ``context`` as the preferred way to
|
||||
# pass thread-level data and rejects requests that include both
|
||||
# ``configurable`` and ``context``. If the caller already sends
|
||||
# ``context``, honour it and skip our own ``configurable`` dict.
|
||||
if "context" in request_config:
|
||||
if "configurable" in request_config:
|
||||
logger.warning(
|
||||
"build_run_config: client sent both 'context' and 'configurable'; preferring 'context' (LangGraph >= 0.6.0). thread_id=%s, caller_configurable keys=%s",
|
||||
thread_id,
|
||||
list(request_config.get("configurable", {}).keys()),
|
||||
)
|
||||
context_value = request_config["context"]
|
||||
if context_value is None:
|
||||
context = {}
|
||||
elif isinstance(context_value, Mapping):
|
||||
context = dict(context_value)
|
||||
else:
|
||||
raise ValueError("request config 'context' must be a mapping or null.")
|
||||
config["context"] = context
|
||||
else:
|
||||
configurable = {"thread_id": thread_id}
|
||||
configurable.update(request_config.get("configurable", {}))
|
||||
config["configurable"] = configurable
|
||||
for k, v in request_config.items():
|
||||
if k != "configurable":
|
||||
if k not in ("configurable", "context"):
|
||||
config[k] = v
|
||||
else:
|
||||
config["configurable"] = {"thread_id": thread_id}
|
||||
|
||||
# Inject custom agent name when the caller specified a non-default assistant.
|
||||
# Honour an explicit agent_name in the active runtime options container.
|
||||
if assistant_id and assistant_id != _DEFAULT_ASSISTANT_ID:
|
||||
normalized = assistant_id.strip().lower().replace("_", "-")
|
||||
if not normalized or not re.fullmatch(r"[a-z0-9-]+", normalized):
|
||||
raise ValueError(f"Invalid assistant_id {assistant_id!r}: must contain only letters, digits, and hyphens after normalization.")
|
||||
if "configurable" in config:
|
||||
target = config["configurable"]
|
||||
elif "context" in config:
|
||||
target = config["context"]
|
||||
else:
|
||||
target = config.setdefault("configurable", {})
|
||||
if target is not None and "agent_name" not in target:
|
||||
target["agent_name"] = normalized
|
||||
if metadata:
|
||||
config.setdefault("metadata", {}).update(metadata)
|
||||
return config
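
The `context` / `configurable` branching above is easiest to see with concrete inputs. A minimal sketch, assuming `build_run_config` is imported as defined here; the payload values are illustrative only:

```python
# Hypothetical inputs, traced through the function above.

# Pre-0.6.0 style request: only "configurable" is sent.
legacy = build_run_config(
    "thread-1",
    {"configurable": {"model_name": "gpt-4"}},
    {"source": "docs"},
    assistant_id="data_analyst",
)
# legacy == {
#     "recursion_limit": 100,
#     "configurable": {"thread_id": "thread-1", "model_name": "gpt-4", "agent_name": "data-analyst"},
#     "metadata": {"source": "docs"},
# }

# LangGraph >= 0.6.0 style request: "context" wins and no configurable dict is built.
modern = build_run_config(
    "thread-1",
    {"context": {"model_name": "gpt-4"}},
    None,
    assistant_id="data_analyst",
)
# modern == {
#     "recursion_limit": 100,
#     "context": {"model_name": "gpt-4", "agent_name": "data-analyst"},
# }
```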
|
||||
@@ -122,71 +190,6 @@ def build_run_config(thread_id: str, request_config: dict[str, Any] | None, meta
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _upsert_thread_in_store(store, thread_id: str, metadata: dict | None) -> None:
|
||||
"""Create or refresh the thread record in the Store.
|
||||
|
||||
Called from :func:`start_run` so that threads created via the stateless
|
||||
``/runs/stream`` endpoint (which never calls ``POST /threads``) still
|
||||
appear in ``/threads/search`` results.
|
||||
"""
|
||||
# Deferred import to avoid circular import with the threads router module.
|
||||
from app.gateway.routers.threads import _store_upsert
|
||||
|
||||
try:
|
||||
await _store_upsert(store, thread_id, metadata=metadata)
|
||||
except Exception:
|
||||
logger.warning("Failed to upsert thread %s in store (non-fatal)", thread_id)
|
||||
|
||||
|
||||
async def _sync_thread_title_after_run(
|
||||
run_task: asyncio.Task,
|
||||
thread_id: str,
|
||||
checkpointer: Any,
|
||||
store: Any,
|
||||
) -> None:
|
||||
"""Wait for *run_task* to finish, then persist the generated title to the Store.
|
||||
|
||||
TitleMiddleware writes the generated title to the LangGraph agent state
|
||||
(checkpointer) but the Gateway's Store record is not updated automatically.
|
||||
This coroutine closes that gap by reading the final checkpoint after the
|
||||
run completes and syncing ``values.title`` into the Store record so that
|
||||
subsequent ``/threads/search`` responses include the correct title.
|
||||
|
||||
Runs as a fire-and-forget :func:`asyncio.create_task`; failures are
|
||||
logged at DEBUG level and never propagate.
|
||||
"""
|
||||
# Wait for the background run task to complete (any outcome).
|
||||
# asyncio.wait does not propagate task exceptions — it just returns
|
||||
# when the task is done, cancelled, or failed.
|
||||
await asyncio.wait({run_task})
|
||||
|
||||
# Deferred import to avoid circular import with the threads router module.
|
||||
from app.gateway.routers.threads import _store_get, _store_put
|
||||
|
||||
try:
|
||||
ckpt_config = {"configurable": {"thread_id": thread_id, "checkpoint_ns": ""}}
|
||||
ckpt_tuple = await checkpointer.aget_tuple(ckpt_config)
|
||||
if ckpt_tuple is None:
|
||||
return
|
||||
|
||||
channel_values = ckpt_tuple.checkpoint.get("channel_values", {})
|
||||
title = channel_values.get("title")
|
||||
if not title:
|
||||
return
|
||||
|
||||
existing = await _store_get(store, thread_id)
|
||||
if existing is None:
|
||||
return
|
||||
|
||||
updated = dict(existing)
|
||||
updated.setdefault("values", {})["title"] = title
|
||||
updated["updated_at"] = time.time()
|
||||
await _store_put(store, updated)
|
||||
logger.debug("Synced title %r for thread %s", title, thread_id)
|
||||
except Exception:
|
||||
logger.debug("Failed to sync title for thread %s (non-fatal)", thread_id, exc_info=True)
|
||||
|
||||
|
||||
async def start_run(
|
||||
body: Any,
|
||||
thread_id: str,
|
||||
@@ -206,11 +209,25 @@ async def start_run(
|
||||
"""
|
||||
bridge = get_stream_bridge(request)
|
||||
run_mgr = get_run_manager(request)
|
||||
-    checkpointer = get_checkpointer(request)
-    store = get_store(request)
+    run_ctx = get_run_context(request)
|
||||
|
||||
disconnect = DisconnectMode.cancel if body.on_disconnect == "cancel" else DisconnectMode.continue_
|
||||
|
||||
# Resolve follow_up_to_run_id: explicit from request, or auto-detect from latest successful run
|
||||
follow_up_to_run_id = getattr(body, "follow_up_to_run_id", None)
|
||||
if follow_up_to_run_id is None:
|
||||
run_store = get_run_store(request)
|
||||
try:
|
||||
recent_runs = await run_store.list_by_thread(thread_id, limit=1)
|
||||
if recent_runs and recent_runs[0].get("status") == "success":
|
||||
follow_up_to_run_id = recent_runs[0]["run_id"]
|
||||
except Exception:
|
||||
pass # Don't block run creation
|
||||
|
||||
# Enrich base context with per-run field
|
||||
if follow_up_to_run_id:
|
||||
run_ctx = dataclasses.replace(run_ctx, follow_up_to_run_id=follow_up_to_run_id)
|
||||
|
||||
try:
|
||||
record = await run_mgr.create_or_reject(
|
||||
thread_id,
|
||||
@@ -219,21 +236,55 @@ async def start_run(
|
||||
metadata=body.metadata or {},
|
||||
kwargs={"input": body.input, "config": body.config},
|
||||
multitask_strategy=body.multitask_strategy,
|
||||
follow_up_to_run_id=follow_up_to_run_id,
|
||||
)
|
||||
except ConflictError as exc:
|
||||
raise HTTPException(status_code=409, detail=str(exc)) from exc
|
||||
except UnsupportedStrategyError as exc:
|
||||
raise HTTPException(status_code=501, detail=str(exc)) from exc
|
||||
|
||||
-    # Ensure the thread is visible in /threads/search, even for threads that
-    # were never explicitly created via POST /threads (e.g. stateless runs).
-    store = get_store(request)
-    if store is not None:
-        await _upsert_thread_in_store(store, thread_id, body.metadata)
+    # Upsert thread metadata so the thread appears in /threads/search,
+    # even for threads that were never explicitly created via POST /threads
+    # (e.g. stateless runs).
+    try:
+        existing = await run_ctx.thread_store.get(thread_id)
+        if existing is None:
+            await run_ctx.thread_store.create(
+                thread_id,
+                assistant_id=body.assistant_id,
+                metadata=body.metadata,
+            )
+        else:
+            await run_ctx.thread_store.update_status(thread_id, "running")
+    except Exception:
+        logger.warning("Failed to upsert thread_meta for %s (non-fatal)", sanitize_log_param(thread_id))
|
||||
|
||||
agent_factory = resolve_agent_factory(body.assistant_id)
|
||||
graph_input = normalize_input(body.input)
|
||||
-    config = build_run_config(thread_id, body.config, body.metadata)
+    config = build_run_config(thread_id, body.config, body.metadata, assistant_id=body.assistant_id)
|
||||
|
||||
# Merge DeerFlow-specific context overrides into configurable.
|
||||
# The ``context`` field is a custom extension for the langgraph-compat layer
|
||||
# that carries agent configuration (model_name, thinking_enabled, etc.).
|
||||
# Only agent-relevant keys are forwarded; unknown keys (e.g. thread_id) are ignored.
|
||||
context = getattr(body, "context", None)
|
||||
if context:
|
||||
_CONTEXT_CONFIGURABLE_KEYS = {
|
||||
"model_name",
|
||||
"mode",
|
||||
"thinking_enabled",
|
||||
"reasoning_effort",
|
||||
"is_plan_mode",
|
||||
"subagent_enabled",
|
||||
"max_concurrent_subagents",
|
||||
"agent_name",
|
||||
"is_bootstrap",
|
||||
}
|
||||
configurable = config.setdefault("configurable", {})
|
||||
for key in _CONTEXT_CONFIGURABLE_KEYS:
|
||||
if key in context:
|
||||
configurable.setdefault(key, context[key])
|
||||
|
||||
stream_modes = normalize_stream_modes(body.stream_mode)
|
||||
|
||||
task = asyncio.create_task(
|
||||
@@ -241,8 +292,7 @@ async def start_run(
|
||||
bridge,
|
||||
run_mgr,
|
||||
record,
|
||||
-            checkpointer=checkpointer,
-            store=store,
+            ctx=run_ctx,
|
||||
agent_factory=agent_factory,
|
||||
graph_input=graph_input,
|
||||
config=config,
|
||||
@@ -254,11 +304,9 @@ async def start_run(
|
||||
)
|
||||
record.task = task
|
||||
|
||||
-    # After the run completes, sync the title generated by TitleMiddleware from
-    # the checkpointer into the Store record so that /threads/search returns the
-    # correct title instead of an empty values dict.
-    if store is not None:
-        asyncio.create_task(_sync_thread_title_after_run(task, thread_id, checkpointer, store))
+    # Title sync is handled by worker.py's finally block which reads the
+    # title from the checkpoint and calls thread_store.update_display_name
+    # after the run completes.
|
||||
|
||||
return record
|
||||
|
||||
@@ -275,8 +323,9 @@ async def sse_consumer(
|
||||
- ``cancel``: abort the background task on client disconnect.
|
||||
- ``continue``: let the task run; events are discarded.
|
||||
"""
|
||||
last_event_id = request.headers.get("Last-Event-ID")
|
||||
try:
|
||||
-        async for entry in bridge.subscribe(record.run_id):
+        async for entry in bridge.subscribe(record.run_id, last_event_id=last_event_id):
|
||||
if await request.is_disconnected():
|
||||
break
|
||||
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
"""Shared utility helpers for the Gateway layer."""
|
||||
|
||||
|
||||
def sanitize_log_param(value: str) -> str:
|
||||
"""Strip control characters to prevent log injection."""
|
||||
return value.replace("\n", "").replace("\r", "").replace("\x00", "")
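
A short, hypothetical illustration of the log-injection case this helper guards against (the attacker-controlled value below is made up):

```python
import logging

logger = logging.getLogger("gateway")

# A crafted thread_id containing a newline could otherwise forge a second,
# legitimate-looking log record.
malicious_thread_id = "thread-42\n2024-01-01 00:00:00 - gateway - INFO - admin login ok"

# With sanitize_log_param the control characters are stripped, so the value
# stays on one log line and cannot fake a separate record.
logger.warning("Failed to get history for thread %s", sanitize_log_param(malicious_thread_id))
```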
|
||||
+78
-13
@@ -19,24 +19,78 @@ import asyncio
|
||||
import logging
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
from deerflow.agents import make_lead_agent
|
||||
try:
|
||||
from prompt_toolkit import PromptSession
|
||||
from prompt_toolkit.history import InMemoryHistory
|
||||
|
||||
_HAS_PROMPT_TOOLKIT = True
|
||||
except ImportError:
|
||||
_HAS_PROMPT_TOOLKIT = False
|
||||
|
||||
load_dotenv()
|
||||
|
||||
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    datefmt="%Y-%m-%d %H:%M:%S",
-)
+_LOG_FMT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+_LOG_DATEFMT = "%Y-%m-%d %H:%M:%S"
|
||||
|
||||
|
||||
def _logging_level_from_config(name: str) -> int:
|
||||
"""Map ``config.yaml`` ``log_level`` string to a ``logging`` level constant."""
|
||||
mapping = logging.getLevelNamesMapping()
|
||||
return mapping.get((name or "info").strip().upper(), logging.INFO)
|
||||
|
||||
|
||||
def _setup_logging(log_level: str) -> None:
|
||||
"""Send application logs to ``debug.log`` at *log_level*; do not print them on the console.
|
||||
|
||||
Idempotent: any pre-existing handlers on the root logger (e.g. installed by
|
||||
``logging.basicConfig`` in transitively imported modules) are removed so the
|
||||
debug session output only lands in ``debug.log``.
|
||||
"""
|
||||
level = _logging_level_from_config(log_level)
|
||||
root = logging.root
|
||||
for h in list(root.handlers):
|
||||
root.removeHandler(h)
|
||||
h.close()
|
||||
root.setLevel(level)
|
||||
|
||||
file_handler = logging.FileHandler("debug.log", mode="a", encoding="utf-8")
|
||||
file_handler.setLevel(level)
|
||||
file_handler.setFormatter(logging.Formatter(_LOG_FMT, datefmt=_LOG_DATEFMT))
|
||||
root.addHandler(file_handler)
|
||||
|
||||
|
||||
def _update_logging_level(log_level: str) -> None:
|
||||
"""Update the root logger and existing handlers to *log_level*."""
|
||||
level = _logging_level_from_config(log_level)
|
||||
root = logging.root
|
||||
root.setLevel(level)
|
||||
for handler in root.handlers:
|
||||
handler.setLevel(level)
|
||||
|
||||
|
||||
async def main():
|
||||
# Install file logging first so warnings emitted while loading config do not
|
||||
# leak onto the interactive terminal via Python's lastResort handler.
|
||||
_setup_logging("info")
|
||||
|
||||
from deerflow.config import get_app_config
|
||||
|
||||
app_config = get_app_config()
|
||||
_update_logging_level(app_config.log_level)
|
||||
|
||||
# Delay the rest of the deerflow imports until *after* logging is installed
|
||||
# so that any import-time side effects (e.g. deerflow.agents starts a
|
||||
# background skill-loader thread on import) emit logs to debug.log instead
|
||||
# of leaking onto the interactive terminal via Python's lastResort handler.
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.agents import make_lead_agent
|
||||
from deerflow.mcp import initialize_mcp_tools
|
||||
|
||||
# Initialize MCP tools at startup
|
||||
try:
|
||||
from deerflow.mcp import initialize_mcp_tools
|
||||
|
||||
await initialize_mcp_tools()
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to initialize MCP tools: {e}")
|
||||
@@ -52,16 +106,27 @@ async def main():
|
||||
}
|
||||
}
|
||||
|
||||
runtime = Runtime(context={"thread_id": config["configurable"]["thread_id"]})
|
||||
config["configurable"]["__pregel_runtime"] = runtime
|
||||
|
||||
agent = make_lead_agent(config)
|
||||
|
||||
session = PromptSession(history=InMemoryHistory()) if _HAS_PROMPT_TOOLKIT else None
|
||||
|
||||
print("=" * 50)
|
||||
print("Lead Agent Debug Mode")
|
||||
print("Type 'quit' or 'exit' to stop")
|
||||
print(f"Logs: debug.log (log_level={app_config.log_level})")
|
||||
if not _HAS_PROMPT_TOOLKIT:
|
||||
print("Tip: `uv sync --group dev` to enable arrow-key & history support")
|
||||
print("=" * 50)
|
||||
|
||||
while True:
|
||||
try:
|
||||
-            user_input = input("\nYou: ").strip()
+            if session:
+                user_input = (await session.prompt_async("\nYou: ")).strip()
+            else:
+                user_input = input("\nYou: ").strip()
|
||||
if not user_input:
|
||||
continue
|
||||
if user_input.lower() in ("quit", "exit"):
|
||||
@@ -70,15 +135,15 @@ async def main():
|
||||
|
||||
# Invoke the agent
|
||||
state = {"messages": [HumanMessage(content=user_input)]}
|
||||
-            result = await agent.ainvoke(state, config=config, context={"thread_id": "debug-thread-001"})
+            result = await agent.ainvoke(state, config=config)
|
||||
|
||||
# Print the response
|
||||
if result.get("messages"):
|
||||
last_message = result["messages"][-1]
|
||||
print(f"\nAgent: {last_message.content}")
|
||||
|
||||
-        except KeyboardInterrupt:
-            print("\nInterrupted. Goodbye!")
+        except (KeyboardInterrupt, EOFError):
+            print("\nGoodbye!")
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"\nError: {e}")
|
||||
|
||||
+25
-1
@@ -86,6 +86,7 @@ Content-Type: application/json
|
||||
]
|
||||
},
|
||||
"config": {
|
||||
"recursion_limit": 100,
|
||||
"configurable": {
|
||||
"model_name": "gpt-4",
|
||||
"thinking_enabled": false,
|
||||
@@ -100,6 +101,21 @@ Content-Type: application/json
|
||||
- Use: `values`, `messages-tuple`, `custom`, `updates`, `events`, `debug`, `tasks`, `checkpoints`
|
||||
- Do not use: `tools` (deprecated/invalid in current `langgraph-api` and will trigger schema validation errors)
|
||||
|
||||
**Recursion Limit:**
|
||||
|
||||
`config.recursion_limit` caps the number of graph steps LangGraph will execute
|
||||
in a single run. The `/api/langgraph/*` endpoints go straight to the LangGraph
|
||||
server and therefore inherit LangGraph's native default of **25**, which is
|
||||
too low for plan-mode or subagent-heavy runs — the agent typically errors out
|
||||
with `GraphRecursionError` after the first round of subagent results comes
|
||||
back, before the lead agent can synthesize the final answer.
|
||||
|
||||
DeerFlow's own Gateway and IM-channel paths mitigate this by defaulting to
|
||||
`100` in `build_run_config` (see `backend/app/gateway/services.py`), but
|
||||
clients calling the LangGraph API directly must set `recursion_limit`
|
||||
explicitly in the request body. `100` matches the Gateway default and is a
|
||||
safe starting point; increase it if you run deeply nested subagent graphs.
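
For example, the same payload shown in the curl example later in this document can be sent from Python; this is a minimal sketch, and the host, port, and thread id are placeholders:

```python
import requests

resp = requests.post(
    "http://localhost:2026/api/langgraph/threads/abc123/runs",
    json={
        "input": {"messages": [{"role": "user", "content": "Hello"}]},
        "config": {
            "recursion_limit": 100,  # override LangGraph's native default of 25
            "configurable": {"model_name": "gpt-4"},
        },
    },
)
resp.raise_for_status()
```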
|
||||
|
||||
**Configurable Options:**
|
||||
- `model_name` (string): Override the default model
|
||||
- `thinking_enabled` (boolean): Enable extended thinking for supported models
|
||||
@@ -626,6 +642,14 @@ curl -X POST http://localhost:2026/api/langgraph/threads/abc123/runs \
|
||||
-H "Content-Type: application/json" \
|
||||
  -d '{
    "input": {"messages": [{"role": "user", "content": "Hello"}]},
-    "config": {"configurable": {"model_name": "gpt-4"}}
+    "config": {
+      "recursion_limit": 100,
+      "configurable": {"model_name": "gpt-4"}
+    }
|
||||
}'
|
||||
```
|
||||
|
||||
> The `/api/langgraph/*` endpoints bypass DeerFlow's Gateway and inherit
|
||||
> LangGraph's native `recursion_limit` default of 25, which is too low for
|
||||
> plan-mode or subagent runs. Set `config.recursion_limit` explicitly — see
|
||||
> the [Create Run](#create-run) section for details.
|
||||
|
||||
@@ -199,7 +199,7 @@ class ThreadState(AgentState):
|
||||
│ Built-in Tools │ │ Configured Tools │ │ MCP Tools │
|
||||
│ (packages/harness/deerflow/tools/) │ │ (config.yaml) │ │ (extensions.json) │
|
||||
├─────────────────────┤ ├─────────────────────┤ ├─────────────────────┤
|
||||
-│ - present_file │ │ - web_search │ │ - github │
+│ - present_files │ │ - web_search │ │ - github │
|
||||
│ - ask_clarification │ │ - web_fetch │ │ - filesystem │
|
||||
│ - view_image │ │ - bash │ │ - postgres │
|
||||
│ │ │ - read_file │ │ - brave-search │
|
||||
|
||||
@@ -0,0 +1,77 @@
|
||||
# Docker Test Gap (Section 七 7.4)
|
||||
|
||||
This file documents the only **un-executed** test cases from
|
||||
`backend/docs/AUTH_TEST_PLAN.md` after the full release validation pass.
|
||||
|
||||
## Why this gap exists
|
||||
|
||||
The release validation environment (sg_dev: `10.251.229.92`) **does not have
|
||||
a Docker daemon installed**. The TC-DOCKER cases are container-runtime
|
||||
behavior tests that need an actual Docker engine to spin up
|
||||
`docker/docker-compose.yaml` services.
|
||||
|
||||
```bash
|
||||
$ ssh sg_dev "which docker; docker --version"
|
||||
# (empty)
|
||||
# bash: docker: command not found
|
||||
```
|
||||
|
||||
All other test plan sections were executed against either:
|
||||
- The local dev box (Mac, all services running locally), or
|
||||
- The deployed sg_dev instance (gateway + frontend + nginx via SSH tunnel)
|
||||
|
||||
## Cases not executed
|
||||
|
||||
| Case | Title | What it covers | Why not run |
|
||||
|---|---|---|---|
|
||||
| TC-DOCKER-01 | `users.db` volume persistence | Verify the `DEER_FLOW_HOME` bind mount survives container restart | needs `docker compose up` |
|
||||
| TC-DOCKER-02 | Session persistence across container restart | `AUTH_JWT_SECRET` env var keeps cookies valid after `docker compose down && up` | needs `docker compose down/up` |
|
||||
| TC-DOCKER-03 | Per-worker rate limiter divergence | Confirms in-process `_login_attempts` dict doesn't share state across `gunicorn` workers (4 by default in the compose file); known limitation, documented | needs multi-worker container |
|
||||
| TC-DOCKER-04 | IM channels skip AuthMiddleware | Verify Feishu/Slack/Telegram dispatchers run in-container against `http://langgraph:2024` without going through nginx | needs `docker logs` |
|
||||
| TC-DOCKER-05 | Admin credentials surfacing | **Updated post-simplify** — was "log scrape", now "0600 credential file in `DEER_FLOW_HOME`". The file-based behavior is already validated by TC-1.1 + TC-UPG-13 on sg_dev (non-Docker), so the only Docker-specific gap is verifying the volume mount carries the file out to the host | needs container + host volume |
|
||||
| TC-DOCKER-06 | Gateway-mode Docker deploy | `./scripts/deploy.sh --gateway` produces a 3-container topology (no `langgraph` container); same auth flow as standard mode | needs `docker compose --profile gateway` |
|
||||
|
||||
## Coverage already provided by non-Docker tests
|
||||
|
||||
The **auth-relevant** behavior in each Docker case is already exercised by
|
||||
the test cases that ran on sg_dev or local:
|
||||
|
||||
| Docker case | Auth behavior covered by |
|
||||
|---|---|
|
||||
| TC-DOCKER-01 (volume persistence) | TC-REENT-01 on sg_dev (admin row survives gateway restart) — same SQLite file, just no container layer between |
|
||||
| TC-DOCKER-02 (session persistence) | TC-API-02/03/06 (cookie roundtrip), plus TC-REENT-04 (multi-cookie) — JWT verification is process-state-free, container restart is equivalent to `pkill uvicorn && uv run uvicorn` |
|
||||
| TC-DOCKER-03 (per-worker rate limit) | TC-GW-04 + TC-REENT-09 (single-worker rate limit + 5min expiry). The cross-worker divergence is an architectural property of the in-memory dict; no auth code path differs |
|
||||
| TC-DOCKER-04 (IM channels skip auth) | Code-level only: `app/channels/manager.py` uses `langgraph_sdk` directly with no cookie handling. The langgraph_auth handler is bypassed by going through SDK, not HTTP |
|
||||
| TC-DOCKER-05 (credential surfacing) | TC-1.1 on sg_dev (file at `~/deer-flow/backend/.deer-flow/admin_initial_credentials.txt`, mode 0600, password 22 chars) — the only Docker-unique step is whether the bind mount projects this path onto the host, which is a `docker compose` config check, not a runtime behavior change |
|
||||
| TC-DOCKER-06 (gateway-mode container) | Section 七 7.2 covered by TC-GW-01..05 + Section 二 (gateway-mode auth flow on sg_dev) — same Gateway code, container is just a packaging change |
|
||||
|
||||
## Reproduction steps when Docker becomes available
|
||||
|
||||
Anyone with `docker` + `docker compose` installed can close this gap by
running the test plan section verbatim. Pre-flight:
|
||||
|
||||
```bash
|
||||
# Required on the host
|
||||
docker --version # >=24.x
|
||||
docker compose version # plugin >=2.x
|
||||
|
||||
# Required env var (otherwise sessions reset on every container restart)
|
||||
echo "AUTH_JWT_SECRET=$(python3 -c 'import secrets; print(secrets.token_urlsafe(32))')" \
|
||||
>> .env
|
||||
|
||||
# Optional: pin DEER_FLOW_HOME to a stable host path
|
||||
echo "DEER_FLOW_HOME=$HOME/deer-flow-data" >> .env
|
||||
```
|
||||
|
||||
Then run TC-DOCKER-01..06 from the test plan as written.
|
||||
|
||||
## Decision log
|
||||
|
||||
- **Not blocking the release.** The auth-relevant behavior in every Docker
|
||||
case has an already-validated equivalent on bare metal. The gap is purely
|
||||
about *container packaging* details (bind mounts, multi-worker, log
|
||||
collection), not about whether the auth code paths work.
|
||||
- **TC-DOCKER-05 was updated in place** in `AUTH_TEST_PLAN.md` to reflect
|
||||
the post-simplify reality (credentials file → 0600 file, no log leak).
|
||||
The old "grep 'Password:' in docker logs" expectation would have failed
|
||||
silently and given a false sense of coverage.
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,129 @@
|
||||
# Authentication Upgrade Guide
|
||||
|
||||
DeerFlow 内置了认证模块。本文档面向从无认证版本升级的用户。
|
||||
|
||||
## 核心概念
|
||||
|
||||
认证模块采用**始终强制**策略:
|
||||
|
||||
- 首次启动时自动创建 admin 账号,随机密码打印到控制台日志
|
||||
- 认证从一开始就是强制的,无竞争窗口
|
||||
- 历史对话(升级前创建的 thread)自动迁移到 admin 名下
|
||||
|
||||
## 升级步骤
|
||||
|
||||
### 1. 更新代码
|
||||
|
||||
```bash
|
||||
git pull origin main
|
||||
cd backend && make install
|
||||
```
|
||||
|
||||
### 2. 首次启动
|
||||
|
||||
```bash
|
||||
make dev
|
||||
```
|
||||
|
||||
控制台会输出:
|
||||
|
||||
```
|
||||
============================================================
|
||||
Admin account created on first boot
|
||||
Email: admin@deerflow.dev
|
||||
Password: aB3xK9mN_pQ7rT2w
|
||||
Change it after login: Settings → Account
|
||||
============================================================
|
||||
```
|
||||
|
||||
如果未登录就重启了服务,不用担心——只要 setup 未完成,每次启动都会重置密码并重新打印到控制台。
|
||||
|
||||
### 3. 登录
|
||||
|
||||
访问 `http://localhost:2026/login`,使用控制台输出的邮箱和密码登录。
|
||||
|
||||
### 4. 修改密码
|
||||
|
||||
登录后进入 Settings → Account → Change Password。
|
||||
|
||||
### 5. 添加用户(可选)
|
||||
|
||||
其他用户通过 `/login` 页面注册,自动获得 **user** 角色。每个用户只能看到自己的对话。
|
||||
|
||||
## 安全机制
|
||||
|
||||
| 机制 | 说明 |
|
||||
|------|------|
|
||||
| JWT HttpOnly Cookie | Token 不暴露给 JavaScript,防止 XSS 窃取 |
|
||||
| CSRF Double Submit Cookie | 所有 POST/PUT/DELETE 请求需携带 `X-CSRF-Token` |
|
||||
| bcrypt 密码哈希 | 密码不以明文存储 |
|
||||
| 多租户隔离 | 用户只能访问自己的 thread |
|
||||
| HTTPS 自适应 | 检测 `x-forwarded-proto`,自动设置 `Secure` cookie 标志 |
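
A minimal Python sketch of the double-submit flow described above; the CSRF cookie name and the change-password field names are assumptions, only the endpoints and the `X-CSRF-Token` header come from this guide:

```python
import requests

s = requests.Session()

# Log in with the documented OAuth2 form endpoint.
s.post(
    "http://localhost:2026/api/v1/auth/login/local",
    data={"username": "admin@deerflow.dev", "password": "<console password>"},
).raise_for_status()

# Double submit: echo the CSRF cookie back as the X-CSRF-Token header.
csrf = s.cookies.get("csrf_token", "")   # cookie name is an assumption
s.post(
    "http://localhost:2026/api/v1/auth/change-password",
    headers={"X-CSRF-Token": csrf},
    json={"old_password": "<console password>", "new_password": "<new password>"},  # field names are assumptions
).raise_for_status()
```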
|
||||
|
||||
## 常见操作
|
||||
|
||||
### 忘记密码
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
|
||||
# 重置 admin 密码
|
||||
python -m app.gateway.auth.reset_admin
|
||||
|
||||
# 重置指定用户密码
|
||||
python -m app.gateway.auth.reset_admin --email user@example.com
|
||||
```
|
||||
|
||||
会输出新的随机密码。
|
||||
|
||||
### 完全重置
|
||||
|
||||
删除用户数据库,重启后自动创建新 admin:
|
||||
|
||||
```bash
|
||||
rm -f backend/.deer-flow/users.db
|
||||
# 重启服务,控制台输出新密码
|
||||
```
|
||||
|
||||
## 数据存储
|
||||
|
||||
| 文件 | 内容 |
|
||||
|------|------|
|
||||
| `.deer-flow/users.db` | SQLite 用户数据库(密码哈希、角色) |
|
||||
| `.env` 中的 `AUTH_JWT_SECRET` | JWT 签名密钥(未设置时自动生成临时密钥,重启后 session 失效) |
|
||||
|
||||
### 生产环境建议
|
||||
|
||||
```bash
|
||||
# 生成持久化 JWT 密钥,避免重启后所有用户需重新登录
|
||||
python -c "import secrets; print(secrets.token_urlsafe(32))"
|
||||
# 将输出添加到 .env:
|
||||
# AUTH_JWT_SECRET=<生成的密钥>
|
||||
```
|
||||
|
||||
## API 端点
|
||||
|
||||
| 端点 | 方法 | 说明 |
|
||||
|------|------|------|
|
||||
| `/api/v1/auth/login/local` | POST | 邮箱密码登录(OAuth2 form) |
|
||||
| `/api/v1/auth/register` | POST | 注册新用户(user 角色) |
|
||||
| `/api/v1/auth/logout` | POST | 登出(清除 cookie) |
|
||||
| `/api/v1/auth/me` | GET | 获取当前用户信息 |
|
||||
| `/api/v1/auth/change-password` | POST | 修改密码 |
|
||||
| `/api/v1/auth/setup-status` | GET | 检查 admin 是否存在 |
|
||||
|
||||
## 兼容性
|
||||
|
||||
- **标准模式**(`make dev`):完全兼容,admin 自动创建
|
||||
- **Gateway 模式**(`make dev-pro`):完全兼容
|
||||
- **Docker 部署**:完全兼容,`.deer-flow/users.db` 需持久化卷挂载
|
||||
- **IM 渠道**(Feishu/Slack/Telegram):通过 LangGraph SDK 通信,不经过认证层
|
||||
- **DeerFlowClient**(嵌入式):不经过 HTTP,不受认证影响
|
||||
|
||||
## 故障排查
|
||||
|
||||
| 症状 | 原因 | 解决 |
|
||||
|------|------|------|
|
||||
| 启动后没看到密码 | admin 已存在(非首次启动) | 用 `reset_admin` 重置,或删 `users.db` |
|
||||
| 登录后 POST 返回 403 | CSRF token 缺失 | 确认前端已更新 |
|
||||
| 重启后需要重新登录 | `AUTH_JWT_SECRET` 未持久化 | 在 `.env` 中设置固定密钥 |
|
||||
@@ -248,7 +248,7 @@ def after_agent(self, state: TitleMiddlewareState, runtime: Runtime) -> dict | N
|
||||
- [`packages/harness/deerflow/agents/thread_state.py`](../packages/harness/deerflow/agents/thread_state.py) - ThreadState 定义
|
||||
- [`packages/harness/deerflow/agents/middlewares/title_middleware.py`](../packages/harness/deerflow/agents/middlewares/title_middleware.py) - TitleMiddleware 实现
|
||||
- [`packages/harness/deerflow/config/title_config.py`](../packages/harness/deerflow/config/title_config.py) - 配置管理
|
||||
- [`config.yaml`](../config.yaml) - 配置文件
|
||||
- [`config.yaml`](../../config.example.yaml) - 配置文件
|
||||
- [`packages/harness/deerflow/agents/lead_agent/agent.py`](../packages/harness/deerflow/agents/lead_agent/agent.py) - Middleware 注册
|
||||
|
||||
## 参考资料
|
||||
|
||||
@@ -192,8 +192,8 @@ tools:
|
||||
```
|
||||
|
||||
**Built-in Tools**:
|
||||
-- `web_search` - Search the web (Tavily)
-- `web_fetch` - Fetch web pages (Jina AI)
+- `web_search` - Search the web (DuckDuckGo, Tavily, Exa, InfoQuest, Firecrawl)
+- `web_fetch` - Fetch web pages (Jina AI, Exa, InfoQuest, Firecrawl)
|
||||
- `ls` - List directory contents
|
||||
- `read_file` - Read file contents
|
||||
- `write_file` - Write file contents
|
||||
@@ -257,6 +257,8 @@ sandbox:
|
||||
read_only: false
|
||||
```
|
||||
|
||||
When you configure `sandbox.mounts`, DeerFlow exposes those `container_path` values in the agent prompt so the agent can discover and operate on mounted directories directly instead of assuming everything must live under `/mnt/user-data`.
|
||||
|
||||
### Skills
|
||||
|
||||
Configure the skills directory for specialized workflows:
|
||||
@@ -276,6 +278,12 @@ skills:
|
||||
- Skills are automatically discovered and loaded
|
||||
- Available in both local and Docker sandbox via path mapping
|
||||
|
||||
**Per-Agent Skill Filtering**:
|
||||
Custom agents can restrict which skills they load by defining a `skills` field in their `config.yaml` (located at `workspace/agents/<agent_name>/config.yaml`); the three possible values are listed below, with a sketch after the list:
|
||||
- **Omitted or `null`**: Loads all globally enabled skills (default fallback).
|
||||
- **`[]` (empty list)**: Disables all skills for this specific agent.
|
||||
- **`["skill-name"]`**: Loads only the explicitly specified skills.
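
A minimal sketch of how these three cases can be resolved; the function below is illustrative and not the actual DeerFlow loader:

```python
def resolve_agent_skills(agent_cfg: dict, enabled_skills: list[str]) -> list[str]:
    """Illustrative only: map a per-agent `skills` field to the skills to load."""
    skills = agent_cfg.get("skills")          # absent key -> None
    if skills is None:                        # omitted or null: inherit the global set
        return list(enabled_skills)
    if not skills:                            # []: this agent loads no skills
        return []
    return [s for s in enabled_skills if s in skills]   # explicit allow-list

# resolve_agent_skills({}, ["pdf-report", "web-clip"])                         -> both skills
# resolve_agent_skills({"skills": []}, ["pdf-report", "web-clip"])             -> []
# resolve_agent_skills({"skills": ["pdf-report"]}, ["pdf-report", "web-clip"]) -> ["pdf-report"]
```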
|
||||
|
||||
### Title Generation
|
||||
|
||||
Automatic conversation title generation:
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
|
||||
## 概述
|
||||
|
||||
DeerFlow 后端提供了完整的文件上传功能,支持多文件上传,并自动将 Office 文档和 PDF 转换为 Markdown 格式。
|
||||
DeerFlow 后端提供了完整的文件上传功能,支持多文件上传,并可选地将 Office 文档和 PDF 转换为 Markdown 格式。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- ✅ 支持多文件同时上传
|
||||
- ✅ 自动转换文档为 Markdown(PDF、PPT、Excel、Word)
|
||||
- ✅ 可选地转换文档为 Markdown(PDF、PPT、Excel、Word)
|
||||
- ✅ 文件存储在线程隔离的目录中
|
||||
- ✅ Agent 自动感知已上传的文件
|
||||
- ✅ 支持文件列表查询和删除
|
||||
@@ -86,7 +86,7 @@ DELETE /api/threads/{thread_id}/uploads/{filename}
|
||||
|
||||
## 支持的文档格式
|
||||
|
||||
以下格式会自动转换为 Markdown:
|
||||
以下格式在显式启用 `uploads.auto_convert_documents: true` 时会自动转换为 Markdown:
|
||||
- PDF (`.pdf`)
|
||||
- PowerPoint (`.ppt`, `.pptx`)
|
||||
- Excel (`.xls`, `.xlsx`)
|
||||
@@ -94,6 +94,8 @@ DELETE /api/threads/{thread_id}/uploads/{filename}
|
||||
|
||||
转换后的 Markdown 文件会保存在同一目录下,文件名为原文件名 + `.md` 扩展名。
|
||||
|
||||
默认情况下,自动转换是关闭的,以避免在网关主机上对不受信任的 Office/PDF 上传执行解析。只有在受信任部署中明确接受此风险时,才应将 `uploads.auto_convert_documents` 设置为 `true`。
|
||||
|
||||
## Agent 集成
|
||||
|
||||
### 自动文件列举
|
||||
@@ -207,6 +209,7 @@ backend/.deer-flow/threads/
|
||||
- 最大文件大小:100MB(可在 nginx.conf 中配置 `client_max_body_size`)
|
||||
- 文件名安全性:系统会自动验证文件路径,防止目录遍历攻击
|
||||
- 线程隔离:每个线程的上传文件相互隔离,无法跨线程访问
|
||||
- 自动文档转换默认关闭;如需启用,需在 `config.yaml` 中显式设置 `uploads.auto_convert_documents: true`
|
||||
|
||||
## 技术实现
|
||||
|
||||
|
||||
@@ -296,7 +296,7 @@ These are the tool names your provider will see in `request.tool_name`:
|
||||
| `web_search` | Web search query |
|
||||
| `web_fetch` | Fetch URL content |
|
||||
| `image_search` | Image search |
|
||||
-| `present_file` | Present file to user |
+| `present_files` | Present file to user |
|
||||
| `view_image` | Display image |
|
||||
| `ask_clarification` | Ask user a question |
|
||||
| `task` | Delegate to subagent |
|
||||
|
||||
@@ -45,6 +45,41 @@ Example:
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Tool Interceptors
|
||||
|
||||
You can register custom interceptors that run before every MCP tool call. This is useful for injecting per-request headers (e.g., user auth tokens from the LangGraph execution context), logging, or metrics.
|
||||
|
||||
Declare interceptors in `extensions_config.json` using the `mcpInterceptors` field:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpInterceptors": [
|
||||
"my_package.mcp.auth:build_auth_interceptor"
|
||||
],
|
||||
"mcpServers": { ... }
|
||||
}
|
||||
```
|
||||
|
||||
Each entry is a Python import path in `module:variable` format (resolved via `resolve_variable`). The variable must be a **no-arg builder function** that returns an async interceptor compatible with `MultiServerMCPClient`’s `tool_interceptors` interface, or `None` to skip.
|
||||
|
||||
Example interceptor that injects auth headers from LangGraph metadata:
|
||||
|
||||
```python
|
||||
def build_auth_interceptor():
|
||||
async def interceptor(request, handler):
|
||||
from langgraph.config import get_config
|
||||
metadata = get_config().get("metadata", {})
|
||||
headers = dict(request.headers or {})
|
||||
if token := metadata.get("auth_token"):
|
||||
headers["X-Auth-Token"] = token
|
||||
return await handler(request.override(headers=headers))
|
||||
return interceptor
|
||||
```
|
||||
|
||||
- A single string value is accepted and normalized to a one-element list.
|
||||
- Invalid paths or builder failures are logged as warnings without blocking other interceptors.
|
||||
- The builder return value must be `callable`; non-callable values are skipped with a warning.
|
||||
|
||||
## How It Works
|
||||
|
||||
MCP servers expose tools that are automatically discovered and integrated into DeerFlow’s agent system at runtime. Once enabled, these tools become available to agents without additional code changes.
|
||||
|
||||
@@ -15,6 +15,7 @@ This directory contains detailed documentation for the DeerFlow backend.
|
||||
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [STREAMING.md](STREAMING.md) | Token-level streaming design: Gateway vs DeerFlowClient paths, `stream_mode` semantics, per-id dedup |
|
||||
| [FILE_UPLOAD.md](FILE_UPLOAD.md) | File upload functionality |
|
||||
| [PATH_EXAMPLES.md](PATH_EXAMPLES.md) | Path types and usage examples |
|
||||
| [summarization.md](summarization.md) | Context summarization feature |
|
||||
@@ -47,6 +48,7 @@ docs/
|
||||
├── PATH_EXAMPLES.md # Path usage examples
|
||||
├── summarization.md # Summarization feature
|
||||
├── plan_mode_usage.md # Plan mode feature
|
||||
├── STREAMING.md # Token-level streaming design
|
||||
├── AUTO_TITLE_GENERATION.md # Title generation
|
||||
├── TITLE_GENERATION_IMPLEMENTATION.md # Title implementation details
|
||||
└── TODO.md # Roadmap and issues
|
||||
|
||||
@@ -0,0 +1,351 @@
|
||||
# DeerFlow 流式输出设计
|
||||
|
||||
本文档解释 DeerFlow 是如何把 LangGraph agent 的事件流端到端送到两类消费者(HTTP 客户端、嵌入式 Python 调用方)的:两条路径为什么**必须**并存、它们各自的契约是什么、以及设计里那些 non-obvious 的不变式。
|
||||
|
||||
---
|
||||
|
||||
## TL;DR
|
||||
|
||||
- DeerFlow 有**两条并行**的流式路径:**Gateway 路径**(async / HTTP SSE / JSON 序列化)服务浏览器和 IM 渠道;**DeerFlowClient 路径**(sync / in-process / 原生 LangChain 对象)服务 Jupyter、脚本、测试。它们**无法合并**——消费者模型不同。
|
||||
- 两条路径都从 `create_agent()` 工厂出发,核心都是订阅 LangGraph 的 `stream_mode=["values", "messages", "custom"]`。`values` 是节点级 state 快照,`messages` 是 LLM token 级 delta,`custom` 是显式 `StreamWriter` 事件。**这三种模式不是详细程度的梯度,是三个独立的事件源**,要 token 流就必须显式订阅 `messages`。
|
||||
- 嵌入式 client 为每个 `stream()` 调用维护三个 `set[str]`:`seen_ids` / `streamed_ids` / `counted_usage_ids`。三者看起来相似但管理**三个独立的不变式**,不能合并。
|
||||
|
||||
---
|
||||
|
||||
## 为什么有两条流式路径
|
||||
|
||||
两条路径服务的消费者模型根本不同:
|
||||
|
||||
| 维度 | Gateway 路径 | DeerFlowClient 路径 |
|
||||
|---|---|---|
|
||||
| 入口 | FastAPI `/runs/stream` endpoint | `DeerFlowClient.stream(message)` |
|
||||
| 触发层 | `runtime/runs/worker.py::run_agent` | `packages/harness/deerflow/client.py::DeerFlowClient.stream` |
|
||||
| 执行模型 | `async def` + `agent.astream()` | sync generator + `agent.stream()` |
|
||||
| 事件传输 | `StreamBridge`(asyncio Queue)+ `sse_consumer` | 直接 `yield` |
|
||||
| 序列化 | `serialize(chunk)` → 纯 JSON dict,匹配 LangGraph Platform wire 格式 | `StreamEvent.data`,携带原生 LangChain 对象 |
|
||||
| 消费者 | 前端 `useStream` React hook、飞书/Slack/Telegram channel、LangGraph SDK 客户端 | Jupyter notebook、集成测试、内部 Python 脚本 |
|
||||
| 生命周期管理 | `RunManager`:run_id 跟踪、disconnect 语义、multitask 策略、heartbeat | 无;函数返回即结束 |
|
||||
| 断连恢复 | `Last-Event-ID` SSE 重连 | 无需要 |
|
||||
|
||||
**两条路径的存在是 DRY 的刻意妥协**:Gateway 的全部基础设施(async + Queue + JSON + RunManager)**都是为了跨网络边界把事件送给 HTTP 消费者**。当生产者(agent)和消费者(Python 调用栈)在同一个进程时,这整套东西都是纯开销。
|
||||
|
||||
### 为什么不能让 DeerFlowClient 复用 Gateway
|
||||
|
||||
曾经考虑过三种复用方案,都被否决:
|
||||
|
||||
1. **让 `client.stream()` 变成 `async def client.astream()`**
|
||||
breaking change。用户用不上的 `async for` / `asyncio.run()` 要硬塞进 Jupyter notebook 和同步脚本。DeerFlowClient 的一大卖点("把 agent 当普通函数调用")直接消失。
|
||||
|
||||
2. **在 `client.stream()` 内部起一个独立事件循环线程,用 `StreamBridge` 在 sync/async 之间做桥接**
|
||||
引入线程池、队列、信号量。为了"消除重复",把**复杂度**代替代码行数引进来。是典型的"wrong abstraction"——开销高于复用收益。
|
||||
|
||||
3. **让 `run_agent` 自己兼容 sync mode**
|
||||
给 Gateway 加一条用不到的死分支,污染 worker.py 的焦点。
|
||||
|
||||
所以两条路径的事件处理逻辑会**相似但不共享**。这是刻意设计,不是疏忽。
|
||||
|
||||
---
|
||||
|
||||
## LangGraph `stream_mode` 三层语义
|
||||
|
||||
LangGraph 的 `agent.stream(stream_mode=[...])` 是**多路复用**接口:一次订阅多个 mode,每个 mode 是一个独立的事件源。三种核心 mode:
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
classDef values fill:#B8C5D1,stroke:#5A6B7A,color:#2C3E50
|
||||
classDef messages fill:#C9B8A8,stroke:#7A6B5A,color:#2C3E50
|
||||
classDef custom fill:#B5C4B1,stroke:#5A7A5A,color:#2C3E50
|
||||
|
||||
subgraph LG["LangGraph agent graph"]
|
||||
direction TB
|
||||
Node1["node: LLM call"]
|
||||
Node2["node: tool call"]
|
||||
Node3["node: reducer"]
|
||||
end
|
||||
|
||||
LG -->|"每个节点完成后"| V["values: 完整 state 快照"]
|
||||
Node1 -->|"LLM 每产生一个 token"| M["messages: (AIMessageChunk, meta)"]
|
||||
Node1 -->|"StreamWriter.write()"| C["custom: 任意 dict"]
|
||||
|
||||
class V values
|
||||
class M messages
|
||||
class C custom
|
||||
```
|
||||
|
||||
| Mode | 发射时机 | Payload | 粒度 |
|
||||
|---|---|---|---|
|
||||
| `values` | 每个 graph 节点完成后 | 完整 state dict(title、messages、artifacts)| 节点级 |
|
||||
| `messages` | LLM 每次 yield 一个 chunk;tool 节点完成时 | `(AIMessageChunk \| ToolMessage, metadata_dict)` | token 级 |
|
||||
| `custom` | 用户代码显式调用 `StreamWriter.write()` | 任意 dict | 应用定义 |
|
||||
|
||||
### 两套命名的由来
|
||||
|
||||
同一件事在**三个协议层**有三个名字:
|
||||
|
||||
```
|
||||
Application HTTP / SSE LangGraph Graph
|
||||
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
|
||||
│ frontend │ │ LangGraph │ │ agent.astream│
|
||||
│ useStream │──"messages- │ Platform SDK │──"messages"──│ graph.astream│
|
||||
│ Feishu IM │ tuple"──────│ HTTP wire │ │ │
|
||||
└──────────────┘ └──────────────┘ └──────────────┘
|
||||
```
|
||||
|
||||
- **Graph 层**(`agent.stream` / `agent.astream`):LangGraph Python 直接 API,mode 叫 **`"messages"`**。
|
||||
- **Platform SDK 层**(`langgraph-sdk` HTTP client):跨进程 HTTP 契约,mode 叫 **`"messages-tuple"`**。
|
||||
- **Gateway worker** 显式做翻译:`if m == "messages-tuple": lg_modes.append("messages")`(`runtime/runs/worker.py:117-121`)。
|
||||
|
||||
**后果**:`DeerFlowClient.stream()` 直接调 `agent.stream()`(Graph 层),所以必须传 `"messages"`。`app/channels/manager.py` 通过 `langgraph-sdk` 走 HTTP SDK,所以传 `"messages-tuple"`。**这两个字符串不能互相替代**,也不能抽成"一个共享常量"——它们是不同协议层的 type alias,共享只会让某一层说不是它母语的话。
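
A minimal sketch of the Gateway-side translation quoted above; only the quoted comparison comes from worker.py, the loop scaffolding is illustrative:

```python
# Illustrative only: translate Platform SDK mode names to Graph-level names.
requested = ["values", "messages-tuple", "custom"]   # what an HTTP client sends
lg_modes: list[str] = []
for m in requested:
    if m == "messages-tuple":
        lg_modes.append("messages")   # Graph-level name for token deltas
    else:
        lg_modes.append(m)
# lg_modes == ["values", "messages", "custom"], passed to agent.astream(stream_mode=lg_modes)
```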
|
||||
|
||||
---
|
||||
|
||||
## Gateway 路径:async + HTTP SSE
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client as HTTP Client
|
||||
participant API as FastAPI<br/>thread_runs.py
|
||||
participant Svc as services.py<br/>start_run
|
||||
participant Worker as worker.py<br/>run_agent (async)
|
||||
participant Bridge as StreamBridge<br/>(asyncio.Queue)
|
||||
participant Agent as LangGraph<br/>agent.astream
|
||||
participant SSE as sse_consumer
|
||||
|
||||
Client->>API: POST /runs/stream
|
||||
API->>Svc: start_run(body)
|
||||
Svc->>Bridge: create bridge
|
||||
Svc->>Worker: asyncio.create_task(run_agent(...))
|
||||
Svc-->>API: StreamingResponse(sse_consumer)
|
||||
API-->>Client: event-stream opens
|
||||
|
||||
par worker (producer)
|
||||
Worker->>Agent: astream(stream_mode=lg_modes)
|
||||
loop 每个 chunk
|
||||
Agent-->>Worker: (mode, chunk)
|
||||
Worker->>Bridge: publish(run_id, event, serialize(chunk))
|
||||
end
|
||||
Worker->>Bridge: publish_end(run_id)
|
||||
and sse_consumer (consumer)
|
||||
SSE->>Bridge: subscribe(run_id)
|
||||
loop 每个 event
|
||||
Bridge-->>SSE: StreamEvent
|
||||
SSE-->>Client: "event: <name>\ndata: <json>\n\n"
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
关键组件:
|
||||
|
||||
- `runtime/runs/worker.py::run_agent` — 在 `asyncio.Task` 里跑 `agent.astream()`,把每个 chunk 通过 `serialize(chunk, mode=mode)` 转成 JSON,再 `bridge.publish()`。
|
||||
- `runtime/stream_bridge` — 抽象 Queue。`publish/subscribe` 解耦生产者和消费者,支持 `Last-Event-ID` 重连、心跳、多订阅者 fan-out。
|
||||
- `app/gateway/services.py::sse_consumer` — 从 bridge 订阅,格式化为 SSE wire 帧。
|
||||
- `runtime/serialization.py::serialize` — mode-aware 序列化;`messages` mode 下 `serialize_messages_tuple` 把 `(chunk, metadata)` 转成 `[chunk.model_dump(), metadata]`。
|
||||
|
||||
**`StreamBridge` 的存在价值**:当生产者(`run_agent` 任务)和消费者(HTTP 连接)在不同的 asyncio task 里运行时,需要一个可以跨 task 传递事件的中介。Queue 同时还承担断连重连的 buffer 和多订阅者的 fan-out。
|
||||
|
||||
---
|
||||
|
||||
## DeerFlowClient 路径:sync + in-process
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User as Python caller
|
||||
participant Client as DeerFlowClient.stream
|
||||
participant Agent as LangGraph<br/>agent.stream (sync)
|
||||
|
||||
User->>Client: for event in client.stream("hi"):
|
||||
Client->>Agent: stream(stream_mode=["values","messages","custom"])
|
||||
loop 每个 chunk
|
||||
Agent-->>Client: (mode, chunk)
|
||||
Client->>Client: 分发 mode<br/>构建 StreamEvent
|
||||
Client-->>User: yield StreamEvent
|
||||
end
|
||||
Client-->>User: yield StreamEvent(type="end")
|
||||
```
|
||||
|
||||
对比之下,sync 路径的每个环节都是显著更少的移动部件:
|
||||
|
||||
- 没有 `RunManager` —— 一次 `stream()` 调用对应一次生命周期,无需 run_id。
|
||||
- 没有 `StreamBridge` —— 直接 `yield`,生产和消费在同一个 Python 调用栈,不需要跨 task 中介。
|
||||
- 没有 JSON 序列化 —— `StreamEvent.data` 直接装原生 LangChain 对象(`AIMessage.content`、`usage_metadata` 的 `UsageMetadata` TypedDict)。Jupyter 用户拿到的是真正的类型,不是匿名 dict。
|
||||
- 没有 asyncio —— 调用者可以直接 `for event in ...`,不必写 `async for`。
|
||||
|
||||
---
|
||||
|
||||
## 消费语义:delta vs cumulative
|
||||
|
||||
LangGraph `messages` mode 给出的是 **delta**:每个 `AIMessageChunk.content` 只包含这一次新 yield 的 token,**不是**从头的累计文本。
|
||||
|
||||
这个语义和 LangChain 的 `fs2 Stream` 风格一致:**上游发增量,下游负责累加**。Gateway 路径里前端 `useStream` React hook 自己维护累加器;DeerFlowClient 路径里 `chat()` 方法替调用者做累加。
|
||||
|
||||
### `DeerFlowClient.chat()` 的 O(n) 累加器
|
||||
|
||||
```python
|
||||
chunks: dict[str, list[str]] = {}
|
||||
last_id: str = ""
|
||||
for event in self.stream(message, thread_id=thread_id, **kwargs):
|
||||
if event.type == "messages-tuple" and event.data.get("type") == "ai":
|
||||
msg_id = event.data.get("id") or ""
|
||||
delta = event.data.get("content", "")
|
||||
if delta:
|
||||
chunks.setdefault(msg_id, []).append(delta)
|
||||
last_id = msg_id
|
||||
return "".join(chunks.get(last_id, ()))
|
||||
```
|
||||
|
||||
**为什么不是 `buffers[id] = buffers.get(id,"") + delta`**:CPython 的字符串 in-place concat 优化仅在 refcount=1 且 LHS 是 local name 时生效;这里字符串存在 dict 里被 reassign,优化失效,每次都是 O(n) 拷贝 → 总体 O(n²)。实测 50 KB / 5000 chunk 的回复要 100-300ms 纯拷贝开销。用 `list` + `"".join()` 是 O(n)。
|
||||
|
||||
---
|
||||
|
||||
## 三个 id set 为什么不能合并
|
||||
|
||||
`DeerFlowClient.stream()` 在一次调用生命周期内维护三个 `set[str]`:
|
||||
|
||||
```python
|
||||
seen_ids: set[str] = set() # values 路径内部 dedup
|
||||
streamed_ids: set[str] = set() # messages → values 跨模式 dedup
|
||||
counted_usage_ids: set[str] = set() # usage_metadata 幂等计数
|
||||
```
|
||||
|
||||
乍看像是"三份几乎一样的东西",实际每个管**不同的不变式**。
|
||||
|
||||
| Set | 负责的不变式 | 被谁填充 | 被谁查询 |
|
||||
|---|---|---|---|
|
||||
| `seen_ids` | 连续两个 `values` 快照里同一条 message 只生成一个 `messages-tuple` 事件 | values 分支每处理一条消息就加入 | values 分支处理下一条消息前检查 |
|
||||
| `streamed_ids` | 如果一条消息已经通过 `messages` 模式 token 级流过,values 快照到达时**不要**再合成一次完整 `messages-tuple` | messages 分支每发一个 AI/tool 事件就加入 | values 分支看到消息时检查 |
|
||||
| `counted_usage_ids` | 同一个 `usage_metadata` 在 messages 末尾 chunk 和 values 快照的 final AIMessage 里各带一份,**累计总量只算一次** | `_account_usage()` 每次接受 usage 就加入 | `_account_usage()` 每次调用时检查 |
|
||||
|
||||
### 为什么不能只用一个 set
|
||||
|
||||
关键观察:**同一个 message id 在这三个 set 里的加入时机不同**。
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant M as messages mode
|
||||
participant V as values mode
|
||||
participant SS as streamed_ids
|
||||
participant SU as counted_usage_ids
|
||||
participant SE as seen_ids
|
||||
|
||||
Note over M: 第一个 AI text chunk 到达
|
||||
M->>SS: add(msg_id)
|
||||
Note over M: 最后一个 chunk 带 usage
|
||||
M->>SU: add(msg_id)
|
||||
Note over V: snapshot 到达,包含同一条 AI message
|
||||
V->>SE: add(msg_id)
|
||||
V->>SS: 查询 → 已存在,跳过文本合成
|
||||
V->>SU: 查询 → 已存在,不重复计数
|
||||
```
|
||||
|
||||
- `seen_ids` **永远在 values 快照到达时**加入,所以它是 "values 已处理" 的标记。一条只出现在 messages 流里的消息(罕见但可能),`seen_ids` 里永远没有它。
|
||||
- `streamed_ids` **在 messages 流的第一个有效事件时**加入。一条只通过 values 快照到达的非 AI 消息(HumanMessage、被 truncate 的 tool 消息),`streamed_ids` 里永远没有它。
|
||||
- `counted_usage_ids` **只在看到非空 `usage_metadata` 时**加入。一条完全没有 usage 的消息(tool message、错误消息)永远不会进去。
|
||||
|
||||
**集合包含关系**:`counted_usage_ids ⊆ (streamed_ids ∪ seen_ids)` 大致成立,但**不是严格子集**,因为一条消息可以在 messages 模式流完 text 但**在最后那个带 usage 的 chunk 之前**就被 values snapshot 赶上——此时它已经在 `streamed_ids` 里,但还不在 `counted_usage_ids` 里。把它们合并成一个 dict-of-flags 会让这个微妙的时序依赖**从类型系统里消失**,变成注释里的一句话。三个独立的 set 把不变式显式化了:每个 set 名对应一个可以口头回答的问题。
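
A minimal sketch of how the three sets are consulted on the values branch; the function, the `emit` callback, and the inline usage accumulation are simplified stand-ins, not the actual client code:

```python
def handle_values_snapshot(snapshot, seen_ids, streamed_ids, counted_usage_ids, total_usage, emit):
    """Illustrative only: the values branch consulting all three id sets."""
    for msg in snapshot.get("messages", []):
        msg_id = getattr(msg, "id", None) or ""
        if msg_id in seen_ids:
            continue                          # this snapshot row was already handled
        seen_ids.add(msg_id)

        if msg_id not in streamed_ids:
            # Never token-streamed, so synthesize one full messages-tuple event.
            emit(msg)                         # placeholder callback

        usage = getattr(msg, "usage_metadata", None)
        if usage and msg_id not in counted_usage_ids:
            counted_usage_ids.add(msg_id)     # count cumulative usage exactly once
            for k, v in dict(usage).items():
                if isinstance(v, int):
                    total_usage[k] = total_usage.get(k, 0) + v
    return total_usage
```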
|
||||
|
||||
---
|
||||
|
||||
## 端到端:一次真实对话的事件时序
|
||||
|
||||
假设调用 `client.stream("Count from 1 to 15")`,LLM 给出 "one\ntwo\n...\nfifteen"(88 字符),tokenizer 把它拆成 ~35 个 BPE chunk。下面是事件到达序列的精简版:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant U as User
|
||||
participant C as DeerFlowClient
|
||||
participant A as LangGraph<br/>agent.stream
|
||||
|
||||
U->>C: stream("Count ... 15")
|
||||
C->>A: stream(mode=["values","messages","custom"])
|
||||
|
||||
A-->>C: ("values", {messages: [HumanMessage]})
|
||||
C-->>U: StreamEvent(type="values", ...)
|
||||
|
||||
Note over A,C: LLM 开始 yield token
|
||||
loop 35 次,约 476ms
|
||||
A-->>C: ("messages", (AIMessageChunk(content="ele"), meta))
|
||||
C->>C: streamed_ids.add(ai-1)
|
||||
C-->>U: StreamEvent(type="messages-tuple",<br/>data={type:ai, content:"ele", id:ai-1})
|
||||
end
|
||||
|
||||
Note over A: LLM finish_reason=stop,最后一个 chunk 带 usage
|
||||
A-->>C: ("messages", (AIMessageChunk(content="", usage_metadata={...}), meta))
|
||||
C->>C: counted_usage_ids.add(ai-1)<br/>(无文本,不 yield)
|
||||
|
||||
A-->>C: ("values", {messages: [..., AIMessage(complete)]})
|
||||
C->>C: ai-1 in streamed_ids → 跳过合成
|
||||
C->>C: 捕获 usage (已在 counted_usage_ids,no-op)
|
||||
C-->>U: StreamEvent(type="values", ...)
|
||||
|
||||
C-->>U: StreamEvent(type="end", data={usage:{...}})
|
||||
```
|
||||
|
||||
关键观察:
|
||||
|
||||
1. 用户看到 **35 个 messages-tuple 事件**,跨越约 476ms,每个事件带一个 token delta 和同一个 `id=ai-1`。
|
||||
2. 最后一个 `values` 快照里的 `AIMessage` **不会**再触发一个完整的 `messages-tuple` 事件——因为 `ai-1 in streamed_ids` 跳过了合成。
|
||||
3. `end` 事件里的 `usage` 正好等于那一份 cumulative usage,**不是它的两倍**——`counted_usage_ids` 在 messages 末尾 chunk 上已经吸收了,values 分支的重复访问是 no-op。
|
||||
4. 消费者拿到的 `content` 是**增量**:"ele" 只包含 3 个字符,不是 "one\ntwo\n...ele"。想要完整文本要按 `id` 累加,`chat()` 已经帮你做了。
|
||||
|
||||
---
|
||||
|
||||
## 为什么这个设计容易出 bug,以及测试策略
|
||||
|
||||
本文档的直接起因是 bytedance/deer-flow#1969:`DeerFlowClient.stream()` 原本只订阅 `["values", "custom"]`,**漏了 `"messages"`**。结果 `client.stream("hello")` 等价于一次性返回,视觉上和 `chat()` 没区别。
|
||||
|
||||
这类 bug 有三个结构性原因:
|
||||
|
||||
1. **多协议层命名**:`messages` / `messages-tuple` / HTTP SSE `messages` 是同一概念的三个名字。在其中一层出错不会在另外两层报错。
|
||||
2. **多消费者模型**:Gateway 和 DeerFlowClient 是两套独立实现,**没有单一的"订阅哪些 mode"的 single source of truth**。前者订阅对了不代表后者也订阅对了。
|
||||
3. **mock 测试绕开了真实路径**:老测试用 `agent.stream.return_value = iter([dict_chunk, ...])` 喂 values 形状的 dict 模拟 state 快照。这样构造的输入**永远不会进入 `messages` mode 分支**,所以即使 `stream_mode` 里少一个元素,CI 依然全绿。
|
||||
|
||||
### 防御手段
|
||||
|
||||
真正的防线是**显式断言 "messages" mode 被订阅 + 用真实 chunk shape mock**:
|
||||
|
||||
```python
|
||||
# tests/test_client.py::test_messages_mode_emits_token_deltas
|
||||
agent.stream.return_value = iter([
|
||||
("messages", (AIMessageChunk(content="Hel", id="ai-1"), {})),
|
||||
("messages", (AIMessageChunk(content="lo ", id="ai-1"), {})),
|
||||
("messages", (AIMessageChunk(content="world!", id="ai-1"), {})),
|
||||
("values", {"messages": [HumanMessage(...), AIMessage(content="Hello world!", id="ai-1")]}),
|
||||
])
|
||||
# ...
|
||||
assert [e.data["content"] for e in ai_text_events] == ["Hel", "lo ", "world!"]
|
||||
assert len(ai_text_events) == 3 # values snapshot must NOT re-synthesize
|
||||
assert "messages" in agent.stream.call_args.kwargs["stream_mode"]
|
||||
```

**Why this beats "extracting a shared constant"**: a shared constant only guarantees that whoever uses it spells the string correctly; someone adding a new consumer may not even know the constant exists. A behavioral assertion forces every change to pass through the **actual execution path**: reverting to `["values", "custom"]` immediately fails `assert "messages" in ...`.

### A living signal: BPE subword boundaries

The final verification of the regression fix is to have a real LLM count from 1 to 15 and check whether the tokenizer's subword splits show up in the output:

```
[5.460s] 'ele' / 'ven'           eleven split into two tokens
[5.508s] 'tw' / 'elve'           twelve split into two
[5.568s] 'th' / 'irteen'         thirteen split into two
[5.623s] 'four' / 'teen'         fourteen split into two
[5.677s] 'f' / 'if' / 'teen'     fifteen split into three
```

Subword splits are an external fact of the tokenizer and **cannot be faked**. Seeing them proves the data flowed **chunk by chunk** through the entire pipeline, with no intermediate layer buffering it into a single block. In a streaming system, this kind of "living signal" is higher-confidence evidence than a unit test.
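
The check itself can be a throwaway script. The sketch below assumes the `DeerFlowClient.stream()` / `StreamEvent` interface described above (the import path and constructor are assumptions) and simply prints arrival times per chunk; it is not part of the test suite.

```python
import time

from deerflow.client import DeerFlowClient  # assumed import path, for illustration

client = DeerFlowClient()  # constructor arguments omitted; depends on local config
start = time.monotonic()

for event in client.stream("Count from 1 to 15"):
    if event.type != "messages-tuple":
        continue
    elapsed = time.monotonic() - start
    # One line per token delta; subword fragments like 'ele' / 'ven' are the
    # signal that nothing upstream buffered the output into a single block.
    print(f"[{elapsed:6.3f}s] {event.data.get('content', '')!r}")
```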

---

## Where to look in the source

| What you care about | Where to look |
|---|---|
| DeerFlowClient embedded streaming | `packages/harness/deerflow/client.py::DeerFlowClient.stream` |
| The delta accumulator in `chat()` | `packages/harness/deerflow/client.py::DeerFlowClient.chat` |
| Gateway async streaming | `packages/harness/deerflow/runtime/runs/worker.py::run_agent` |
| HTTP SSE frame output | `app/gateway/services.py::sse_consumer` / `format_sse` |
| Serialization to the wire format | `packages/harness/deerflow/runtime/serialization.py` |
| LangGraph mode name translation | `packages/harness/deerflow/runtime/runs/worker.py:117-121` |
| Incremental card updates in the Feishu channel | `app/channels/manager.py::_handle_streaming_chat` |
| The channels' own defensive delta/cumulative accumulation | `app/channels/manager.py::_merge_stream_text` |
| Mode set supported by the frontend useStream | `frontend/src/core/api/stream-mode.ts` |
| Core regression test | `backend/tests/test_client.py::TestStream::test_messages_mode_emits_token_deltas` |

@@ -30,7 +30,7 @@
|
||||
|
||||
### 2. 配置文件
|
||||
|
||||
#### [`config.yaml`](../config.yaml)
|
||||
#### [`config.yaml`](../../config.example.yaml)
|
||||
- ✅ 添加 title 配置段:
|
||||
```yaml
|
||||
title:
|
||||
@@ -51,7 +51,7 @@ title:
|
||||
- ✅ 故障排查指南
|
||||
- ✅ State vs Metadata 对比
|
||||
|
||||
#### [`BACKEND_TODO.md`](../BACKEND_TODO.md)
|
||||
#### [`TODO.md`](TODO.md)
|
||||
- ✅ 添加功能完成记录
|
||||
|
||||
### 4. 测试
|
||||
@@ -124,7 +124,7 @@ title:
|
||||
# checkpointer.py
|
||||
from langgraph.checkpoint.sqlite import SqliteSaver
|
||||
|
||||
checkpointer = SqliteSaver.from_conn_string("checkpoints.db")
|
||||
checkpointer = SqliteSaver.from_conn_string("deerflow.db")
|
||||
```
|
||||
|
||||
```json
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
- [x] Add Plan Mode with TodoList middleware
|
||||
- [x] Add vision model support with ViewImageMiddleware
|
||||
- [x] Skills system with SKILL.md format
|
||||
- [x] Replace `time.sleep(5)` with `asyncio.sleep()` in `packages/harness/deerflow/tools/builtins/task_tool.py` (subagent polling)
|
||||
|
||||
## Planned Features
|
||||
|
||||
@@ -21,10 +22,9 @@
|
||||
- [ ] Support for more document formats in upload
|
||||
- [ ] Skill marketplace / remote skill installation
|
||||
- [ ] Optimize async concurrency in agent hot path (IM channels multi-task scenario)
|
||||
- Replace `time.sleep(5)` with `asyncio.sleep()` in `packages/harness/deerflow/tools/builtins/task_tool.py` (subagent polling)
|
||||
- Replace `subprocess.run()` with `asyncio.create_subprocess_shell()` in `packages/harness/deerflow/sandbox/local/local_sandbox.py`
|
||||
- [ ] Replace `subprocess.run()` with `asyncio.create_subprocess_shell()` in `packages/harness/deerflow/sandbox/local/local_sandbox.py`
|
||||
- Replace sync `requests` with `httpx.AsyncClient` in community tools (tavily, jina_ai, firecrawl, infoquest, image_search)
|
||||
- Replace sync `model.invoke()` with async `model.ainvoke()` in title_middleware and memory updater
|
||||
- [x] Replace sync `model.invoke()` with async `model.ainvoke()` in title_middleware and memory updater
|
||||
- Consider `asyncio.to_thread()` wrapper for remaining blocking file I/O
|
||||
- For production: use `langgraph up` (multi-worker) instead of `langgraph dev` (single-worker)
|
||||
|
||||
|
||||
@@ -0,0 +1,446 @@

# [RFC] Add `grep` and `glob` file search tools to DeerFlow

## Summary

I think this direction is right, and worth doing.

If DeerFlow wants to get closer to the actual workflow of coding agents like Claude Code, `ls` / `read_file` / `write_file` / `str_replace` alone are not enough. Before moving on to edits, the model usually also needs two capabilities:

- `glob`: quickly find files by path pattern
- `grep`: quickly find candidate locations by content pattern

The value of these two tools is not that "bash could do this anyway"; it is that they replace the model's habit of repeatedly going through `bash find` / `bash grep` / `rg` with lower token cost, stronger constraints, and a more stable output format.

The precondition is getting the implementation right: **they should be read-only, structured, constrained, auditable native tools, not thin wrappers around shell commands.**

## Problem

DeerFlow's file tool layer currently covers:

- `ls`: browse the directory structure
- `read_file`: read file contents
- `write_file`: write files
- `str_replace`: perform local string replacements
- `bash`: catch-all command execution

This set can get tasks done, but it is inefficient during the codebase exploration phase.

Typical problems:

1. When the model wants "all `*.tsx` page files", it can only run `ls` over directory levels repeatedly, or fall back to `bash find`.
2. When the model wants to know "where a symbol / string / config key appears", it can only `read_file` one file at a time, or fall back to `bash grep` / `rg`.
3. Once it falls back to `bash`, the tool call loses structured output, and the results become much harder to trim, paginate, audit, and keep consistent across sandboxes.
4. In local mode without host bash enabled, `bash` may not even be available, leaving no sufficiently strong read-only search capability.

Conclusion: what DeerFlow is missing right now is not "one more shell command" but a **filesystem search layer**.

## Goals

- Give the agent stable path search and content search capabilities
- Reduce dependence on `bash`, especially during repository exploration
- Stay consistent with the existing sandbox security model
- Produce structured output that is easy for the model to chain into `read_file` / `str_replace`
- Let the local sandbox, container sandbox, and future MCP filesystem tools all follow the same semantics

## Non-Goals

- No general-purpose shell compatibility layer
- No exposure of the full grep/find/rg CLI syntax
- No binary search, complex PCRE features, context-window highlighting, or other heavyweight features in the first version
- Not an "arbitrary disk search"; execution is still limited to paths DeerFlow has already authorized

## Why This Is Worth Doing

Looking at the design of agents like Claude Code, the core value of `glob` and `grep` is not the new capability itself, but moving the common "explore the codebase" actions from an open-ended shell down to a controlled tool layer.

This brings several direct benefits:

1. **Lower burden on the model**
   The model no longer has to assemble `find`, `grep`, `rg`, `xargs`, quoting, and other command-line details itself.

2. **More stable behavior across environments**
   Local, Docker, and AIO sandboxes no longer depend on whether `rg` happens to be installed in the container, and shell differences can no longer cause behavior drift.

3. **Stronger security and auditability**
   The call parameters are simply "what to search, where to search, and how many results at most", which is inherently easier to audit and rate-limit than arbitrary commands.

4. **Better token efficiency**
   `grep` returns hit summaries rather than whole file contents, so the model only calls `read_file` on a handful of candidate paths.

5. **Friendly to `tool_search`**
   As DeerFlow keeps expanding its tool set, `grep` / `glob` will become very high-frequency base tools; they are worth keeping as built-ins rather than making the model fall back to generic bash.

## Proposal

Add two built-in sandbox tools:

- `glob`
- `grep`

Recommended location, same as today:

- `backend/packages/harness/deerflow/sandbox/tools.py`

and add them to the `file:read` group by default in `config.example.yaml`.

### 1. The `glob` tool

Purpose: find files or directories by path pattern.

Suggested schema:

```python
@tool("glob", parse_docstring=True)
def glob_tool(
    runtime: ToolRuntime[ContextT, ThreadState],
    description: str,
    pattern: str,
    path: str,
    include_dirs: bool = False,
    max_results: int = 200,
) -> str:
    ...
```

Parameter semantics:

- `description`: consistent with the existing tools
- `pattern`: a glob pattern, e.g. `**/*.py`, `src/**/test_*.ts`
- `path`: the search root; must be an absolute path
- `include_dirs`: whether directories are returned
- `max_results`: maximum number of entries returned, to avoid blowing up the context in one call

Suggested output format:

```text
Found 3 paths under /mnt/user-data/workspace
1. /mnt/user-data/workspace/backend/app.py
2. /mnt/user-data/workspace/backend/tests/test_app.py
3. /mnt/user-data/workspace/scripts/build.py
```

If frontend consumption becomes a concern later, this could be switched to a JSON string; for the first version, readable plain text keeps it consistent with the existing tool style.
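
As an illustration only, the traversal for the local-sandbox case could look like the minimal sketch below. The `IGNORED_DIRS` constant and helper names are assumptions, not existing code; the real implementation would live in `sandbox/tools.py` and reuse the existing output-formatting conventions.

```python
from pathlib import Path

# Hypothetical shared ignore set; see "Detailed Behavior" below.
IGNORED_DIRS = {".git", "node_modules", "__pycache__", ".venv"}


def _glob_paths(root: str, pattern: str, include_dirs: bool, max_results: int) -> list[str]:
    """Collect up to max_results matches under root, skipping ignored directories."""
    base = Path(root)
    results: list[str] = []
    for candidate in sorted(base.glob(pattern)):
        if any(part in IGNORED_DIRS for part in candidate.parts):
            continue
        if candidate.is_dir() and not include_dirs:
            continue
        results.append(str(candidate))
        if len(results) >= max_results:
            break
    return results


def _format_glob_result(root: str, paths: list[str]) -> str:
    """Render the readable-text format proposed above."""
    if not paths:
        return "No files matched"
    lines = [f"Found {len(paths)} paths under {root}"]
    lines += [f"{i}. {p}" for i, p in enumerate(paths, start=1)]
    return "\n".join(lines)
```

Sorting before truncation keeps the output deterministic across runs, which matters once the results are fed back to the model.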

### 2. The `grep` tool

Purpose: search files by content pattern and return a summary of hit locations.

Suggested schema:

```python
@tool("grep", parse_docstring=True)
def grep_tool(
    runtime: ToolRuntime[ContextT, ThreadState],
    description: str,
    pattern: str,
    path: str,
    glob: str | None = None,
    literal: bool = False,
    case_sensitive: bool = False,
    max_results: int = 100,
) -> str:
    ...
```

Parameter semantics:

- `pattern`: the search term or regex
- `path`: the search root; must be an absolute path
- `glob`: optional path filter, e.g. `**/*.py`
- `literal`: when `True`, match as a plain string rather than interpreting the pattern as a regex
- `case_sensitive`: whether matching is case sensitive
- `max_results`: maximum number of hits returned (hits, not files)

Suggested output format:

```text
Found 4 matches under /mnt/user-data/workspace
/mnt/user-data/workspace/backend/config.py:12: TOOL_GROUPS = [...]
/mnt/user-data/workspace/backend/config.py:48: def load_tool_config(...):
/mnt/user-data/workspace/backend/tools.py:91: "tool_groups"
/mnt/user-data/workspace/backend/tests/test_config.py:22: assert "tool_groups" in data
```

For the first version, return only:

- the file path
- the line number
- a summary of the matching line

Do not return surrounding context blocks, to keep results small. If the model needs context, it can follow up with `read_file(path, start_line, end_line)`.
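
For the same reason, here is a sketch of the scanning loop, again illustrative rather than prescriptive. It assumes the `_glob_paths` helper from the previous sketch for path filtering, and hard-codes the per-line cap discussed under Design Principles below.

```python
import re
from pathlib import Path

MAX_LINE_LEN = 200  # assumed per-line summary cap


def _grep_paths(root: str, pattern: str, *, glob: str | None, literal: bool,
                case_sensitive: bool, max_results: int) -> list[str]:
    """Return up to max_results 'path:line_no: text' summaries under root."""
    flags = 0 if case_sensitive else re.IGNORECASE
    regex = re.compile(re.escape(pattern) if literal else pattern, flags)

    hits: list[str] = []
    candidates = _glob_paths(root, glob or "**/*", include_dirs=False, max_results=10_000)
    for file_path in candidates:
        try:
            text = Path(file_path).read_text(encoding="utf-8", errors="strict")
        except (UnicodeDecodeError, OSError):
            continue  # skip binary or unreadable files
        for line_no, line in enumerate(text.splitlines(), start=1):
            if regex.search(line):
                hits.append(f"{file_path}:{line_no}: {line.strip()[:MAX_LINE_LEN]}")
                if len(hits) >= max_results:
                    return hits
    return hits
```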

## Design Principles

### A. No shell wrapper

Do not implement `grep` as:

```python
subprocess.run("grep ...")
```

and do not assemble `find` / `rg` commands inside the container either.

Reasons:

- It introduces shell quoting issues and an injection surface
- It depends on different sandbox images shipping the same set of commands
- Behavior is inconsistent across Windows / macOS / Linux
- It is hard to reliably control the number and format of results

The right direction is:

- `glob` uses Python standard library path traversal
- `grep` scans files one by one in Python
- DeerFlow formats the output itself

If `rg` is later preferred for performance, it should be wrapped inside the provider with the external semantics unchanged, rather than exposing the CLI to the model.

### B. Keep using DeerFlow's path permission model

Both tools must reuse the path validation logic of the existing `ls` / `read_file`:

- In local mode, go through `validate_local_tool_path(..., read_only=True)`
- Support `/mnt/skills/...`
- Support `/mnt/acp-workspace/...`
- Support virtual path resolution for the thread workspace / uploads / outputs
- Explicitly reject out-of-scope paths and path traversal

In other words, they belong to **file:read**; they are not a privilege-escalating replacement entry point for `bash`.
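
Concretely, both tools would gate on the same validator before touching the filesystem. A sketch of that entry check, combining the helpers from the earlier sketches; the return shape of `validate_local_tool_path` is an assumption here:

```python
def glob_tool(runtime, description: str, pattern: str, path: str,
              include_dirs: bool = False, max_results: int = 200) -> str:
    # Reuse the existing read-only path check; reject anything outside the
    # authorized roots before any traversal happens.
    resolved = validate_local_tool_path(path, read_only=True)  # assumed to return a resolved path or None
    if resolved is None:
        return f"Error: path not allowed: {path}"
    paths = _glob_paths(str(resolved), pattern, include_dirs, max_results)
    return _format_glob_result(path, paths)
```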

### C. Results must be hard-limited

A `glob` / `grep` without hard limits can easily blow up the context.

The first version should at least limit:

- `glob.max_results` defaults to 200, capped at 1000
- `grep.max_results` defaults to 100, capped at 500
- Maximum length of a single-line summary, e.g. 200 characters
- Binary files are skipped
- Oversized files are skipped, e.g. any single file over 1 MB, or as controlled by config

In addition, when the number of hits exceeds the threshold, the result should report:

- how many entries were shown
- the fact that the output was truncated
- a suggestion to narrow the search scope

For example:

```text
Found more than 100 matches, showing first 100. Narrow the path or add a glob filter.
```
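
A small helper can enforce both caps and produce the notice above; the numbers and the message wording are just the defaults proposed in this section.

```python
def _apply_limits(hits: list[str], requested: int, hard_cap: int) -> tuple[list[str], str]:
    """Clamp the caller's max_results to the hard cap and report truncation."""
    limit = min(requested, hard_cap)
    if len(hits) <= limit:
        return hits, ""
    notice = (f"Found more than {limit} matches, showing first {limit}. "
              "Narrow the path or add a glob filter.")
    return hits[:limit], notice

# Hypothetical usage inside grep_tool:
# shown, notice = _apply_limits(hits, max_results, 500)
```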

### D. Tool semantics should complement each other

The recommended model workflow is:

1. `glob` to find candidate files
2. `grep` to find candidate locations
3. `read_file` to read local context
4. `str_replace` / `write_file` to make the change

This keeps tool boundaries clear and makes it easier to teach the model stable habits in the prompt; an illustrative call sequence follows below.
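
An illustrative end-to-end sequence for a task like "rename the config key tool_groups"; the arguments, and the `read_file` / `str_replace` parameter names, are hypothetical:

```python
# 1. Candidate files by path pattern
glob(description="find backend python files", pattern="**/*.py",
     path="/mnt/user-data/workspace/backend")

# 2. Candidate locations by content
grep(description="find tool_groups usages", pattern="tool_groups",
     path="/mnt/user-data/workspace/backend", literal=True)

# 3. Local context around one hit
read_file(path="/mnt/user-data/workspace/backend/config.py", start_line=8, end_line=20)

# 4. The actual edit
str_replace(path="/mnt/user-data/workspace/backend/config.py",
            old_str='"tool_groups"', new_str='"tool_sets"')
```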

## Implementation Approach

### Option A: Implement the first version directly in `sandbox/tools.py`

This is the starting point I recommend.

Approach:

- Add `glob_tool` and `grep_tool` in `sandbox/tools.py`
- In the local sandbox case, use the Python filesystem APIs directly
- In non-local sandbox cases, also prefer going through DeerFlow's own path access layer

Pros:

- Small change
- Fast way to validate the effect on the agent
- No need to change the `Sandbox` abstraction first

Cons:

- `tools.py` keeps getting fatter
- If provider-side performance optimization is wanted later, another abstraction pass is needed

### Option B: Extend the `Sandbox` abstraction first

For example, add:

```python
class Sandbox(ABC):
    def glob(self, path: str, pattern: str, include_dirs: bool = False, max_results: int = 200) -> list[str]:
        ...

    def grep(
        self,
        path: str,
        pattern: str,
        *,
        glob: str | None = None,
        literal: bool = False,
        case_sensitive: bool = False,
        max_results: int = 100,
    ) -> list[GrepMatch]:
        ...
```

Pros:

- Cleaner abstraction
- Container / remote sandboxes can each optimize independently

Cons:

- Higher upfront cost
- All sandbox providers must be changed in lockstep

Conclusion:

**The first version should take Option A; push the logic down into the `Sandbox` abstraction once the tools have proven their value.**

## Detailed Behavior

### `glob` behavior

- Root directory does not exist: return a clear error
- Root path is not a directory: return a clear error
- Invalid pattern: return a clear error
- Empty result: return `No files matched`
- Default ignores should align with the current `list_dir` as closely as possible, for example:
  - `.git`
  - `node_modules`
  - `__pycache__`
  - `.venv`
  - build output directories

A shared ignore set should be extracted here so that `ls` and `glob` results do not drift apart in style.
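
One way to share those rules, as a sketch: pull them into a module-level constant that both `list_dir` and the new tools import, so there is a single place to extend. The module and constant names below are illustrative, not existing code.

```python
# sandbox/ignore_rules.py (hypothetical module)
DEFAULT_IGNORED_DIRS: frozenset[str] = frozenset({
    ".git",
    "node_modules",
    "__pycache__",
    ".venv",
    "dist",
    "build",
})


def is_ignored(parts: tuple[str, ...]) -> bool:
    """True if any path component is an ignored directory."""
    return any(part in DEFAULT_IGNORED_DIRS for part in parts)
```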

### `grep` behavior

- Scan only text files by default
- Skip files detected as binary (see the sketch below)
- Skip oversized files, or scan only their first N KB
- Return a parameter error when the regex fails to compile
- Paths in the output keep using virtual paths rather than exposing real host paths
- Sort by file path and line number by default, so the output stays stable
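
A cheap binary check that avoids reading whole files, as a sketch; the 1 MB cutoff mirrors the limit suggested above.

```python
from pathlib import Path

MAX_FILE_BYTES = 1_000_000  # mirrors the 1 MB suggestion above


def _should_scan(path: Path) -> bool:
    """Skip oversized files and files that look binary (NUL byte in the first 8 KB)."""
    try:
        if path.stat().st_size > MAX_FILE_BYTES:
            return False
        with path.open("rb") as f:
            return b"\x00" not in f.read(8192)
    except OSError:
        return False
```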

## Prompting Guidance

If these two tools are introduced, the file-operation guidance in the system prompt should be updated at the same time:

- Prefer `glob` when searching for file name patterns
- Prefer `grep` when searching for code symbols, config keys, or copy text
- Fall back to `bash` only when the tools are not enough to accomplish the goal

Otherwise the model will keep habitually reaching for `bash` first.

## Risks

### 1. Overlap with `bash`

True, but not a problem.

`ls` and `read_file` can also be replaced by `bash`, yet we keep them, because structured tools suit agents better.

### 2. Performance

On large repositories, a pure-Python `grep` may be slower than `rg`.

Mitigations:

- Add result caps and file-size caps in the first version
- Require a root path
- Provide the `glob` filter to narrow the scan scope
- Later, if necessary, optimize with `rg` inside the provider while keeping the same schema

### 3. Inconsistent ignore rules

If `glob` cannot see a path that `ls` can, the model gets confused.

Mitigations:

- Unify the ignore rules
- State in the docs that "common dependency and build directories are skipped by default"

### 4. Regex search becoming too complex

Supporting a pile of grep dialects in the first version would make the boundaries messy.

Mitigations:

- Support only Python `re` in the first version
- Provide a simple `literal=True` mode

## Alternatives Considered

### A. Add no tools and rely entirely on `bash`

Not recommended.

This keeps DeerFlow behind on the codebase-exploration experience and weakens it in bash-less or restricted-bash scenarios.

### B. Add only `glob`, not `grep`

Not recommended.

It solves "find the file" but not "find the location"; the model will still fall back to `bash grep` in the end.

### C. Add only `grep`, not `glob`

Also not recommended.

Without a path-pattern filter, `grep`'s scan scope is often too large; `glob` is its natural companion tool.

### D. Plug in an MCP filesystem server's search capability directly

Not recommended as the main path in the short term.

MCP can be a complement, but as DeerFlow's basic coding tools, `glob` / `grep` are best kept as built-ins so they are reliably available in the default installation.

## Acceptance Criteria

- `glob` and `grep` can be enabled by default in `config.example.yaml`
- Both tools belong to the `file:read` group
- Existing path permissions are strictly respected in the local sandbox
- Output does not leak real host paths
- Large result sets are truncated with an explicit notice
- The model can complete a typical code-change flow via `glob -> grep -> read_file -> str_replace`
- In local mode with host bash disabled, repository exploration is noticeably better

## Rollout Plan

1. Implement `glob_tool` and `grep_tool` in `sandbox/tools.py`
2. Extract ignore rules consistent with `list_dir` to avoid behavior drift
3. Add the tool configuration to `config.example.yaml` by default
4. Add tests for local path validation, virtual path mapping, result truncation, and binary skipping
5. Update the README / backend docs / prompt guidance
6. Collect real agent call data, then decide whether to push down into the `Sandbox` abstraction

## Suggested Config

```yaml
tools:
  - name: glob
    group: file:read
    use: deerflow.sandbox.tools:glob_tool

  - name: grep
    group: file:read
    use: deerflow.sandbox.tools:grep_tool
```

## Final Recommendation

The conclusion: **yes, this can and should be added.**

But I would hold three boundaries firmly:

1. `grep` / `glob` must be built-in, read-only, structured tools
2. The first version must not be a shell wrapper and must not expose CLI dialects to the model
3. Prove the value in `sandbox/tools.py` first, then consider pushing down into the `Sandbox` provider abstraction

Done this way, it will noticeably improve DeerFlow's usability in coding / repo exploration scenarios, and the risk is controllable.

@@ -41,6 +41,13 @@ summarization:
|
||||
|
||||
# Custom summary prompt (optional)
|
||||
summary_prompt: null
|
||||
|
||||
# Tool names treated as skill file reads for skill rescue
|
||||
skill_file_read_tool_names:
|
||||
- read_file
|
||||
- read
|
||||
- view
|
||||
- cat
|
||||
```
|
||||
|
||||
### Configuration Options
|
||||
@@ -125,6 +132,26 @@ keep:
|
||||
- **Default**: `null` (uses LangChain's default prompt)
|
||||
- **Description**: Custom prompt template for generating summaries. The prompt should guide the model to extract the most important context.
|
||||
|
||||
#### `preserve_recent_skill_count`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `5`
|
||||
- **Description**: Number of most-recently-loaded skill files (tool results whose tool name is in `skill_file_read_tool_names` and whose target path is under `skills.container_path`, e.g. `/mnt/skills/...`) that are rescued from summarization. Prevents the agent from losing skill instructions after compression. Set to `0` to disable skill rescue entirely.
|
||||
|
||||
#### `preserve_recent_skill_tokens`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `25000`
|
||||
- **Description**: Total token budget reserved for rescued skill reads. Once this budget is exhausted, older skill bundles are allowed to be summarized.
|
||||
|
||||
#### `preserve_recent_skill_tokens_per_skill`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `5000`
|
||||
- **Description**: Per-skill token cap. Any individual skill read whose tool result exceeds this size is not rescued (it falls through to the summarizer like ordinary content).
|
||||
|
||||
#### `skill_file_read_tool_names`
|
||||
- **Type**: List of strings
|
||||
- **Default**: `["read_file", "read", "view", "cat"]`
|
||||
- **Description**: Tool names treated as skill file reads during summarization rescue. A tool call is only eligible for skill rescue when its name appears in this list and its target path is under `skills.container_path`.
|
||||
|
||||
**Default Prompt Behavior:**
|
||||
The default LangChain prompt instructs the model to:
|
||||
- Extract highest quality/most relevant context
|
||||
@@ -147,6 +174,7 @@ The default LangChain prompt instructs the model to:
|
||||
- A single summary message is added
|
||||
- Recent messages are preserved
|
||||
6. **AI/Tool Pair Protection**: The system ensures AI messages and their corresponding tool messages stay together
|
||||
7. **Skill Rescue**: Before the summary is generated, the most recently loaded skill files (tool results whose tool name is in `skill_file_read_tool_names` and whose target path is under `skills.container_path`) are lifted out of the summarization set and prepended to the preserved tail. Selection walks newest-first under three budgets: `preserve_recent_skill_count`, `preserve_recent_skill_tokens`, and `preserve_recent_skill_tokens_per_skill`. The triggering AIMessage and all of its paired ToolMessages move together so tool_call ↔ tool_result pairing stays intact.
|
||||
|
||||
### Token Counting
|
||||
|
||||
|
||||
@@ -8,6 +8,9 @@
|
||||
"graphs": {
|
||||
"lead_agent": "deerflow.agents:make_lead_agent"
|
||||
},
|
||||
"auth": {
|
||||
"path": "./app/gateway/langgraph_auth.py:auth"
|
||||
},
|
||||
"checkpointer": {
|
||||
"path": "./packages/harness/deerflow/agents/checkpointer/async_provider.py:make_checkpointer"
|
||||
}
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
from .checkpointer import get_checkpointer, make_checkpointer, reset_checkpointer
|
||||
from .factory import create_deerflow_agent
|
||||
from .features import Next, Prev, RuntimeFeatures
|
||||
from .lead_agent import make_lead_agent
|
||||
from .lead_agent.prompt import prime_enabled_skills_cache
|
||||
from .thread_state import SandboxState, ThreadState
|
||||
|
||||
# LangGraph imports deerflow.agents when registering the graph. Prime the
|
||||
# enabled-skills cache here so the request path can usually read a warm cache
|
||||
# without forcing synchronous filesystem work during prompt module import.
|
||||
prime_enabled_skills_cache()
|
||||
|
||||
__all__ = [
|
||||
"create_deerflow_agent",
|
||||
"RuntimeFeatures",
|
||||
@@ -12,7 +17,4 @@ __all__ = [
|
||||
"make_lead_agent",
|
||||
"SandboxState",
|
||||
"ThreadState",
|
||||
"get_checkpointer",
|
||||
"reset_checkpointer",
|
||||
"make_checkpointer",
|
||||
]
|
||||
|
||||
@@ -1,31 +1,42 @@
|
||||
import logging
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware
|
||||
from langchain.agents.middleware import AgentMiddleware
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.graph.state import CompiledStateGraph
|
||||
|
||||
from deerflow.agents.lead_agent.prompt import apply_prompt_template
|
||||
from deerflow.agents.memory.summarization_hook import memory_flush_hook
|
||||
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware
|
||||
from deerflow.agents.middlewares.loop_detection_middleware import LoopDetectionMiddleware
|
||||
from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware
|
||||
from deerflow.agents.middlewares.subagent_limit_middleware import SubagentLimitMiddleware
|
||||
from deerflow.agents.middlewares.summarization_middleware import BeforeSummarizationHook, DeerFlowSummarizationMiddleware
|
||||
from deerflow.agents.middlewares.title_middleware import TitleMiddleware
|
||||
from deerflow.agents.middlewares.todo_middleware import TodoMiddleware
|
||||
from deerflow.agents.middlewares.token_usage_middleware import TokenUsageMiddleware
|
||||
from deerflow.agents.middlewares.tool_error_handling_middleware import build_lead_runtime_middlewares
|
||||
from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddleware
|
||||
from deerflow.agents.thread_state import ThreadState
|
||||
from deerflow.config.agents_config import load_agent_config
|
||||
from deerflow.config.app_config import get_app_config
|
||||
from deerflow.config.summarization_config import get_summarization_config
|
||||
from deerflow.config.agents_config import load_agent_config, validate_agent_name
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.config.deer_flow_context import DeerFlowContext
|
||||
from deerflow.models import create_chat_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _resolve_model_name(requested_model_name: str | None = None) -> str:
|
||||
def _get_runtime_config(config: RunnableConfig) -> dict:
|
||||
"""Merge legacy configurable options with LangGraph runtime context."""
|
||||
cfg = dict(config.get("configurable", {}) or {})
|
||||
context = config.get("context", {}) or {}
|
||||
if isinstance(context, dict):
|
||||
cfg.update(context)
|
||||
return cfg
|
||||
|
||||
|
||||
def _resolve_model_name(app_config: AppConfig, requested_model_name: str | None = None) -> str:
|
||||
"""Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
|
||||
app_config = get_app_config()
|
||||
default_model_name = app_config.models[0].name if app_config.models else None
|
||||
if default_model_name is None:
|
||||
raise ValueError("No chat models are configured. Please configure at least one model in config.yaml.")
|
||||
@@ -38,9 +49,9 @@ def _resolve_model_name(requested_model_name: str | None = None) -> str:
|
||||
return default_model_name
|
||||
|
||||
|
||||
def _create_summarization_middleware() -> SummarizationMiddleware | None:
|
||||
def _create_summarization_middleware(app_config: AppConfig) -> DeerFlowSummarizationMiddleware | None:
|
||||
"""Create and configure the summarization middleware from config."""
|
||||
config = get_summarization_config()
|
||||
config = app_config.summarization
|
||||
|
||||
if not config.enabled:
|
||||
return None
|
||||
@@ -56,13 +67,15 @@ def _create_summarization_middleware() -> SummarizationMiddleware | None:
|
||||
# Prepare keep parameter
|
||||
keep = config.keep.to_tuple()
|
||||
|
||||
# Prepare model parameter
|
||||
# Prepare model parameter.
|
||||
# Bind "middleware:summarize" tag so RunJournal identifies these LLM calls
|
||||
# as middleware rather than lead_agent (SummarizationMiddleware is a
|
||||
# LangChain built-in, so we tag the model at creation time).
|
||||
if config.model_name:
|
||||
model = create_chat_model(name=config.model_name, thinking_enabled=False)
|
||||
model = create_chat_model(name=config.model_name, thinking_enabled=False, app_config=app_config)
|
||||
else:
|
||||
# Use a lightweight model for summarization to save costs
|
||||
# Falls back to default model if not explicitly specified
|
||||
model = create_chat_model(thinking_enabled=False)
|
||||
model = create_chat_model(thinking_enabled=False, app_config=app_config)
|
||||
model = model.with_config(tags=["middleware:summarize"])
|
||||
|
||||
# Prepare kwargs
|
||||
kwargs = {
|
||||
@@ -77,7 +90,28 @@ def _create_summarization_middleware() -> SummarizationMiddleware | None:
|
||||
if config.summary_prompt is not None:
|
||||
kwargs["summary_prompt"] = config.summary_prompt
|
||||
|
||||
return SummarizationMiddleware(**kwargs)
|
||||
hooks: list[BeforeSummarizationHook] = []
|
||||
if app_config.memory.enabled:
|
||||
hooks.append(memory_flush_hook)
|
||||
|
||||
# The logic below relies on two assumptions holding true: this factory is
|
||||
# the sole entry point for DeerFlowSummarizationMiddleware, and the runtime
|
||||
# config is not expected to change after startup.
|
||||
try:
|
||||
skills_container_path = app_config.skills.container_path or "/mnt/skills"
|
||||
except Exception:
|
||||
logger.exception("Failed to resolve skills container path; falling back to default")
|
||||
skills_container_path = "/mnt/skills"
|
||||
|
||||
return DeerFlowSummarizationMiddleware(
|
||||
**kwargs,
|
||||
skills_container_path=skills_container_path,
|
||||
skill_file_read_tool_names=config.skill_file_read_tool_names,
|
||||
before_summarization=hooks,
|
||||
preserve_recent_skill_count=config.preserve_recent_skill_count,
|
||||
preserve_recent_skill_tokens=config.preserve_recent_skill_tokens,
|
||||
preserve_recent_skill_tokens_per_skill=config.preserve_recent_skill_tokens_per_skill,
|
||||
)
|
||||
|
||||
|
||||
def _create_todo_list_middleware(is_plan_mode: bool) -> TodoMiddleware | None:
|
||||
@@ -205,10 +239,18 @@ Being proactive with task management demonstrates thoroughness and ensures all r
|
||||
# ViewImageMiddleware should be before ClarificationMiddleware to inject image details before LLM
|
||||
# ToolErrorHandlingMiddleware should be before ClarificationMiddleware to convert tool exceptions to ToolMessages
|
||||
# ClarificationMiddleware should be last to intercept clarification requests after model calls
|
||||
def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_name: str | None = None, custom_middlewares: list[AgentMiddleware] | None = None):
|
||||
def _build_middlewares(
|
||||
app_config: AppConfig,
|
||||
config: RunnableConfig,
|
||||
*,
|
||||
model_name: str | None,
|
||||
agent_name: str | None = None,
|
||||
custom_middlewares: list[AgentMiddleware] | None = None,
|
||||
):
|
||||
"""Build middleware chain based on runtime configuration.
|
||||
|
||||
Args:
|
||||
app_config: Resolved application config.
|
||||
config: Runtime configuration containing configurable options like is_plan_mode.
|
||||
agent_name: If provided, MemoryMiddleware will use per-agent memory storage.
|
||||
custom_middlewares: Optional list of custom middlewares to inject into the chain.
|
||||
@@ -216,21 +258,22 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
Returns:
|
||||
List of middleware instances.
|
||||
"""
|
||||
middlewares = build_lead_runtime_middlewares(lazy_init=True)
|
||||
middlewares = build_lead_runtime_middlewares(app_config=app_config, lazy_init=True)
|
||||
|
||||
# Add summarization middleware if enabled
|
||||
summarization_middleware = _create_summarization_middleware()
|
||||
summarization_middleware = _create_summarization_middleware(app_config)
|
||||
if summarization_middleware is not None:
|
||||
middlewares.append(summarization_middleware)
|
||||
|
||||
# Add TodoList middleware if plan mode is enabled
|
||||
is_plan_mode = config.get("configurable", {}).get("is_plan_mode", False)
|
||||
cfg = _get_runtime_config(config)
|
||||
is_plan_mode = cfg.get("is_plan_mode", False)
|
||||
todo_list_middleware = _create_todo_list_middleware(is_plan_mode)
|
||||
if todo_list_middleware is not None:
|
||||
middlewares.append(todo_list_middleware)
|
||||
|
||||
# Add TokenUsageMiddleware when token_usage tracking is enabled
|
||||
if get_app_config().token_usage.enabled:
|
||||
if app_config.token_usage.enabled:
|
||||
middlewares.append(TokenUsageMiddleware())
|
||||
|
||||
# Add TitleMiddleware
|
||||
@@ -241,7 +284,6 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
|
||||
# Add ViewImageMiddleware only if the current model supports vision.
|
||||
# Use the resolved runtime model_name from make_lead_agent to avoid stale config values.
|
||||
app_config = get_app_config()
|
||||
model_config = app_config.get_model_config(model_name) if model_name else None
|
||||
if model_config is not None and model_config.supports_vision:
|
||||
middlewares.append(ViewImageMiddleware())
|
||||
@@ -253,9 +295,9 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
middlewares.append(DeferredToolFilterMiddleware())
|
||||
|
||||
# Add SubagentLimitMiddleware to truncate excess parallel task calls
|
||||
subagent_enabled = config.get("configurable", {}).get("subagent_enabled", False)
|
||||
subagent_enabled = cfg.get("subagent_enabled", False)
|
||||
if subagent_enabled:
|
||||
max_concurrent_subagents = config.get("configurable", {}).get("max_concurrent_subagents", 3)
|
||||
max_concurrent_subagents = cfg.get("max_concurrent_subagents", 3)
|
||||
middlewares.append(SubagentLimitMiddleware(max_concurrent=max_concurrent_subagents))
|
||||
|
||||
# LoopDetectionMiddleware — detect and break repetitive tool call loops
|
||||
@@ -270,12 +312,33 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
return middlewares
|
||||
|
||||
|
||||
def make_lead_agent(config: RunnableConfig):
|
||||
def make_lead_agent(
|
||||
config: RunnableConfig,
|
||||
app_config: AppConfig | None = None,
|
||||
) -> CompiledStateGraph:
|
||||
"""Build the lead agent from runtime config.
|
||||
|
||||
Args:
|
||||
config: LangGraph ``RunnableConfig`` carrying per-invocation options
|
||||
(``thinking_enabled``, ``model_name``, ``is_plan_mode``, etc.).
|
||||
app_config: Resolved application config. Required for in-process
|
||||
entry points (DeerFlowClient, Gateway Worker). When omitted we
|
||||
are being called via ``langgraph.json`` registration and reload
|
||||
from disk — the LangGraph Server bootstrap path has no other
|
||||
way to thread the value.
|
||||
"""
|
||||
# Lazy import to avoid circular dependency
|
||||
from deerflow.tools import get_available_tools
|
||||
from deerflow.tools.builtins import setup_agent
|
||||
|
||||
cfg = config.get("configurable", {})
|
||||
if app_config is None:
|
||||
# LangGraph Server registers ``make_lead_agent`` via ``langgraph.json``
|
||||
# and hands us only a ``RunnableConfig``. Reload config from disk
|
||||
# here — it's a pure function, equivalent to the process-global the
|
||||
# old code path would have read.
|
||||
app_config = AppConfig.from_file()
|
||||
|
||||
cfg = _get_runtime_config(config)
|
||||
|
||||
thinking_enabled = cfg.get("thinking_enabled", True)
|
||||
reasoning_effort = cfg.get("reasoning_effort", None)
|
||||
@@ -284,17 +347,16 @@ def make_lead_agent(config: RunnableConfig):
|
||||
subagent_enabled = cfg.get("subagent_enabled", False)
|
||||
max_concurrent_subagents = cfg.get("max_concurrent_subagents", 3)
|
||||
is_bootstrap = cfg.get("is_bootstrap", False)
|
||||
agent_name = cfg.get("agent_name")
|
||||
agent_name = validate_agent_name(cfg.get("agent_name"))
|
||||
|
||||
agent_config = load_agent_config(agent_name) if not is_bootstrap else None
|
||||
# Custom agent model or fallback to global/default model resolution
|
||||
agent_model_name = agent_config.model if agent_config and agent_config.model else _resolve_model_name()
|
||||
# Custom agent model from agent config (if any), or None to let _resolve_model_name pick the default
|
||||
agent_model_name = agent_config.model if agent_config and agent_config.model else None
|
||||
|
||||
# Final model name resolution with request override, then agent config, then global default
|
||||
model_name = requested_model_name or agent_model_name
|
||||
# Final model name resolution: request → agent config → global default, with fallback for unknown names
|
||||
model_name = _resolve_model_name(app_config, requested_model_name or agent_model_name)
|
||||
|
||||
app_config = get_app_config()
|
||||
model_config = app_config.get_model_config(model_name) if model_name else None
|
||||
model_config = app_config.get_model_config(model_name)
|
||||
|
||||
if model_config is None:
|
||||
raise ValueError("No chat model could be resolved. Please configure at least one model in config.yaml or provide a valid 'model_name'/'model' in the request.")
|
||||
@@ -325,24 +387,30 @@ def make_lead_agent(config: RunnableConfig):
|
||||
"reasoning_effort": reasoning_effort,
|
||||
"is_plan_mode": is_plan_mode,
|
||||
"subagent_enabled": subagent_enabled,
|
||||
"tool_groups": agent_config.tool_groups if agent_config else None,
|
||||
"available_skills": ["bootstrap"] if is_bootstrap else (agent_config.skills if agent_config and agent_config.skills is not None else None),
|
||||
}
|
||||
)
|
||||
|
||||
if is_bootstrap:
|
||||
# Special bootstrap agent with minimal prompt for initial custom agent creation flow
|
||||
return create_agent(
|
||||
model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled),
|
||||
tools=get_available_tools(model_name=model_name, subagent_enabled=subagent_enabled) + [setup_agent],
|
||||
middleware=_build_middlewares(config, model_name=model_name),
|
||||
system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, available_skills=set(["bootstrap"])),
|
||||
model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, app_config=app_config),
|
||||
tools=get_available_tools(model_name=model_name, subagent_enabled=subagent_enabled, app_config=app_config) + [setup_agent],
|
||||
middleware=_build_middlewares(app_config, config, model_name=model_name),
|
||||
system_prompt=apply_prompt_template(app_config, subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, available_skills=set(["bootstrap"])),
|
||||
state_schema=ThreadState,
|
||||
context_schema=DeerFlowContext,
|
||||
)
|
||||
|
||||
# Default lead agent (unchanged behavior)
|
||||
return create_agent(
|
||||
model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, reasoning_effort=reasoning_effort),
|
||||
tools=get_available_tools(model_name=model_name, groups=agent_config.tool_groups if agent_config else None, subagent_enabled=subagent_enabled),
|
||||
middleware=_build_middlewares(config, model_name=model_name, agent_name=agent_name),
|
||||
system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, agent_name=agent_name),
|
||||
model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, reasoning_effort=reasoning_effort, app_config=app_config),
|
||||
tools=get_available_tools(model_name=model_name, groups=agent_config.tool_groups if agent_config else None, subagent_enabled=subagent_enabled, app_config=app_config),
|
||||
middleware=_build_middlewares(app_config, config, model_name=model_name, agent_name=agent_name),
|
||||
system_prompt=apply_prompt_template(
|
||||
app_config, subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, agent_name=agent_name, available_skills=set(agent_config.skills) if agent_config and agent_config.skills is not None else None
|
||||
),
|
||||
state_schema=ThreadState,
|
||||
context_schema=DeerFlowContext,
|
||||
)
|
||||
|
||||
@@ -1,30 +1,218 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from functools import lru_cache
|
||||
|
||||
from deerflow.config.agents_config import load_agent_soul
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.skills import load_skills
|
||||
from deerflow.skills.types import Skill
|
||||
from deerflow.subagents import get_available_subagent_names
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_ENABLED_SKILLS_REFRESH_WAIT_TIMEOUT_SECONDS = 5.0
|
||||
_enabled_skills_lock = threading.Lock()
|
||||
_enabled_skills_cache: list[Skill] | None = None
|
||||
_enabled_skills_refresh_active = False
|
||||
_enabled_skills_refresh_version = 0
|
||||
_enabled_skills_refresh_event = threading.Event()
|
||||
|
||||
def _build_subagent_section(max_concurrent: int) -> str:
|
||||
|
||||
def _load_enabled_skills_sync(app_config: AppConfig | None) -> list[Skill]:
|
||||
return list(load_skills(app_config, enabled_only=True))
|
||||
|
||||
|
||||
def _start_enabled_skills_refresh_thread(app_config: AppConfig | None) -> None:
|
||||
threading.Thread(
|
||||
target=_refresh_enabled_skills_cache_worker,
|
||||
args=(app_config,),
|
||||
name="deerflow-enabled-skills-loader",
|
||||
daemon=True,
|
||||
).start()
|
||||
|
||||
|
||||
def _refresh_enabled_skills_cache_worker(app_config: AppConfig | None) -> None:
|
||||
global _enabled_skills_cache, _enabled_skills_refresh_active
|
||||
|
||||
while True:
|
||||
with _enabled_skills_lock:
|
||||
target_version = _enabled_skills_refresh_version
|
||||
|
||||
try:
|
||||
skills = _load_enabled_skills_sync(app_config)
|
||||
except (OSError, ImportError):
|
||||
logger.exception("Failed to load enabled skills for prompt injection")
|
||||
skills = []
|
||||
|
||||
with _enabled_skills_lock:
|
||||
if _enabled_skills_refresh_version == target_version:
|
||||
_enabled_skills_cache = skills
|
||||
_enabled_skills_refresh_active = False
|
||||
_enabled_skills_refresh_event.set()
|
||||
return
|
||||
|
||||
# A newer invalidation happened while loading. Keep the worker alive
|
||||
# and loop again so the cache always converges on the latest version.
|
||||
_enabled_skills_cache = None
|
||||
|
||||
|
||||
def _ensure_enabled_skills_cache(app_config: AppConfig | None) -> threading.Event:
|
||||
global _enabled_skills_refresh_active
|
||||
|
||||
with _enabled_skills_lock:
|
||||
if _enabled_skills_cache is not None:
|
||||
_enabled_skills_refresh_event.set()
|
||||
return _enabled_skills_refresh_event
|
||||
if _enabled_skills_refresh_active:
|
||||
return _enabled_skills_refresh_event
|
||||
_enabled_skills_refresh_active = True
|
||||
_enabled_skills_refresh_event.clear()
|
||||
|
||||
_start_enabled_skills_refresh_thread(app_config)
|
||||
return _enabled_skills_refresh_event
|
||||
|
||||
|
||||
def _invalidate_enabled_skills_cache(app_config: AppConfig | None) -> threading.Event:
|
||||
global _enabled_skills_cache, _enabled_skills_refresh_active, _enabled_skills_refresh_version
|
||||
|
||||
_get_cached_skills_prompt_section.cache_clear()
|
||||
with _enabled_skills_lock:
|
||||
_enabled_skills_cache = None
|
||||
_enabled_skills_refresh_version += 1
|
||||
_enabled_skills_refresh_event.clear()
|
||||
if _enabled_skills_refresh_active:
|
||||
return _enabled_skills_refresh_event
|
||||
_enabled_skills_refresh_active = True
|
||||
|
||||
_start_enabled_skills_refresh_thread(app_config)
|
||||
return _enabled_skills_refresh_event
|
||||
|
||||
|
||||
def prime_enabled_skills_cache(app_config: AppConfig | None = None) -> None:
|
||||
_ensure_enabled_skills_cache(app_config)
|
||||
|
||||
|
||||
def warm_enabled_skills_cache(app_config: AppConfig | None = None, timeout_seconds: float = _ENABLED_SKILLS_REFRESH_WAIT_TIMEOUT_SECONDS) -> bool:
|
||||
if _ensure_enabled_skills_cache(app_config).wait(timeout=timeout_seconds):
|
||||
return True
|
||||
|
||||
logger.warning("Timed out waiting %.1fs for enabled skills cache warm-up", timeout_seconds)
|
||||
return False
|
||||
|
||||
|
||||
def _get_enabled_skills(app_config: AppConfig | None = None):
|
||||
with _enabled_skills_lock:
|
||||
cached = _enabled_skills_cache
|
||||
|
||||
if cached is not None:
|
||||
return list(cached)
|
||||
|
||||
_ensure_enabled_skills_cache(app_config)
|
||||
return []
|
||||
|
||||
|
||||
def _skill_mutability_label(category: str) -> str:
|
||||
return "[custom, editable]" if category == "custom" else "[built-in]"
|
||||
|
||||
|
||||
def clear_skills_system_prompt_cache(app_config: AppConfig | None = None) -> None:
|
||||
_invalidate_enabled_skills_cache(app_config)
|
||||
|
||||
|
||||
async def refresh_skills_system_prompt_cache_async(app_config: AppConfig | None = None) -> None:
|
||||
await asyncio.to_thread(_invalidate_enabled_skills_cache(app_config).wait)
|
||||
|
||||
|
||||
def _reset_skills_system_prompt_cache_state() -> None:
|
||||
global _enabled_skills_cache, _enabled_skills_refresh_active, _enabled_skills_refresh_version
|
||||
|
||||
_get_cached_skills_prompt_section.cache_clear()
|
||||
with _enabled_skills_lock:
|
||||
_enabled_skills_cache = None
|
||||
_enabled_skills_refresh_active = False
|
||||
_enabled_skills_refresh_version = 0
|
||||
_enabled_skills_refresh_event.clear()
|
||||
|
||||
|
||||
def _refresh_enabled_skills_cache(app_config: AppConfig | None = None) -> None:
|
||||
"""Backward-compatible test helper for direct synchronous reload."""
|
||||
try:
|
||||
skills = _load_enabled_skills_sync(app_config)
|
||||
except Exception:
|
||||
logger.exception("Failed to load enabled skills for prompt injection")
|
||||
skills = []
|
||||
|
||||
with _enabled_skills_lock:
|
||||
_enabled_skills_cache = skills
|
||||
_enabled_skills_refresh_active = False
|
||||
_enabled_skills_refresh_event.set()
|
||||
|
||||
|
||||
def _build_skill_evolution_section(skill_evolution_enabled: bool) -> str:
|
||||
if not skill_evolution_enabled:
|
||||
return ""
|
||||
return """
|
||||
## Skill Self-Evolution
|
||||
After completing a task, consider creating or updating a skill when:
|
||||
- The task required 5+ tool calls to resolve
|
||||
- You overcame non-obvious errors or pitfalls
|
||||
- The user corrected your approach and the corrected version worked
|
||||
- You discovered a non-trivial, recurring workflow
|
||||
If you used a skill and encountered issues not covered by it, patch it immediately.
|
||||
Prefer patch over edit. Before creating a new skill, confirm with the user first.
|
||||
Skip simple one-off tasks.
|
||||
"""
|
||||
|
||||
|
||||
def _build_available_subagents_description(available_names: list[str], bash_available: bool, app_config: AppConfig) -> str:
|
||||
"""Dynamically build subagent type descriptions from registry.
|
||||
|
||||
Mirrors Codex's pattern where agent_type_description is dynamically generated
|
||||
from all registered roles, so the LLM knows about every available type.
|
||||
"""
|
||||
# Built-in descriptions (kept for backward compatibility with existing prompt quality)
|
||||
builtin_descriptions = {
|
||||
"general-purpose": "For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.",
|
||||
"bash": (
|
||||
"For command execution (git, build, test, deploy operations)" if bash_available else "Not available in the current sandbox configuration. Use direct file/web tools or switch to AioSandboxProvider for isolated shell access."
|
||||
),
|
||||
}
|
||||
|
||||
# Lazy import moved outside loop to avoid repeated import overhead
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
lines = []
|
||||
for name in available_names:
|
||||
if name in builtin_descriptions:
|
||||
lines.append(f"- **{name}**: {builtin_descriptions[name]}")
|
||||
else:
|
||||
config = get_subagent_config(name, app_config)
|
||||
if config is not None:
|
||||
desc = config.description.split("\n")[0].strip() # First line only for brevity
|
||||
lines.append(f"- **{name}**: {desc}")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def _build_subagent_section(max_concurrent: int, app_config: AppConfig) -> str:
|
||||
"""Build the subagent system prompt section with dynamic concurrency limit.
|
||||
|
||||
Args:
|
||||
max_concurrent: Maximum number of concurrent subagent calls allowed per response.
|
||||
app_config: Application config used to gate bash availability.
|
||||
|
||||
Returns:
|
||||
Formatted subagent section string.
|
||||
"""
|
||||
n = max_concurrent
|
||||
bash_available = "bash" in get_available_subagent_names()
|
||||
available_subagents = (
|
||||
"- **general-purpose**: For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.\n- **bash**: For command execution (git, build, test, deploy operations)"
|
||||
if bash_available
|
||||
else "- **general-purpose**: For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.\n"
|
||||
"- **bash**: Not available in the current sandbox configuration. Use direct file/web tools or switch to AioSandboxProvider for isolated shell access."
|
||||
)
|
||||
available_names = get_available_subagent_names(app_config)
|
||||
bash_available = "bash" in available_names
|
||||
|
||||
# Dynamically build subagent type descriptions from registry (aligned with Codex's
|
||||
# agent_type_description pattern where all registered roles are listed in the tool spec).
|
||||
available_subagents = _build_available_subagents_description(available_names, bash_available, app_config)
|
||||
direct_tool_examples = "bash, ls, read_file, web_search, etc." if bash_available else "ls, read_file, web_search, etc."
|
||||
direct_execution_example = (
|
||||
'# User asks: "Run the tests"\n# Thinking: Cannot decompose into parallel sub-tasks\n# → Execute directly\n\nbash("npm test") # Direct execution, not task()'
|
||||
@@ -261,7 +449,10 @@ You: "Deploying to staging..." [proceed]
|
||||
- Use `read_file` tool to read uploaded files using their paths from the list
|
||||
- For PDF, PPT, Excel, and Word files, converted Markdown versions (*.md) are available alongside originals
|
||||
- All temporary work happens in `/mnt/user-data/workspace`
|
||||
- Final deliverables must be copied to `/mnt/user-data/outputs` and presented using `present_file` tool
|
||||
- Treat `/mnt/user-data/workspace` as your default current working directory for coding and file-editing tasks
|
||||
- When writing scripts or commands that create/read files from the workspace, prefer relative paths such as `hello.txt`, `../uploads/data.csv`, and `../outputs/report.md`
|
||||
- Avoid hardcoding `/mnt/user-data/...` inside generated scripts when a relative path from the workspace is enough
|
||||
- Final deliverables must be copied to `/mnt/user-data/outputs` and presented using `present_files` tool
|
||||
{acp_section}
|
||||
</working_directory>
|
||||
|
||||
@@ -348,65 +539,51 @@ combined with a FastAPI gateway for REST API access [citation:FastAPI](https://f
|
||||
"""
|
||||
|
||||
|
||||
def _get_memory_context(agent_name: str | None = None) -> str:
|
||||
def _get_memory_context(app_config: AppConfig, agent_name: str | None = None) -> str:
|
||||
"""Get memory context for injection into system prompt.
|
||||
|
||||
Args:
|
||||
agent_name: If provided, loads per-agent memory. If None, loads global memory.
|
||||
|
||||
Returns:
|
||||
Formatted memory context string wrapped in XML tags, or empty string if disabled.
|
||||
Returns an empty string when memory is disabled or the stored memory file
|
||||
cannot be read/parsed. A corrupt memory.json degrades the prompt to
|
||||
no-memory; it never kills the agent.
|
||||
"""
|
||||
from deerflow.agents.memory import format_memory_for_injection, get_memory_data
|
||||
from deerflow.runtime.user_context import get_effective_user_id
|
||||
|
||||
memory_config = app_config.memory
|
||||
if not memory_config.enabled or not memory_config.injection_enabled:
|
||||
return ""
|
||||
|
||||
try:
|
||||
from deerflow.agents.memory import format_memory_for_injection, get_memory_data
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
memory_data = get_memory_data(memory_config, agent_name, user_id=get_effective_user_id())
|
||||
except (OSError, ValueError, UnicodeDecodeError):
|
||||
logger.exception("Failed to load memory data for prompt injection")
|
||||
return ""
|
||||
|
||||
config = get_memory_config()
|
||||
if not config.enabled or not config.injection_enabled:
|
||||
return ""
|
||||
memory_content = format_memory_for_injection(memory_data, max_tokens=memory_config.max_injection_tokens)
|
||||
if not memory_content.strip():
|
||||
return ""
|
||||
|
||||
memory_data = get_memory_data(agent_name)
|
||||
memory_content = format_memory_for_injection(memory_data, max_tokens=config.max_injection_tokens)
|
||||
|
||||
if not memory_content.strip():
|
||||
return ""
|
||||
|
||||
return f"""<memory>
|
||||
return f"""<memory>
|
||||
{memory_content}
|
||||
</memory>
|
||||
"""
|
||||
except Exception as e:
|
||||
logger.error("Failed to load memory context: %s", e)
|
||||
return ""
|
||||
|
||||
|
||||
def get_skills_prompt_section(available_skills: set[str] | None = None) -> str:
|
||||
"""Generate the skills prompt section with available skills list.
|
||||
|
||||
Returns the <skill_system>...</skill_system> block listing all enabled skills,
|
||||
suitable for injection into any agent's system prompt.
|
||||
"""
|
||||
skills = load_skills(enabled_only=True)
|
||||
|
||||
try:
|
||||
from deerflow.config import get_app_config
|
||||
|
||||
config = get_app_config()
|
||||
container_base_path = config.skills.container_path
|
||||
except Exception:
|
||||
container_base_path = "/mnt/skills"
|
||||
|
||||
if not skills:
|
||||
return ""
|
||||
|
||||
if available_skills is not None:
|
||||
skills = [skill for skill in skills if skill.name in available_skills]
|
||||
|
||||
skill_items = "\n".join(
|
||||
f" <skill>\n <name>{skill.name}</name>\n <description>{skill.description}</description>\n <location>{skill.get_container_file_path(container_base_path)}</location>\n </skill>" for skill in skills
|
||||
)
|
||||
skills_list = f"<available_skills>\n{skill_items}\n</available_skills>"
|
||||
|
||||
@lru_cache(maxsize=32)
|
||||
def _get_cached_skills_prompt_section(
|
||||
skill_signature: tuple[tuple[str, str, str, str], ...],
|
||||
available_skills_key: tuple[str, ...] | None,
|
||||
container_base_path: str,
|
||||
skill_evolution_section: str,
|
||||
) -> str:
|
||||
filtered = [(name, description, category, location) for name, description, category, location in skill_signature if available_skills_key is None or name in available_skills_key]
|
||||
skills_list = ""
|
||||
if filtered:
|
||||
skill_items = "\n".join(
|
||||
f" <skill>\n <name>{name}</name>\n <description>{description} {_skill_mutability_label(category)}</description>\n <location>{location}</location>\n </skill>"
|
||||
for name, description, category, location in filtered
|
||||
)
|
||||
skills_list = f"<available_skills>\n{skill_items}\n</available_skills>"
|
||||
return f"""<skill_system>
|
||||
You have access to skills that provide optimized workflows for specific tasks. Each skill contains best practices, frameworks, and references to additional resources.
|
||||
|
||||
@@ -418,12 +595,33 @@ You have access to skills that provide optimized workflows for specific tasks. E
|
||||
5. Follow the skill's instructions precisely
|
||||
|
||||
**Skills are located at:** {container_base_path}
|
||||
|
||||
{skill_evolution_section}
|
||||
{skills_list}
|
||||
|
||||
</skill_system>"""
|
||||
|
||||
|
||||
def get_skills_prompt_section(app_config: AppConfig, available_skills: set[str] | None = None) -> str:
|
||||
"""Generate the skills prompt section with available skills list."""
|
||||
skills = _get_enabled_skills(app_config)
|
||||
|
||||
container_base_path = app_config.skills.container_path
|
||||
skill_evolution_enabled = app_config.skill_evolution.enabled
|
||||
|
||||
if not skills and not skill_evolution_enabled:
|
||||
return ""
|
||||
|
||||
if available_skills is not None and not any(skill.name in available_skills for skill in skills):
|
||||
return ""
|
||||
|
||||
skill_signature = tuple((skill.name, skill.description, skill.category, skill.get_container_file_path(container_base_path)) for skill in skills)
|
||||
available_key = tuple(sorted(available_skills)) if available_skills is not None else None
|
||||
if not skill_signature and available_key is not None:
|
||||
return ""
|
||||
skill_evolution_section = _build_skill_evolution_section(skill_evolution_enabled)
|
||||
return _get_cached_skills_prompt_section(skill_signature, available_key, container_base_path, skill_evolution_section)
|
||||
|
||||
|
||||
def get_agent_soul(agent_name: str | None) -> str:
|
||||
# Append SOUL.md (agent personality) if present
|
||||
soul = load_agent_soul(agent_name)
|
||||
@@ -432,7 +630,7 @@ def get_agent_soul(agent_name: str | None) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
def get_deferred_tools_prompt_section() -> str:
|
||||
def get_deferred_tools_prompt_section(app_config: AppConfig) -> str:
|
||||
"""Generate <available-deferred-tools> block for the system prompt.
|
||||
|
||||
Lists only deferred tool names so the agent knows what exists
|
||||
@@ -441,12 +639,7 @@ def get_deferred_tools_prompt_section() -> str:
|
||||
"""
|
||||
from deerflow.tools.builtins.tool_search import get_deferred_registry
|
||||
|
||||
try:
|
||||
from deerflow.config import get_app_config
|
||||
|
||||
if not get_app_config().tool_search.enabled:
|
||||
return ""
|
||||
except FileNotFoundError:
|
||||
if not app_config.tool_search.enabled:
|
||||
return ""
|
||||
|
||||
registry = get_deferred_registry()
|
||||
@@ -457,15 +650,9 @@ def get_deferred_tools_prompt_section() -> str:
|
||||
return f"<available-deferred-tools>\n{names}\n</available-deferred-tools>"
|
||||
|
||||
|
||||
def _build_acp_section() -> str:
|
||||
def _build_acp_section(app_config: AppConfig) -> str:
|
||||
"""Build the ACP agent prompt section, only if ACP agents are configured."""
|
||||
try:
|
||||
from deerflow.config.acp_config import get_acp_agents
|
||||
|
||||
agents = get_acp_agents()
|
||||
if not agents:
|
||||
return ""
|
||||
except Exception:
|
||||
if not app_config.acp_agents:
|
||||
return ""
|
||||
|
||||
return (
|
||||
@@ -473,17 +660,40 @@ def _build_acp_section() -> str:
|
||||
"- ACP agents (e.g. codex, claude_code) run in their own independent workspace — NOT in `/mnt/user-data/`\n"
|
||||
"- When writing prompts for ACP agents, describe the task only — do NOT reference `/mnt/user-data` paths\n"
|
||||
"- ACP agent results are accessible at `/mnt/acp-workspace/` (read-only) — use `ls`, `read_file`, or `bash cp` to retrieve output files\n"
|
||||
"- To deliver ACP output to the user: copy from `/mnt/acp-workspace/<file>` to `/mnt/user-data/outputs/<file>`, then use `present_file`"
|
||||
"- To deliver ACP output to the user: copy from `/mnt/acp-workspace/<file>` to `/mnt/user-data/outputs/<file>`, then use `present_files`"
|
||||
)
|
||||
|
||||
|
||||
def apply_prompt_template(subagent_enabled: bool = False, max_concurrent_subagents: int = 3, *, agent_name: str | None = None, available_skills: set[str] | None = None) -> str:
|
||||
def _build_custom_mounts_section(app_config: AppConfig) -> str:
|
||||
"""Build a prompt section for explicitly configured sandbox mounts."""
|
||||
mounts = app_config.sandbox.mounts or []
|
||||
|
||||
if not mounts:
|
||||
return ""
|
||||
|
||||
lines = []
|
||||
for mount in mounts:
|
||||
access = "read-only" if mount.read_only else "read-write"
|
||||
lines.append(f"- Custom mount: `{mount.container_path}` - Host directory mapped into the sandbox ({access})")
|
||||
|
||||
mounts_list = "\n".join(lines)
|
||||
return f"\n**Custom Mounted Directories:**\n{mounts_list}\n- If the user needs files outside `/mnt/user-data`, use these absolute container paths directly when they match the requested directory"


def apply_prompt_template(
app_config: AppConfig,
subagent_enabled: bool = False,
max_concurrent_subagents: int = 3,
*,
agent_name: str | None = None,
available_skills: set[str] | None = None,
) -> str:
# Get memory context
memory_context = _get_memory_context(agent_name)
memory_context = _get_memory_context(app_config, agent_name)

# Include subagent section only if enabled (from runtime parameter)
n = max_concurrent_subagents
subagent_section = _build_subagent_section(n) if subagent_enabled else ""
subagent_section = _build_subagent_section(n, app_config) if subagent_enabled else ""

# Add subagent reminder to critical_reminders if enabled
subagent_reminder = (
@@ -504,13 +714,15 @@ def apply_prompt_template(subagent_enabled: bool = False, max_concurrent_subagen
)

# Get skills section
skills_section = get_skills_prompt_section(available_skills)
skills_section = get_skills_prompt_section(app_config, available_skills)

# Get deferred tools section (tool_search)
deferred_tools_section = get_deferred_tools_prompt_section()
deferred_tools_section = get_deferred_tools_prompt_section(app_config)

# Build ACP agent section only if ACP agents are configured
acp_section = _build_acp_section()
acp_section = _build_acp_section(app_config)
custom_mounts_section = _build_custom_mounts_section(app_config)
acp_and_mounts_section = "\n".join(section for section in (acp_section, custom_mounts_section) if section)

# Format the prompt with dynamic skills and memory
prompt = SYSTEM_PROMPT_TEMPLATE.format(
@@ -522,7 +734,7 @@ def apply_prompt_template(subagent_enabled: bool = False, max_concurrent_subagen
subagent_section=subagent_section,
subagent_reminder=subagent_reminder,
subagent_thinking=subagent_thinking,
acp_section=acp_section,
acp_section=acp_and_mounts_section,
)

return prompt + f"\n<current_date>{datetime.now().strftime('%Y-%m-%d, %A')}</current_date>"

@@ -0,0 +1,109 @@
"""Shared helpers for turning conversations into memory update inputs."""

from __future__ import annotations

import re
from copy import copy
from typing import Any

_UPLOAD_BLOCK_RE = re.compile(r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", re.IGNORECASE)
_CORRECTION_PATTERNS = (
    re.compile(r"\bthat(?:'s| is) (?:wrong|incorrect)\b", re.IGNORECASE),
    re.compile(r"\byou misunderstood\b", re.IGNORECASE),
    re.compile(r"\btry again\b", re.IGNORECASE),
    re.compile(r"\bredo\b", re.IGNORECASE),
    re.compile(r"不对"),
    re.compile(r"你理解错了"),
    re.compile(r"你理解有误"),
    re.compile(r"重试"),
    re.compile(r"重新来"),
    re.compile(r"换一种"),
    re.compile(r"改用"),
)
_REINFORCEMENT_PATTERNS = (
    re.compile(r"\byes[,.]?\s+(?:exactly|perfect|that(?:'s| is) (?:right|correct|it))\b", re.IGNORECASE),
    re.compile(r"\bperfect(?:[.!?]|$)", re.IGNORECASE),
    re.compile(r"\bexactly\s+(?:right|correct)\b", re.IGNORECASE),
    re.compile(r"\bthat(?:'s| is)\s+(?:exactly\s+)?(?:right|correct|what i (?:wanted|needed|meant))\b", re.IGNORECASE),
    re.compile(r"\bkeep\s+(?:doing\s+)?that\b", re.IGNORECASE),
    re.compile(r"\bjust\s+(?:like\s+)?(?:that|this)\b", re.IGNORECASE),
    re.compile(r"\bthis is (?:great|helpful)\b(?:[.!?]|$)", re.IGNORECASE),
    re.compile(r"\bthis is what i wanted\b(?:[.!?]|$)", re.IGNORECASE),
    re.compile(r"对[,,]?\s*就是这样(?:[。!?!?.]|$)"),
    re.compile(r"完全正确(?:[。!?!?.]|$)"),
    re.compile(r"(?:对[,,]?\s*)?就是这个意思(?:[。!?!?.]|$)"),
    re.compile(r"正是我想要的(?:[。!?!?.]|$)"),
    re.compile(r"继续保持(?:[。!?!?.]|$)"),
)


def extract_message_text(message: Any) -> str:
    """Extract plain text from message content for filtering and signal detection."""
    content = getattr(message, "content", "")
    if isinstance(content, list):
        text_parts: list[str] = []
        for part in content:
            if isinstance(part, str):
                text_parts.append(part)
            elif isinstance(part, dict):
                text_val = part.get("text")
                if isinstance(text_val, str):
                    text_parts.append(text_val)
        return " ".join(text_parts)
    return str(content)


def filter_messages_for_memory(messages: list[Any]) -> list[Any]:
    """Keep only user inputs and final assistant responses for memory updates."""
    filtered = []
    skip_next_ai = False
    for msg in messages:
        msg_type = getattr(msg, "type", None)

        if msg_type == "human":
            content_str = extract_message_text(msg)
            if "<uploaded_files>" in content_str:
                stripped = _UPLOAD_BLOCK_RE.sub("", content_str).strip()
                if not stripped:
                    skip_next_ai = True
                    continue
                clean_msg = copy(msg)
                clean_msg.content = stripped
                filtered.append(clean_msg)
                skip_next_ai = False
            else:
                filtered.append(msg)
                skip_next_ai = False
        elif msg_type == "ai":
            tool_calls = getattr(msg, "tool_calls", None)
            if not tool_calls:
                if skip_next_ai:
                    skip_next_ai = False
                    continue
                filtered.append(msg)

    return filtered
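As a quick illustration of the filtering rules (not part of the patch), the sketch below runs the helper above against plain stand-in message objects; only the attributes the helper reads (`type`, `content`, `tool_calls`) are set. An upload-only human turn and its paired assistant reply are both dropped, as are tool-calling assistant turns.

```python
# Illustrative usage of filter_messages_for_memory with SimpleNamespace stand-ins.
from types import SimpleNamespace


def make_msg(type_, content, tool_calls=None):
    return SimpleNamespace(type=type_, content=content, tool_calls=tool_calls or [])


history = [
    make_msg("human", "<uploaded_files>report.csv</uploaded_files>"),  # upload-only turn -> dropped
    make_msg("ai", "Got the file."),                                   # paired reply -> dropped
    make_msg("human", "Summarize the report, keep it under 100 words."),
    make_msg("ai", "", tool_calls=[{"name": "read_file"}]),            # tool-calling turn -> dropped
    make_msg("ai", "Here is the summary..."),                          # final answer -> kept
]

kept = filter_messages_for_memory(history)
print([(m.type, m.content) for m in kept])
# [('human', 'Summarize the report, keep it under 100 words.'), ('ai', 'Here is the summary...')]
```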


def detect_correction(messages: list[Any]) -> bool:
    """Detect explicit user corrections in recent conversation turns."""
    recent_user_msgs = [msg for msg in messages[-6:] if getattr(msg, "type", None) == "human"]

    for msg in recent_user_msgs:
        content = extract_message_text(msg).strip()
        if content and any(pattern.search(content) for pattern in _CORRECTION_PATTERNS):
            return True

    return False


def detect_reinforcement(messages: list[Any]) -> bool:
    """Detect explicit positive reinforcement signals in recent conversation turns."""
    recent_user_msgs = [msg for msg in messages[-6:] if getattr(msg, "type", None) == "human"]

    for msg in recent_user_msgs:
        content = extract_message_text(msg).strip()
        if content and any(pattern.search(content) for pattern in _REINFORCEMENT_PATTERNS):
            return True

    return False
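Again purely illustrative: the detectors only inspect the last six messages, and only human turns, so an earlier correction in a long thread is deliberately ignored.

```python
# Illustrative check of the signal detectors defined above.
from types import SimpleNamespace

recent = [
    SimpleNamespace(type="human", content="That's wrong, try again with the 2023 data."),
    SimpleNamespace(type="ai", content="Re-running with the 2023 figures."),
]
print(detect_correction(recent))     # True  ("that's wrong" / "try again" both match)
print(detect_reinforcement(recent))  # False
```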
@@ -29,6 +29,17 @@ Instructions:
|
||||
2. Extract relevant facts, preferences, and context with specific details (numbers, names, technologies)
|
||||
3. Update the memory sections as needed following the detailed length guidelines below
|
||||
|
||||
Before extracting facts, perform a structured reflection on the conversation:
|
||||
1. Error/Retry Detection: Did the agent encounter errors, require retries, or produce incorrect results?
|
||||
If yes, record the root cause and correct approach as a high-confidence fact with category "correction".
|
||||
2. User Correction Detection: Did the user correct the agent's direction, understanding, or output?
|
||||
If yes, record the correct interpretation or approach as a high-confidence fact with category "correction".
|
||||
Include what went wrong in "sourceError" only when category is "correction" and the mistake is explicit in the conversation.
|
||||
3. Project Constraint Discovery: Were any project-specific constraints discovered during the conversation?
|
||||
If yes, record them as facts with the most appropriate category and confidence.
|
||||
|
||||
{correction_hint}
|
||||
|
||||
Memory Section Guidelines:
|
||||
|
||||
**User Context** (Current state - concise summaries):
|
||||
@@ -62,6 +73,7 @@ Memory Section Guidelines:
|
||||
* context: Background facts (job title, projects, locations, languages)
|
||||
* behavior: Working patterns, communication habits, problem-solving approaches
|
||||
* goal: Stated objectives, learning targets, project ambitions
|
||||
* correction: Explicit agent mistakes or user corrections, including the correct approach
|
||||
- Confidence levels:
|
||||
* 0.9-1.0: Explicitly stated facts ("I work on X", "My role is Y")
|
||||
* 0.7-0.8: Strongly implied from actions/discussions
|
||||
@@ -94,7 +106,7 @@ Output Format (JSON):
|
||||
"longTermBackground": {{ "summary": "...", "shouldUpdate": true/false }}
|
||||
}},
|
||||
"newFacts": [
|
||||
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal", "confidence": 0.0-1.0 }}
|
||||
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal|correction", "confidence": 0.0-1.0 }}
|
||||
],
|
||||
"factsToRemove": ["fact_id_1", "fact_id_2"]
|
||||
}}
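Not part of the patch: a small sketch of how a parsed update payload with the new `correction` category might look, and how a confidence gate like the one in `_apply_updates` would treat it. The 0.7 threshold is an assumed illustration value, not the project default, and the field names follow the output format shown above.

```python
# Hypothetical parsed LLM output; "sourceError" only accompanies correction facts.
update_data = {
    "newFacts": [
        {
            "content": "Use the staging database for load tests",
            "category": "correction",
            "confidence": 0.97,
            "sourceError": "ran load tests against the production database",
        },
        {"content": "User may prefer dark-mode screenshots", "category": "preference", "confidence": 0.55},
    ],
    "factsToRemove": [],
}

FACT_CONFIDENCE_THRESHOLD = 0.7  # assumed value for illustration
accepted = [f for f in update_data["newFacts"] if f["confidence"] >= FACT_CONFIDENCE_THRESHOLD]
print([f["category"] for f in accepted])  # ['correction'] -- the low-confidence fact is dropped
```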
|
||||
@@ -104,6 +116,8 @@ Important Rules:
|
||||
- Follow length guidelines: workContext/personalContext are concise (1-3 sentences), topOfMind and history sections are detailed (paragraphs)
|
||||
- Include specific metrics, version numbers, and proper nouns in facts
|
||||
- Only add facts that are clearly stated (0.9+) or strongly implied (0.7+)
|
||||
- Use category "correction" for explicit agent mistakes or user corrections; assign confidence >= 0.95 when the correction is explicit
|
||||
- Include "sourceError" only for explicit correction facts when the prior mistake or wrong approach is clearly stated; omit it otherwise
|
||||
- Remove facts that are contradicted by new information
|
||||
- When updating topOfMind, integrate new focus areas while removing completed/abandoned ones
|
||||
Keep 3-5 concurrent focus themes that are still active and relevant
|
||||
@@ -126,7 +140,7 @@ Message:
|
||||
Extract facts in this JSON format:
|
||||
{{
|
||||
"facts": [
|
||||
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal", "confidence": 0.0-1.0 }}
|
||||
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal|correction", "confidence": 0.0-1.0 }}
|
||||
]
|
||||
}}
|
||||
|
||||
@@ -136,6 +150,7 @@ Categories:
|
||||
- context: Background context (location, job, projects)
|
||||
- behavior: Behavioral patterns
|
||||
- goal: User's goals or objectives
|
||||
- correction: Explicit corrections or mistakes to avoid repeating
|
||||
|
||||
Rules:
|
||||
- Only extract clear, specific facts
|
||||
@@ -231,6 +246,10 @@ def format_memory_for_injection(memory_data: dict[str, Any], max_tokens: int = 2
|
||||
if earlier.get("summary"):
|
||||
history_sections.append(f"Earlier: {earlier['summary']}")
|
||||
|
||||
background = history_data.get("longTermBackground", {})
|
||||
if background.get("summary"):
|
||||
history_sections.append(f"Background: {background['summary']}")
|
||||
|
||||
if history_sections:
|
||||
sections.append("History:\n" + "\n".join(f"- {s}" for s in history_sections))
|
||||
|
||||
@@ -262,7 +281,11 @@ def format_memory_for_injection(memory_data: dict[str, Any], max_tokens: int = 2
|
||||
continue
|
||||
category = str(fact.get("category", "context")).strip() or "context"
|
||||
confidence = _coerce_confidence(fact.get("confidence"), default=0.0)
|
||||
line = f"- [{category} | {confidence:.2f}] {content}"
|
||||
source_error = fact.get("sourceError")
|
||||
if category == "correction" and isinstance(source_error, str) and source_error.strip():
|
||||
line = f"- [{category} | {confidence:.2f}] {content} (avoid: {source_error.strip()})"
|
||||
else:
|
||||
line = f"- [{category} | {confidence:.2f}] {content}"
|
||||
|
||||
# Each additional line is preceded by a newline (except the first).
|
||||
line_text = ("\n" + line) if fact_lines else line
|
||||
|
||||
@@ -4,22 +4,31 @@ import logging
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any
|
||||
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Module-level config pointer set by the middleware that owns the queue.
|
||||
# The queue runs on a background Timer thread where ``Runtime`` and FastAPI
|
||||
# request context are not accessible; the enqueuer (which does have runtime
|
||||
# context) is responsible for plumbing ``AppConfig`` through ``add()``.
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConversationContext:
|
||||
"""Context for a conversation to be processed for memory update."""
|
||||
|
||||
thread_id: str
|
||||
messages: list[Any]
|
||||
timestamp: datetime = field(default_factory=datetime.utcnow)
|
||||
timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
|
||||
agent_name: str | None = None
|
||||
user_id: str | None = None
|
||||
correction_detected: bool = False
|
||||
reinforcement_detected: bool = False
|
||||
|
||||
|
||||
class MemoryUpdateQueue:
|
||||
@@ -28,62 +37,128 @@ class MemoryUpdateQueue:
|
||||
This queue collects conversation contexts and processes them after
|
||||
a configurable debounce period. Multiple conversations received within
|
||||
the debounce window are batched together.
|
||||
|
||||
The queue captures an ``AppConfig`` reference at construction time and
|
||||
reuses it for the MemoryUpdater it spawns. Callers must construct a
|
||||
fresh queue when the config changes rather than reaching into a global.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the memory update queue."""
|
||||
def __init__(self, app_config: AppConfig):
|
||||
"""Initialize the memory update queue.
|
||||
|
||||
Args:
|
||||
app_config: Application config. The queue reads its own
|
||||
``memory`` section for debounce timing and hands the full
|
||||
config to :class:`MemoryUpdater`.
|
||||
"""
|
||||
self._app_config = app_config
|
||||
self._queue: list[ConversationContext] = []
|
||||
self._lock = threading.Lock()
|
||||
self._timer: threading.Timer | None = None
|
||||
self._processing = False
|
||||
|
||||
def add(self, thread_id: str, messages: list[Any], agent_name: str | None = None) -> None:
|
||||
"""Add a conversation to the update queue.
|
||||
|
||||
Args:
|
||||
thread_id: The thread ID.
|
||||
messages: The conversation messages.
|
||||
agent_name: If provided, memory is stored per-agent. If None, uses global memory.
|
||||
"""
|
||||
config = get_memory_config()
|
||||
def add(
|
||||
self,
|
||||
thread_id: str,
|
||||
messages: list[Any],
|
||||
agent_name: str | None = None,
|
||||
user_id: str | None = None,
|
||||
correction_detected: bool = False,
|
||||
reinforcement_detected: bool = False,
|
||||
) -> None:
|
||||
"""Add a conversation to the update queue."""
|
||||
config = self._app_config.memory
|
||||
if not config.enabled:
|
||||
return
|
||||
|
||||
context = ConversationContext(
|
||||
thread_id=thread_id,
|
||||
messages=messages,
|
||||
agent_name=agent_name,
|
||||
)
|
||||
|
||||
with self._lock:
|
||||
# Check if this thread already has a pending update
|
||||
# If so, replace it with the newer one
|
||||
self._queue = [c for c in self._queue if c.thread_id != thread_id]
|
||||
self._queue.append(context)
|
||||
|
||||
# Reset or start the debounce timer
|
||||
self._enqueue_locked(
|
||||
thread_id=thread_id,
|
||||
messages=messages,
|
||||
agent_name=agent_name,
|
||||
user_id=user_id,
|
||||
correction_detected=correction_detected,
|
||||
reinforcement_detected=reinforcement_detected,
|
||||
)
|
||||
self._reset_timer()
|
||||
|
||||
logger.info("Memory update queued for thread %s, queue size: %d", thread_id, len(self._queue))
|
||||
|
||||
def add_nowait(
|
||||
self,
|
||||
thread_id: str,
|
||||
messages: list[Any],
|
||||
agent_name: str | None = None,
|
||||
user_id: str | None = None,
|
||||
correction_detected: bool = False,
|
||||
reinforcement_detected: bool = False,
|
||||
) -> None:
|
||||
"""Add a conversation and start processing immediately in the background."""
|
||||
config = self._app_config.memory
|
||||
if not config.enabled:
|
||||
return
|
||||
|
||||
with self._lock:
|
||||
self._enqueue_locked(
|
||||
thread_id=thread_id,
|
||||
messages=messages,
|
||||
agent_name=agent_name,
|
||||
user_id=user_id,
|
||||
correction_detected=correction_detected,
|
||||
reinforcement_detected=reinforcement_detected,
|
||||
)
|
||||
self._schedule_timer(0)
|
||||
|
||||
logger.info("Memory update queued for immediate processing on thread %s, queue size: %d", thread_id, len(self._queue))
|
||||
|
||||
def _enqueue_locked(
|
||||
self,
|
||||
*,
|
||||
thread_id: str,
|
||||
messages: list[Any],
|
||||
agent_name: str | None,
|
||||
user_id: str | None = None,
|
||||
correction_detected: bool,
|
||||
reinforcement_detected: bool,
|
||||
) -> None:
|
||||
existing_context = next(
|
||||
(context for context in self._queue if context.thread_id == thread_id),
|
||||
None,
|
||||
)
|
||||
merged_correction_detected = correction_detected or (existing_context.correction_detected if existing_context is not None else False)
|
||||
merged_reinforcement_detected = reinforcement_detected or (existing_context.reinforcement_detected if existing_context is not None else False)
|
||||
context = ConversationContext(
|
||||
thread_id=thread_id,
|
||||
messages=messages,
|
||||
agent_name=agent_name,
|
||||
user_id=user_id,
|
||||
correction_detected=merged_correction_detected,
|
||||
reinforcement_detected=merged_reinforcement_detected,
|
||||
)
|
||||
|
||||
self._queue = [c for c in self._queue if c.thread_id != thread_id]
|
||||
self._queue.append(context)
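As a sketch of the intended merge semantics (not an excerpt from the patch): re-queuing the same thread replaces the pending context but keeps any correction/reinforcement flag that was already set, so a later flag-free flush cannot erase an earlier signal.

```python
# Minimal stand-alone illustration of the per-thread merge performed in _enqueue_locked.
from dataclasses import dataclass, field


@dataclass
class Ctx:
    thread_id: str
    messages: list = field(default_factory=list)
    correction_detected: bool = False
    reinforcement_detected: bool = False


queue: list[Ctx] = []


def enqueue(thread_id: str, messages: list, correction: bool, reinforcement: bool) -> None:
    existing = next((c for c in queue if c.thread_id == thread_id), None)
    merged = Ctx(
        thread_id,
        messages,
        correction or (existing.correction_detected if existing else False),
        reinforcement or (existing.reinforcement_detected if existing else False),
    )
    queue[:] = [c for c in queue if c.thread_id != thread_id] + [merged]


enqueue("t1", ["turn 1"], correction=True, reinforcement=False)
enqueue("t1", ["turn 1", "turn 2"], correction=False, reinforcement=False)
print(len(queue), queue[0].correction_detected)  # 1 True -- the flag survives the replacement
```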
|
||||
|
||||
def _reset_timer(self) -> None:
|
||||
"""Reset the debounce timer."""
|
||||
config = get_memory_config()
|
||||
config = self._app_config.memory
|
||||
self._schedule_timer(config.debounce_seconds)
|
||||
|
||||
logger.debug("Memory update timer set for %ss", config.debounce_seconds)
|
||||
|
||||
def _schedule_timer(self, delay_seconds: float) -> None:
|
||||
"""Schedule queue processing after the provided delay."""
|
||||
# Cancel existing timer if any
|
||||
if self._timer is not None:
|
||||
self._timer.cancel()
|
||||
|
||||
# Start new timer
|
||||
self._timer = threading.Timer(
|
||||
config.debounce_seconds,
|
||||
delay_seconds,
|
||||
self._process_queue,
|
||||
)
|
||||
self._timer.daemon = True
|
||||
self._timer.start()
|
||||
|
||||
logger.debug("Memory update timer set for %ss", config.debounce_seconds)
|
||||
|
||||
def _process_queue(self) -> None:
|
||||
"""Process all queued conversation contexts."""
|
||||
# Import here to avoid circular dependency
|
||||
@@ -91,8 +166,8 @@ class MemoryUpdateQueue:
|
||||
|
||||
with self._lock:
|
||||
if self._processing:
|
||||
# Already processing, reschedule
|
||||
self._reset_timer()
|
||||
# Preserve immediate flush semantics even if another worker is active.
|
||||
self._schedule_timer(0)
|
||||
return
|
||||
|
||||
if not self._queue:
|
||||
@@ -106,7 +181,7 @@ class MemoryUpdateQueue:
|
||||
logger.info("Processing %d queued memory updates", len(contexts_to_process))
|
||||
|
||||
try:
|
||||
updater = MemoryUpdater()
|
||||
updater = MemoryUpdater(self._app_config)
|
||||
|
||||
for context in contexts_to_process:
|
||||
try:
|
||||
@@ -115,6 +190,9 @@ class MemoryUpdateQueue:
|
||||
messages=context.messages,
|
||||
thread_id=context.thread_id,
|
||||
agent_name=context.agent_name,
|
||||
correction_detected=context.correction_detected,
|
||||
reinforcement_detected=context.reinforcement_detected,
|
||||
user_id=context.user_id,
|
||||
)
|
||||
if success:
|
||||
logger.info("Memory updated successfully for thread %s", context.thread_id)
|
||||
@@ -143,6 +221,13 @@ class MemoryUpdateQueue:
|
||||
|
||||
self._process_queue()
|
||||
|
||||
def flush_nowait(self) -> None:
|
||||
"""Start queue processing immediately in a background thread."""
|
||||
with self._lock:
|
||||
# Daemon thread: queued messages may be lost if the process exits
|
||||
# before _process_queue completes. Acceptable for best-effort memory updates.
|
||||
self._schedule_timer(0)
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear the queue without processing.
|
||||
|
||||
@@ -168,31 +253,35 @@ class MemoryUpdateQueue:
|
||||
return self._processing
|
||||
|
||||
|
||||
# Global singleton instance
|
||||
_memory_queue: MemoryUpdateQueue | None = None
|
||||
# Queues keyed by ``id(AppConfig)`` so tests and multi-client setups with
|
||||
# distinct configs do not share a debounce queue.
|
||||
_memory_queues: dict[int, MemoryUpdateQueue] = {}
|
||||
_queue_lock = threading.Lock()
|
||||
|
||||
|
||||
def get_memory_queue() -> MemoryUpdateQueue:
|
||||
"""Get the global memory update queue singleton.
|
||||
|
||||
Returns:
|
||||
The memory update queue instance.
|
||||
"""
|
||||
global _memory_queue
|
||||
def get_memory_queue(app_config: AppConfig) -> MemoryUpdateQueue:
|
||||
"""Get or create the memory update queue for the given app config."""
|
||||
key = id(app_config)
|
||||
with _queue_lock:
|
||||
if _memory_queue is None:
|
||||
_memory_queue = MemoryUpdateQueue()
|
||||
return _memory_queue
|
||||
queue = _memory_queues.get(key)
|
||||
if queue is None:
|
||||
queue = MemoryUpdateQueue(app_config)
|
||||
_memory_queues[key] = queue
|
||||
return queue
|
||||
|
||||
|
||||
def reset_memory_queue() -> None:
|
||||
"""Reset the global memory queue.
|
||||
def reset_memory_queue(app_config: AppConfig | None = None) -> None:
|
||||
"""Reset memory queue(s).
|
||||
|
||||
This is useful for testing.
|
||||
Pass an ``app_config`` to reset only its queue, or omit to reset all
|
||||
(useful at test teardown).
|
||||
"""
|
||||
global _memory_queue
|
||||
with _queue_lock:
|
||||
if _memory_queue is not None:
|
||||
_memory_queue.clear()
|
||||
_memory_queue = None
|
||||
if app_config is not None:
|
||||
queue = _memory_queues.pop(id(app_config), None)
|
||||
if queue is not None:
|
||||
queue.clear()
|
||||
return
|
||||
for queue in _memory_queues.values():
|
||||
queue.clear()
|
||||
_memory_queues.clear()
|
||||
|
||||
@@ -4,22 +4,28 @@ import abc
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from deerflow.config.agents_config import AGENT_NAME_PATTERN
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
from deerflow.config.memory_config import MemoryConfig
|
||||
from deerflow.config.paths import get_paths
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def utc_now_iso_z() -> str:
|
||||
"""Current UTC time as ISO-8601 with ``Z`` suffix (matches prior naive-UTC output)."""
|
||||
return datetime.now(UTC).isoformat().removesuffix("+00:00") + "Z"
|
||||
|
||||
|
||||
def create_empty_memory() -> dict[str, Any]:
|
||||
"""Create an empty memory structure."""
|
||||
return {
|
||||
"version": "1.0",
|
||||
"lastUpdated": datetime.utcnow().isoformat() + "Z",
|
||||
"lastUpdated": utc_now_iso_z(),
|
||||
"user": {
|
||||
"workContext": {"summary": "", "updatedAt": ""},
|
||||
"personalContext": {"summary": "", "updatedAt": ""},
|
||||
@@ -38,17 +44,17 @@ class MemoryStorage(abc.ABC):
|
||||
"""Abstract base class for memory storage providers."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def load(self, agent_name: str | None = None) -> dict[str, Any]:
|
||||
def load(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Load memory data for the given agent."""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def reload(self, agent_name: str | None = None) -> dict[str, Any]:
|
||||
def reload(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Force reload memory data for the given agent."""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def save(self, memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
|
||||
def save(self, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
|
||||
"""Save memory data for the given agent."""
|
||||
pass
|
||||
|
||||
@@ -56,11 +62,20 @@ class MemoryStorage(abc.ABC):
|
||||
class FileMemoryStorage(MemoryStorage):
|
||||
"""File-based memory storage provider."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the file memory storage."""
|
||||
# Per-agent memory cache: keyed by agent_name (None = global)
|
||||
def __init__(self, memory_config: MemoryConfig):
|
||||
"""Initialize the file memory storage.
|
||||
|
||||
Args:
|
||||
memory_config: Memory configuration (storage_path etc.). Stored on
|
||||
the instance so per-request lookups don't need to reach for
|
||||
ambient state.
|
||||
"""
|
||||
self._memory_config = memory_config
|
||||
# Per-user/agent memory cache: keyed by (user_id, agent_name) tuple (None = global)
|
||||
# Value: (memory_data, file_mtime)
|
||||
self._memory_cache: dict[str | None, tuple[dict[str, Any], float | None]] = {}
|
||||
self._memory_cache: dict[tuple[str | None, str | None], tuple[dict[str, Any], float | None]] = {}
|
||||
# Guards all reads and writes to _memory_cache across concurrent callers.
|
||||
self._cache_lock = threading.Lock()
|
||||
|
||||
def _validate_agent_name(self, agent_name: str) -> None:
|
||||
"""Validate that the agent name is safe to use in filesystem paths.
|
||||
@@ -73,21 +88,28 @@ class FileMemoryStorage(MemoryStorage):
|
||||
if not AGENT_NAME_PATTERN.match(agent_name):
|
||||
raise ValueError(f"Invalid agent name {agent_name!r}: names must match {AGENT_NAME_PATTERN.pattern}")
|
||||
|
||||
def _get_memory_file_path(self, agent_name: str | None = None) -> Path:
|
||||
def _get_memory_file_path(self, agent_name: str | None = None, *, user_id: str | None = None) -> Path:
|
||||
"""Get the path to the memory file."""
|
||||
config = self._memory_config
|
||||
if user_id is not None:
|
||||
if agent_name is not None:
|
||||
self._validate_agent_name(agent_name)
|
||||
return get_paths().user_agent_memory_file(user_id, agent_name)
|
||||
if config.storage_path and Path(config.storage_path).is_absolute():
|
||||
return Path(config.storage_path)
|
||||
return get_paths().user_memory_file(user_id)
|
||||
# Legacy: no user_id
|
||||
if agent_name is not None:
|
||||
self._validate_agent_name(agent_name)
|
||||
return get_paths().agent_memory_file(agent_name)
|
||||
|
||||
config = get_memory_config()
|
||||
if config.storage_path:
|
||||
p = Path(config.storage_path)
|
||||
return p if p.is_absolute() else get_paths().base_dir / p
|
||||
return get_paths().memory_file
|
||||
|
||||
def _load_memory_from_file(self, agent_name: str | None = None) -> dict[str, Any]:
|
||||
def _load_memory_from_file(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Load memory data from file."""
|
||||
file_path = self._get_memory_file_path(agent_name)
|
||||
file_path = self._get_memory_file_path(agent_name, user_id=user_id)
|
||||
|
||||
if not file_path.exists():
|
||||
return create_empty_memory()
|
||||
@@ -100,46 +122,55 @@ class FileMemoryStorage(MemoryStorage):
|
||||
logger.warning("Failed to load memory file: %s", e)
|
||||
return create_empty_memory()
|
||||
|
||||
def load(self, agent_name: str | None = None) -> dict[str, Any]:
|
||||
def load(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Load memory data (cached with file modification time check)."""
|
||||
file_path = self._get_memory_file_path(agent_name)
|
||||
file_path = self._get_memory_file_path(agent_name, user_id=user_id)
|
||||
|
||||
try:
|
||||
current_mtime = file_path.stat().st_mtime if file_path.exists() else None
|
||||
except OSError:
|
||||
current_mtime = None
|
||||
|
||||
cached = self._memory_cache.get(agent_name)
|
||||
cache_key = (user_id, agent_name)
|
||||
with self._cache_lock:
|
||||
cached = self._memory_cache.get(cache_key)
|
||||
if cached is not None and cached[1] == current_mtime:
|
||||
return cached[0]
|
||||
|
||||
if cached is None or cached[1] != current_mtime:
|
||||
memory_data = self._load_memory_from_file(agent_name)
|
||||
self._memory_cache[agent_name] = (memory_data, current_mtime)
|
||||
return memory_data
|
||||
memory_data = self._load_memory_from_file(agent_name, user_id=user_id)
|
||||
|
||||
return cached[0]
|
||||
with self._cache_lock:
|
||||
self._memory_cache[cache_key] = (memory_data, current_mtime)
|
||||
|
||||
def reload(self, agent_name: str | None = None) -> dict[str, Any]:
|
||||
return memory_data
|
||||
|
||||
def reload(self, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Reload memory data from file, forcing cache invalidation."""
|
||||
file_path = self._get_memory_file_path(agent_name)
|
||||
memory_data = self._load_memory_from_file(agent_name)
|
||||
file_path = self._get_memory_file_path(agent_name, user_id=user_id)
|
||||
memory_data = self._load_memory_from_file(agent_name, user_id=user_id)
|
||||
|
||||
try:
|
||||
mtime = file_path.stat().st_mtime if file_path.exists() else None
|
||||
except OSError:
|
||||
mtime = None
|
||||
|
||||
self._memory_cache[agent_name] = (memory_data, mtime)
|
||||
cache_key = (user_id, agent_name)
|
||||
with self._cache_lock:
|
||||
self._memory_cache[cache_key] = (memory_data, mtime)
|
||||
return memory_data
|
||||
|
||||
def save(self, memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
|
||||
def save(self, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
|
||||
"""Save memory data to file and update cache."""
|
||||
file_path = self._get_memory_file_path(agent_name)
|
||||
file_path = self._get_memory_file_path(agent_name, user_id=user_id)
|
||||
|
||||
try:
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
memory_data["lastUpdated"] = datetime.utcnow().isoformat() + "Z"
|
||||
# Shallow-copy before adding lastUpdated so the caller's dict is not
|
||||
# mutated as a side-effect, and the cache reference is not silently
|
||||
# updated before the file write succeeds.
|
||||
memory_data = {**memory_data, "lastUpdated": utc_now_iso_z()}
|
||||
|
||||
temp_path = file_path.with_suffix(".tmp")
|
||||
temp_path = file_path.with_suffix(f".{uuid.uuid4().hex}.tmp")
|
||||
with open(temp_path, "w", encoding="utf-8") as f:
|
||||
json.dump(memory_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
@@ -150,7 +181,9 @@ class FileMemoryStorage(MemoryStorage):
|
||||
except OSError:
|
||||
mtime = None
|
||||
|
||||
self._memory_cache[agent_name] = (memory_data, mtime)
|
||||
cache_key = (user_id, agent_name)
|
||||
with self._cache_lock:
|
||||
self._memory_cache[cache_key] = (memory_data, mtime)
|
||||
logger.info("Memory saved to %s", file_path)
|
||||
return True
|
||||
except OSError as e:
|
||||
@@ -158,23 +191,31 @@ class FileMemoryStorage(MemoryStorage):
|
||||
return False
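A generic sketch of the write pattern the new save path appears to follow (unique temp file per writer, then an atomic rename); the final `os.replace` step is not visible in this hunk, so treat it as an assumption about the surrounding context.

```python
# Generic write-temp-then-replace sketch; not copied from the patch.
import json
import os
import uuid
from pathlib import Path


def atomic_write_json(file_path: Path, data: dict) -> None:
    file_path.parent.mkdir(parents=True, exist_ok=True)
    temp_path = file_path.with_suffix(f".{uuid.uuid4().hex}.tmp")  # unique per concurrent writer
    with open(temp_path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    os.replace(temp_path, file_path)  # atomic when source and target are on the same filesystem


atomic_write_json(Path("memory-example.json"), {"version": "1.0", "facts": []})
```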
|
||||
|
||||
|
||||
_storage_instance: MemoryStorage | None = None
|
||||
# Instances keyed by (storage_class_path, id(memory_config)) so tests can
|
||||
# construct isolated storages and multi-client setups with different configs
|
||||
# don't collide on a single process-wide singleton.
|
||||
_storage_instances: dict[tuple[str, int], MemoryStorage] = {}
|
||||
_storage_lock = threading.Lock()
|
||||
|
||||
|
||||
def get_memory_storage() -> MemoryStorage:
|
||||
"""Get the configured memory storage instance."""
|
||||
global _storage_instance
|
||||
if _storage_instance is not None:
|
||||
return _storage_instance
|
||||
def get_memory_storage(memory_config: MemoryConfig) -> MemoryStorage:
|
||||
"""Get the configured memory storage instance.
|
||||
|
||||
Caches one instance per ``(storage_class, memory_config)`` pair. In
|
||||
single-config deployments this collapses to one instance; in multi-client
|
||||
or test scenarios each config gets its own storage.
|
||||
"""
|
||||
key = (memory_config.storage_class, id(memory_config))
|
||||
existing = _storage_instances.get(key)
|
||||
if existing is not None:
|
||||
return existing
|
||||
|
||||
with _storage_lock:
|
||||
if _storage_instance is not None:
|
||||
return _storage_instance
|
||||
|
||||
config = get_memory_config()
|
||||
storage_class_path = config.storage_class
|
||||
existing = _storage_instances.get(key)
|
||||
if existing is not None:
|
||||
return existing
|
||||
|
||||
storage_class_path = memory_config.storage_class
|
||||
try:
|
||||
module_path, class_name = storage_class_path.rsplit(".", 1)
|
||||
import importlib
|
||||
@@ -188,13 +229,14 @@ def get_memory_storage() -> MemoryStorage:
|
||||
if not issubclass(storage_class, MemoryStorage):
|
||||
raise TypeError(f"Configured memory storage '{storage_class_path}' is not a subclass of MemoryStorage")
|
||||
|
||||
_storage_instance = storage_class()
|
||||
instance = storage_class(memory_config)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to load memory storage %s, falling back to FileMemoryStorage: %s",
|
||||
storage_class_path,
|
||||
e,
|
||||
)
|
||||
_storage_instance = FileMemoryStorage()
|
||||
instance = FileMemoryStorage(memory_config)
|
||||
|
||||
return _storage_instance
|
||||
_storage_instances[key] = instance
|
||||
return instance
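For orientation only: the lookup above boils down to the standard importlib pattern sketched here, with a fallback class when the configured dotted path cannot be loaded. The `my_project.storage.RedisMemoryStorage` path is a made-up example.

```python
# Generic importlib-based class loading with a fallback, mirroring the pattern above.
import importlib


def load_class(dotted_path: str, fallback: type) -> type:
    try:
        module_path, class_name = dotted_path.rsplit(".", 1)
        module = importlib.import_module(module_path)
        cls = getattr(module, class_name)
        if not isinstance(cls, type):
            raise TypeError(f"{dotted_path} is not a class")
        return cls
    except Exception:
        return fallback


print(load_class("collections.OrderedDict", dict))                # <class 'collections.OrderedDict'>
print(load_class("my_project.storage.RedisMemoryStorage", dict))  # <class 'dict'> (fallback)
```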
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
"""Hooks fired before summarization removes messages from state."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from deerflow.agents.memory.message_processing import detect_correction, detect_reinforcement, filter_messages_for_memory
|
||||
from deerflow.agents.memory.queue import get_memory_queue
|
||||
from deerflow.agents.middlewares.summarization_middleware import SummarizationEvent
|
||||
from deerflow.config.app_config import AppConfig
|
||||
|
||||
|
||||
def memory_flush_hook(event: SummarizationEvent) -> None:
|
||||
"""Flush messages about to be summarized into the memory queue.
|
||||
|
||||
Reads ``AppConfig`` from disk on every invocation. This hook is fired by
|
||||
``SummarizationMiddleware`` which has no ergonomic way to thread an
|
||||
explicit ``app_config`` through; ``AppConfig.from_file()`` is a pure load
|
||||
so the cost is acceptable for this rare pre-summarization callback.
|
||||
"""
|
||||
app_config = AppConfig.from_file()
|
||||
if not app_config.memory.enabled or not event.thread_id:
|
||||
return
|
||||
|
||||
filtered_messages = filter_messages_for_memory(list(event.messages_to_summarize))
|
||||
user_messages = [message for message in filtered_messages if getattr(message, "type", None) == "human"]
|
||||
assistant_messages = [message for message in filtered_messages if getattr(message, "type", None) == "ai"]
|
||||
if not user_messages or not assistant_messages:
|
||||
return
|
||||
|
||||
correction_detected = detect_correction(filtered_messages)
|
||||
reinforcement_detected = not correction_detected and detect_reinforcement(filtered_messages)
|
||||
queue = get_memory_queue(app_config)
|
||||
queue.add_nowait(
|
||||
thread_id=event.thread_id,
|
||||
messages=filtered_messages,
|
||||
agent_name=event.agent_name,
|
||||
correction_detected=correction_detected,
|
||||
reinforcement_detected=reinforcement_detected,
|
||||
)
|
||||
@@ -1,67 +1,71 @@
|
||||
"""Memory updater for reading, writing, and updating memory data."""
|
||||
|
||||
import asyncio
|
||||
import atexit
|
||||
import concurrent.futures
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import re
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from collections.abc import Awaitable
|
||||
from typing import Any
|
||||
|
||||
from deerflow.agents.memory.prompt import (
|
||||
MEMORY_UPDATE_PROMPT,
|
||||
format_conversation_for_update,
|
||||
)
|
||||
from deerflow.agents.memory.storage import create_empty_memory, get_memory_storage
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
from deerflow.agents.memory.storage import (
|
||||
create_empty_memory,
|
||||
get_memory_storage,
|
||||
utc_now_iso_z,
|
||||
)
|
||||
from deerflow.config.app_config import AppConfig
|
||||
from deerflow.config.memory_config import MemoryConfig
|
||||
from deerflow.models import create_chat_model
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_SYNC_MEMORY_UPDATER_EXECUTOR = concurrent.futures.ThreadPoolExecutor(
|
||||
max_workers=4,
|
||||
thread_name_prefix="memory-updater-sync",
|
||||
)
|
||||
atexit.register(lambda: _SYNC_MEMORY_UPDATER_EXECUTOR.shutdown(wait=False))
|
||||
|
||||
|
||||
def _create_empty_memory() -> dict[str, Any]:
|
||||
"""Backward-compatible wrapper around the storage-layer empty-memory factory."""
|
||||
return create_empty_memory()
|
||||
|
||||
|
||||
def _save_memory_to_file(memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
|
||||
"""Backward-compatible wrapper around the configured memory storage save path."""
|
||||
return get_memory_storage().save(memory_data, agent_name)
|
||||
def _save_memory_to_file(memory_config: MemoryConfig, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> bool:
|
||||
"""Save via the configured memory storage."""
|
||||
return get_memory_storage(memory_config).save(memory_data, agent_name, user_id=user_id)
|
||||
|
||||
|
||||
def get_memory_data(agent_name: str | None = None) -> dict[str, Any]:
|
||||
def get_memory_data(memory_config: MemoryConfig, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Get the current memory data via storage provider."""
|
||||
return get_memory_storage().load(agent_name)
|
||||
return get_memory_storage(memory_config).load(agent_name, user_id=user_id)
|
||||
|
||||
|
||||
def reload_memory_data(agent_name: str | None = None) -> dict[str, Any]:
|
||||
def reload_memory_data(memory_config: MemoryConfig, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Reload memory data via storage provider."""
|
||||
return get_memory_storage().reload(agent_name)
|
||||
return get_memory_storage(memory_config).reload(agent_name, user_id=user_id)
|
||||
|
||||
|
||||
def import_memory_data(memory_data: dict[str, Any], agent_name: str | None = None) -> dict[str, Any]:
|
||||
"""Persist imported memory data via storage provider.
|
||||
|
||||
Args:
|
||||
memory_data: Full memory payload to persist.
|
||||
agent_name: If provided, imports into per-agent memory.
|
||||
|
||||
Returns:
|
||||
The saved memory data after storage normalization.
|
||||
|
||||
Raises:
|
||||
OSError: If persisting the imported memory fails.
|
||||
"""
|
||||
storage = get_memory_storage()
|
||||
if not storage.save(memory_data, agent_name):
|
||||
def import_memory_data(memory_config: MemoryConfig, memory_data: dict[str, Any], agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Persist imported memory data via storage provider."""
|
||||
storage = get_memory_storage(memory_config)
|
||||
if not storage.save(memory_data, agent_name, user_id=user_id):
|
||||
raise OSError("Failed to save imported memory data")
|
||||
return storage.load(agent_name)
|
||||
return storage.load(agent_name, user_id=user_id)
|
||||
|
||||
|
||||
def clear_memory_data(agent_name: str | None = None) -> dict[str, Any]:
|
||||
def clear_memory_data(memory_config: MemoryConfig, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Clear all stored memory data and persist an empty structure."""
|
||||
cleared_memory = create_empty_memory()
|
||||
if not _save_memory_to_file(cleared_memory, agent_name):
|
||||
if not _save_memory_to_file(memory_config, cleared_memory, agent_name, user_id=user_id):
|
||||
raise OSError("Failed to save cleared memory data")
|
||||
return cleared_memory
|
||||
|
||||
@@ -74,10 +78,13 @@ def _validate_confidence(confidence: float) -> float:
|
||||
|
||||
|
||||
def create_memory_fact(
|
||||
memory_config: MemoryConfig,
|
||||
content: str,
|
||||
category: str = "context",
|
||||
confidence: float = 0.5,
|
||||
agent_name: str | None = None,
|
||||
*,
|
||||
user_id: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Create a new fact and persist the updated memory data."""
|
||||
normalized_content = content.strip()
|
||||
@@ -86,8 +93,8 @@ def create_memory_fact(
|
||||
|
||||
normalized_category = category.strip() or "context"
|
||||
validated_confidence = _validate_confidence(confidence)
|
||||
now = datetime.utcnow().isoformat() + "Z"
|
||||
memory_data = get_memory_data(agent_name)
|
||||
now = utc_now_iso_z()
|
||||
memory_data = get_memory_data(memory_config, agent_name, user_id=user_id)
|
||||
updated_memory = dict(memory_data)
|
||||
facts = list(memory_data.get("facts", []))
|
||||
facts.append(
|
||||
@@ -102,15 +109,15 @@ def create_memory_fact(
|
||||
)
|
||||
updated_memory["facts"] = facts
|
||||
|
||||
if not _save_memory_to_file(updated_memory, agent_name):
|
||||
if not _save_memory_to_file(memory_config, updated_memory, agent_name, user_id=user_id):
|
||||
raise OSError("Failed to save memory data after creating fact")
|
||||
|
||||
return updated_memory
|
||||
|
||||
|
||||
def delete_memory_fact(fact_id: str, agent_name: str | None = None) -> dict[str, Any]:
|
||||
def delete_memory_fact(memory_config: MemoryConfig, fact_id: str, agent_name: str | None = None, *, user_id: str | None = None) -> dict[str, Any]:
|
||||
"""Delete a fact by its id and persist the updated memory data."""
|
||||
memory_data = get_memory_data(agent_name)
|
||||
memory_data = get_memory_data(memory_config, agent_name, user_id=user_id)
|
||||
facts = memory_data.get("facts", [])
|
||||
updated_facts = [fact for fact in facts if fact.get("id") != fact_id]
|
||||
if len(updated_facts) == len(facts):
|
||||
@@ -119,21 +126,24 @@ def delete_memory_fact(fact_id: str, agent_name: str | None = None) -> dict[str,
|
||||
updated_memory = dict(memory_data)
|
||||
updated_memory["facts"] = updated_facts
|
||||
|
||||
if not _save_memory_to_file(updated_memory, agent_name):
|
||||
if not _save_memory_to_file(memory_config, updated_memory, agent_name, user_id=user_id):
|
||||
raise OSError(f"Failed to save memory data after deleting fact '{fact_id}'")
|
||||
|
||||
return updated_memory
|
||||
|
||||
|
||||
def update_memory_fact(
|
||||
memory_config: MemoryConfig,
|
||||
fact_id: str,
|
||||
content: str | None = None,
|
||||
category: str | None = None,
|
||||
confidence: float | None = None,
|
||||
agent_name: str | None = None,
|
||||
*,
|
||||
user_id: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Update an existing fact and persist the updated memory data."""
|
||||
memory_data = get_memory_data(agent_name)
|
||||
memory_data = get_memory_data(memory_config, agent_name, user_id=user_id)
|
||||
updated_memory = dict(memory_data)
|
||||
updated_facts: list[dict[str, Any]] = []
|
||||
found = False
|
||||
@@ -160,7 +170,7 @@ def update_memory_fact(
|
||||
|
||||
updated_memory["facts"] = updated_facts
|
||||
|
||||
if not _save_memory_to_file(updated_memory, agent_name):
|
||||
if not _save_memory_to_file(memory_config, updated_memory, agent_name, user_id=user_id):
|
||||
raise OSError(f"Failed to save memory data after updating fact '{fact_id}'")
|
||||
|
||||
return updated_memory
|
||||
@@ -203,6 +213,39 @@ def _extract_text(content: Any) -> str:
|
||||
return str(content)
|
||||
|
||||
|
||||
def _run_async_update_sync(coro: Awaitable[bool]) -> bool:
|
||||
"""Run an async memory update from sync code, including nested-loop contexts."""
|
||||
handed_off = False
|
||||
|
||||
try:
|
||||
try:
|
||||
loop = asyncio.get_running_loop()
|
||||
except RuntimeError:
|
||||
loop = None
|
||||
|
||||
if loop is not None and loop.is_running():
|
||||
future = _SYNC_MEMORY_UPDATER_EXECUTOR.submit(asyncio.run, coro)
|
||||
handed_off = True
|
||||
return future.result()
|
||||
|
||||
handed_off = True
|
||||
return asyncio.run(coro)
|
||||
except Exception:
|
||||
if not handed_off:
|
||||
close = getattr(coro, "close", None)
|
||||
if callable(close):
|
||||
try:
|
||||
close()
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"Failed to close un-awaited memory update coroutine",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
logger.exception("Failed to run async memory update from sync context")
|
||||
return False
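A self-contained sketch of the same run-async-from-sync pattern: call `asyncio.run` directly when no event loop is running, otherwise hand the coroutine to a worker thread that owns its own loop. The coroutine body here is a placeholder.

```python
# Stand-alone version of the "run a coroutine from sync code" pattern.
import asyncio
import concurrent.futures

_EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=1)


async def do_update() -> bool:
    await asyncio.sleep(0)  # placeholder for the real async work
    return True


def run_update_sync() -> bool:
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None
    if loop is not None and loop.is_running():
        # Already inside an event loop: run the coroutine on a separate thread.
        return _EXECUTOR.submit(asyncio.run, do_update()).result()
    return asyncio.run(do_update())


print(run_update_sync())  # True
```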
|
||||
|
||||
|
||||
# Matches sentences that describe a file-upload *event* rather than general
|
||||
# file-related work. Deliberately narrow to avoid removing legitimate facts
|
||||
# such as "User works with CSV files" or "prefers PDF export".
|
||||
@@ -246,38 +289,171 @@ def _fact_content_key(content: Any) -> str | None:
|
||||
stripped = content.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
return stripped
|
||||
return stripped.casefold()
|
||||
|
||||
|
||||
class MemoryUpdater:
|
||||
"""Updates memory using LLM based on conversation context."""
|
||||
|
||||
def __init__(self, model_name: str | None = None):
|
||||
def __init__(self, app_config: AppConfig, model_name: str | None = None):
|
||||
"""Initialize the memory updater.
|
||||
|
||||
Args:
|
||||
app_config: Application config (the updater needs both ``memory``
|
||||
section for behavior and the full config for ``create_chat_model``).
|
||||
model_name: Optional model name to use. If None, uses config or default.
|
||||
"""
|
||||
self._app_config = app_config
|
||||
self._model_name = model_name
|
||||
|
||||
@property
|
||||
def _memory_config(self) -> MemoryConfig:
|
||||
return self._app_config.memory
|
||||
|
||||
def _get_model(self):
|
||||
"""Get the model for memory updates."""
|
||||
config = get_memory_config()
|
||||
model_name = self._model_name or config.model_name
|
||||
return create_chat_model(name=model_name, thinking_enabled=False)
|
||||
model_name = self._model_name or self._memory_config.model_name
|
||||
return create_chat_model(name=model_name, thinking_enabled=False, app_config=self._app_config)
|
||||
|
||||
def update_memory(self, messages: list[Any], thread_id: str | None = None, agent_name: str | None = None) -> bool:
|
||||
"""Update memory based on conversation messages.
|
||||
def _build_correction_hint(
|
||||
self,
|
||||
correction_detected: bool,
|
||||
reinforcement_detected: bool,
|
||||
) -> str:
|
||||
"""Build optional prompt hints for correction and reinforcement signals."""
|
||||
correction_hint = ""
|
||||
if correction_detected:
|
||||
correction_hint = (
|
||||
"IMPORTANT: Explicit correction signals were detected in this conversation. "
|
||||
"Pay special attention to what the agent got wrong, what the user corrected, "
|
||||
"and record the correct approach as a fact with category "
|
||||
'"correction" and confidence >= 0.95 when appropriate.'
|
||||
)
|
||||
if reinforcement_detected:
|
||||
reinforcement_hint = (
|
||||
"IMPORTANT: Positive reinforcement signals were detected in this conversation. "
|
||||
"The user explicitly confirmed the agent's approach was correct or helpful. "
|
||||
"Record the confirmed approach, style, or preference as a fact with category "
|
||||
'"preference" or "behavior" and confidence >= 0.9 when appropriate.'
|
||||
)
|
||||
correction_hint = (correction_hint + "\n" + reinforcement_hint).strip() if correction_hint else reinforcement_hint
|
||||
|
||||
return correction_hint
|
||||
|
||||
def _prepare_update_prompt(
|
||||
self,
|
||||
messages: list[Any],
|
||||
agent_name: str | None,
|
||||
correction_detected: bool,
|
||||
reinforcement_detected: bool,
|
||||
user_id: str | None = None,
|
||||
) -> tuple[dict[str, Any], str] | None:
|
||||
"""Load memory and build the update prompt for a conversation."""
|
||||
config = self._memory_config
|
||||
if not config.enabled or not messages:
|
||||
return None
|
||||
|
||||
current_memory = get_memory_data(config, agent_name, user_id=user_id)
|
||||
conversation_text = format_conversation_for_update(messages)
|
||||
if not conversation_text.strip():
|
||||
return None
|
||||
|
||||
correction_hint = self._build_correction_hint(
|
||||
correction_detected=correction_detected,
|
||||
reinforcement_detected=reinforcement_detected,
|
||||
)
|
||||
prompt = MEMORY_UPDATE_PROMPT.format(
|
||||
current_memory=json.dumps(current_memory, indent=2),
|
||||
conversation=conversation_text,
|
||||
correction_hint=correction_hint,
|
||||
)
|
||||
return current_memory, prompt
|
||||
|
||||
def _finalize_update(
|
||||
self,
|
||||
current_memory: dict[str, Any],
|
||||
response_content: Any,
|
||||
thread_id: str | None,
|
||||
agent_name: str | None,
|
||||
user_id: str | None = None,
|
||||
) -> bool:
|
||||
"""Parse the model response, apply updates, and persist memory."""
|
||||
response_text = _extract_text(response_content).strip()
|
||||
|
||||
if response_text.startswith("```"):
|
||||
lines = response_text.split("\n")
|
||||
response_text = "\n".join(lines[1:-1] if lines[-1] == "```" else lines[1:])
|
||||
|
||||
update_data = json.loads(response_text)
|
||||
# Deep-copy before in-place mutation so a subsequent save() failure
|
||||
# cannot corrupt the still-cached original object reference.
|
||||
updated_memory = self._apply_updates(copy.deepcopy(current_memory), update_data, thread_id)
|
||||
updated_memory = _strip_upload_mentions_from_memory(updated_memory)
|
||||
return get_memory_storage(self._memory_config).save(updated_memory, agent_name, user_id=user_id)
|
||||
|
||||
async def aupdate_memory(
|
||||
self,
|
||||
messages: list[Any],
|
||||
thread_id: str | None = None,
|
||||
agent_name: str | None = None,
|
||||
correction_detected: bool = False,
|
||||
reinforcement_detected: bool = False,
|
||||
user_id: str | None = None,
|
||||
) -> bool:
|
||||
"""Update memory asynchronously based on conversation messages."""
|
||||
try:
|
||||
prepared = await asyncio.to_thread(
|
||||
self._prepare_update_prompt,
|
||||
messages=messages,
|
||||
agent_name=agent_name,
|
||||
correction_detected=correction_detected,
|
||||
reinforcement_detected=reinforcement_detected,
|
||||
user_id=user_id,
|
||||
)
|
||||
if prepared is None:
|
||||
return False
|
||||
|
||||
current_memory, prompt = prepared
|
||||
model = self._get_model()
|
||||
response = await model.ainvoke(prompt, config={"run_name": "memory_agent"})
|
||||
return await asyncio.to_thread(
|
||||
self._finalize_update,
|
||||
current_memory=current_memory,
|
||||
response_content=response.content,
|
||||
thread_id=thread_id,
|
||||
agent_name=agent_name,
|
||||
user_id=user_id,
|
||||
)
|
||||
except json.JSONDecodeError as e:
|
||||
logger.warning("Failed to parse LLM response for memory update: %s", e)
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.exception("Memory update failed: %s", e)
|
||||
return False
|
||||
|
||||
def update_memory(
|
||||
self,
|
||||
messages: list[Any],
|
||||
thread_id: str | None = None,
|
||||
agent_name: str | None = None,
|
||||
correction_detected: bool = False,
|
||||
reinforcement_detected: bool = False,
|
||||
user_id: str | None = None,
|
||||
) -> bool:
|
||||
"""Synchronously update memory via the async updater path.
|
||||
|
||||
Args:
|
||||
messages: List of conversation messages.
|
||||
thread_id: Optional thread ID for tracking source.
|
||||
agent_name: If provided, updates per-agent memory. If None, updates global memory.
|
||||
correction_detected: Whether recent turns include an explicit correction signal.
|
||||
reinforcement_detected: Whether recent turns include a positive reinforcement signal.
|
||||
user_id: If provided, scopes memory to a specific user.
|
||||
|
||||
Returns:
|
||||
True if update was successful, False otherwise.
|
||||
"""
|
||||
config = get_memory_config()
|
||||
config = self._memory_config
|
||||
if not config.enabled:
|
||||
return False
|
||||
|
||||
@@ -286,7 +462,7 @@ class MemoryUpdater:
|
||||
|
||||
try:
|
||||
# Get current memory
|
||||
current_memory = get_memory_data(agent_name)
|
||||
current_memory = get_memory_data(config, agent_name, user_id=user_id)
|
||||
|
||||
# Format conversation for prompt
|
||||
conversation_text = format_conversation_for_update(messages)
|
||||
@@ -295,9 +471,27 @@ class MemoryUpdater:
|
||||
return False
|
||||
|
||||
# Build prompt
|
||||
correction_hint = ""
|
||||
if correction_detected:
|
||||
correction_hint = (
|
||||
"IMPORTANT: Explicit correction signals were detected in this conversation. "
|
||||
"Pay special attention to what the agent got wrong, what the user corrected, "
|
||||
"and record the correct approach as a fact with category "
|
||||
'"correction" and confidence >= 0.95 when appropriate.'
|
||||
)
|
||||
if reinforcement_detected:
|
||||
reinforcement_hint = (
|
||||
"IMPORTANT: Positive reinforcement signals were detected in this conversation. "
|
||||
"The user explicitly confirmed the agent's approach was correct or helpful. "
|
||||
"Record the confirmed approach, style, or preference as a fact with category "
|
||||
'"preference" or "behavior" and confidence >= 0.9 when appropriate.'
|
||||
)
|
||||
correction_hint = (correction_hint + "\n" + reinforcement_hint).strip() if correction_hint else reinforcement_hint
|
||||
|
||||
prompt = MEMORY_UPDATE_PROMPT.format(
|
||||
current_memory=json.dumps(current_memory, indent=2),
|
||||
conversation=conversation_text,
|
||||
correction_hint=correction_hint,
|
||||
)
|
||||
|
||||
# Call LLM
|
||||
@@ -323,7 +517,7 @@ class MemoryUpdater:
|
||||
updated_memory = _strip_upload_mentions_from_memory(updated_memory)
|
||||
|
||||
# Save
|
||||
return get_memory_storage().save(updated_memory, agent_name)
|
||||
return get_memory_storage(config).save(updated_memory, agent_name, user_id=user_id)
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
logger.warning("Failed to parse LLM response for memory update: %s", e)
|
||||
@@ -348,8 +542,8 @@ class MemoryUpdater:
|
||||
Returns:
|
||||
Updated memory data.
|
||||
"""
|
||||
config = get_memory_config()
|
||||
now = datetime.utcnow().isoformat() + "Z"
|
||||
config = self._memory_config
|
||||
now = utc_now_iso_z()
|
||||
|
||||
# Update user sections
|
||||
user_updates = update_data.get("user", {})
|
||||
@@ -383,6 +577,8 @@ class MemoryUpdater:
|
||||
confidence = fact.get("confidence", 0.5)
|
||||
if confidence >= config.fact_confidence_threshold:
|
||||
raw_content = fact.get("content", "")
|
||||
if not isinstance(raw_content, str):
|
||||
continue
|
||||
normalized_content = raw_content.strip()
|
||||
fact_key = _fact_content_key(normalized_content)
|
||||
if fact_key is not None and fact_key in existing_fact_keys:
|
||||
@@ -396,6 +592,11 @@ class MemoryUpdater:
|
||||
"createdAt": now,
|
||||
"source": thread_id or "unknown",
|
||||
}
|
||||
source_error = fact.get("sourceError")
|
||||
if isinstance(source_error, str):
|
||||
normalized_source_error = source_error.strip()
|
||||
if normalized_source_error:
|
||||
fact_entry["sourceError"] = normalized_source_error
|
||||
current_memory["facts"].append(fact_entry)
|
||||
if fact_key is not None:
|
||||
existing_fact_keys.add(fact_key)
|
||||
@@ -412,16 +613,26 @@ class MemoryUpdater:
|
||||
return current_memory
|
||||
|
||||
|
||||
def update_memory_from_conversation(messages: list[Any], thread_id: str | None = None, agent_name: str | None = None) -> bool:
|
||||
def update_memory_from_conversation(
|
||||
messages: list[Any],
|
||||
thread_id: str | None = None,
|
||||
agent_name: str | None = None,
|
||||
correction_detected: bool = False,
|
||||
reinforcement_detected: bool = False,
|
||||
user_id: str | None = None,
|
||||
) -> bool:
|
||||
"""Convenience function to update memory from a conversation.
|
||||
|
||||
Args:
|
||||
messages: List of conversation messages.
|
||||
thread_id: Optional thread ID.
|
||||
agent_name: If provided, updates per-agent memory. If None, updates global memory.
|
||||
correction_detected: Whether recent turns include an explicit correction signal.
|
||||
reinforcement_detected: Whether recent turns include a positive reinforcement signal.
|
||||
user_id: If provided, scopes memory to a specific user.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
updater = MemoryUpdater()
|
||||
return updater.update_memory(messages, thread_id, agent_name)
|
||||
return updater.update_memory(messages, thread_id, agent_name, correction_detected, reinforcement_detected, user_id=user_id)
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
"""Middleware for intercepting clarification requests and presenting them to the user."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from hashlib import sha256
|
||||
from typing import override
|
||||
|
||||
from langchain.agents import AgentState
|
||||
@@ -35,6 +37,13 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
|
||||
|
||||
state_schema = ClarificationMiddlewareState
|
||||
|
||||
def _stable_message_id(self, tool_call_id: str, formatted_message: str) -> str:
|
||||
"""Build a deterministic message ID so retried clarification calls replace, not append."""
|
||||
if tool_call_id:
|
||||
return f"clarification:{tool_call_id}"
|
||||
digest = sha256(formatted_message.encode("utf-8")).hexdigest()[:16]
|
||||
return f"clarification:{digest}"
|
||||
|
||||
def _is_chinese(self, text: str) -> bool:
|
||||
"""Check if text contains Chinese characters.
|
||||
|
||||
@@ -60,6 +69,20 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
|
||||
context = args.get("context")
|
||||
options = args.get("options", [])
|
||||
|
||||
# Some models (e.g. Qwen3-Max) serialize array parameters as JSON strings
|
||||
# instead of native arrays. Deserialize and normalize so `options`
|
||||
# is always a list for the rendering logic below.
|
||||
if isinstance(options, str):
|
||||
try:
|
||||
options = json.loads(options)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
options = [options]
|
||||
|
||||
if options is None:
|
||||
options = []
|
||||
elif not isinstance(options, list):
|
||||
options = [options]
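The same normalization, condensed into a standalone helper for illustration (the model behaviour it guards against, arrays serialized as JSON strings, is described in the comment above).

```python
# Stand-alone version of the options normalization above.
import json


def normalize_options(options) -> list:
    if isinstance(options, str):
        try:
            options = json.loads(options)
        except (json.JSONDecodeError, TypeError):
            options = [options]
    if options is None:
        return []
    return options if isinstance(options, list) else [options]


print(normalize_options('["Option A", "Option B"]'))  # ['Option A', 'Option B']
print(normalize_options("free-form answer"))          # ['free-form answer']
print(normalize_options(None))                        # []
```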
|
||||
|
||||
# Type-specific icons
|
||||
type_icons = {
|
||||
"missing_info": "❓",
|
||||
@@ -116,6 +139,7 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
|
||||
# Create a ToolMessage with the formatted question
|
||||
# This will be added to the message history
|
||||
tool_message = ToolMessage(
|
||||
id=self._stable_message_id(tool_call_id, formatted_message),
|
||||
content=formatted_message,
|
||||
tool_call_id=tool_call_id,
|
||||
name="ask_clarification",
|
||||
|
||||
@@ -13,6 +13,7 @@ at the correct positions (immediately after each dangling AIMessage), not append
|
||||
to the end of the message list as before_model + add_messages reducer would do.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import override
|
||||
@@ -33,6 +34,44 @@ class DanglingToolCallMiddleware(AgentMiddleware[AgentState]):
|
||||
offending AIMessage so the LLM receives a well-formed conversation.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _message_tool_calls(msg) -> list[dict]:
|
||||
"""Return normalized tool calls from structured fields or raw provider payloads."""
|
||||
tool_calls = getattr(msg, "tool_calls", None) or []
|
||||
if tool_calls:
|
||||
return list(tool_calls)
|
||||
|
||||
raw_tool_calls = (getattr(msg, "additional_kwargs", None) or {}).get("tool_calls") or []
|
||||
normalized: list[dict] = []
|
||||
for raw_tc in raw_tool_calls:
|
||||
if not isinstance(raw_tc, dict):
|
||||
continue
|
||||
|
||||
function = raw_tc.get("function")
|
||||
name = raw_tc.get("name")
|
||||
if not name and isinstance(function, dict):
|
||||
name = function.get("name")
|
||||
|
||||
args = raw_tc.get("args", {})
|
||||
if not args and isinstance(function, dict):
|
||||
raw_args = function.get("arguments")
|
||||
if isinstance(raw_args, str):
|
||||
try:
|
||||
parsed_args = json.loads(raw_args)
|
||||
except (TypeError, ValueError, json.JSONDecodeError):
|
||||
parsed_args = {}
|
||||
args = parsed_args if isinstance(parsed_args, dict) else {}
|
||||
|
||||
normalized.append(
|
||||
{
|
||||
"id": raw_tc.get("id"),
|
||||
"name": name or "unknown",
|
||||
"args": args if isinstance(args, dict) else {},
|
||||
}
|
||||
)
|
||||
|
||||
return normalized
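Shown here as a standalone sketch: the same normalization applied to a raw OpenAI-style payload where `function.arguments` arrives as a JSON string rather than a parsed dict.

```python
# Stand-alone version of the raw tool-call normalization above.
import json


def normalize_tool_call(raw_tc: dict) -> dict:
    function = raw_tc.get("function")
    name = raw_tc.get("name")
    if not name and isinstance(function, dict):
        name = function.get("name")

    args = raw_tc.get("args", {})
    if not args and isinstance(function, dict):
        raw_args = function.get("arguments")
        if isinstance(raw_args, str):
            try:
                parsed = json.loads(raw_args)
            except (TypeError, ValueError):
                parsed = {}
            args = parsed if isinstance(parsed, dict) else {}

    return {"id": raw_tc.get("id"), "name": name or "unknown", "args": args if isinstance(args, dict) else {}}


raw = {"id": "call_1", "type": "function", "function": {"name": "read_file", "arguments": '{"path": "notes.md"}'}}
print(normalize_tool_call(raw))
# {'id': 'call_1', 'name': 'read_file', 'args': {'path': 'notes.md'}}
```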
|
||||
|
||||
def _build_patched_messages(self, messages: list) -> list | None:
|
||||
"""Return a new message list with patches inserted at the correct positions.
|
||||
|
||||
@@ -51,7 +90,7 @@ class DanglingToolCallMiddleware(AgentMiddleware[AgentState]):
|
||||
for msg in messages:
|
||||
if getattr(msg, "type", None) != "ai":
|
||||
continue
|
||||
for tc in getattr(msg, "tool_calls", None) or []:
|
||||
for tc in self._message_tool_calls(msg):
|
||||
tc_id = tc.get("id")
|
||||
if tc_id and tc_id not in existing_tool_msg_ids:
|
||||
needs_patch = True
|
||||
@@ -70,7 +109,7 @@ class DanglingToolCallMiddleware(AgentMiddleware[AgentState]):
|
||||
patched.append(msg)
|
||||
if getattr(msg, "type", None) != "ai":
|
||||
continue
|
||||
for tc in getattr(msg, "tool_calls", None) or []:
|
||||
for tc in self._message_tool_calls(msg):
|
||||
tc_id = tc.get("id")
|
||||
if tc_id and tc_id not in existing_tool_msg_ids and tc_id not in patched_ids:
|
||||
patched.append(
|
||||