feat(provisioner): add optional PVC support for sandbox volumes (#2020)

* feat(provisioner): add optional PVC support for sandbox volumes (#1978)

  Add SKILLS_PVC_NAME and USERDATA_PVC_NAME env vars to allow sandbox
  Pods to use PersistentVolumeClaims instead of hostPath volumes. This
  prevents data loss in production when pods are rescheduled across nodes.

  When USERDATA_PVC_NAME is set, a subPath of threads/{thread_id}/user-data
  is used so a single PVC can serve multiple threads. Falls back to hostPath
  when the new env vars are not set, preserving backward compatibility.

* add unit test for provisioner pvc volumes

* refactor: extract shared provisioner_module fixture to conftest.py

Agent-Logs-Url: https://github.com/bytedance/deer-flow/sessions/e7ccf708-c6ba-40e4-844a-b526bdb249dd

Co-authored-by: WillemJiang <219644+WillemJiang@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: JeffJiang <for-eleven@hotmail.com>
This commit is contained in:
Willem Jiang
2026-04-10 20:40:30 +08:00
committed by GitHub
parent 7dc0c7d01f
commit 90299e2710
6 changed files with 255 additions and 55 deletions
+5
View File
@@ -36,6 +36,11 @@ services:
# export DEER_FLOW_ROOT=/absolute/path/to/deer-flow
- SKILLS_HOST_PATH=${DEER_FLOW_ROOT}/skills
- THREADS_HOST_PATH=${DEER_FLOW_ROOT}/backend/.deer-flow/threads
# Production: use PVC instead of hostPath to avoid data loss on node failure.
# When set, hostPath vars above are ignored for the corresponding volume.
# USERDATA_PVC_NAME uses subPath (threads/{thread_id}/user-data) automatically.
# - SKILLS_PVC_NAME=deer-flow-skills-pvc
# - USERDATA_PVC_NAME=deer-flow-userdata-pvc
- KUBECONFIG_PATH=/root/.kube/config
- NODE_HOST=host.docker.internal
# Override K8S API server URL since kubeconfig uses 127.0.0.1
+4 -2
View File
@@ -137,6 +137,8 @@ The provisioner is configured via environment variables (set in [docker-compose-
| `SANDBOX_IMAGE` | `enterprise-public-cn-beijing.cr.volces.com/vefaas-public/all-in-one-sandbox:latest` | Container image for sandbox Pods |
| `SKILLS_HOST_PATH` | - | **Host machine** path to skills directory (must be absolute) |
| `THREADS_HOST_PATH` | - | **Host machine** path to threads data directory (must be absolute) |
| `SKILLS_PVC_NAME` | empty (use hostPath) | PVC name for skills volume; when set, sandbox Pods use PVC instead of hostPath |
| `USERDATA_PVC_NAME` | empty (use hostPath) | PVC name for user-data volume; when set, uses PVC with `subPath: threads/{thread_id}/user-data` |
| `KUBECONFIG_PATH` | `/root/.kube/config` | Path to kubeconfig **inside** the provisioner container |
| `NODE_HOST` | `host.docker.internal` | Hostname that backend containers use to reach host NodePorts |
| `K8S_API_SERVER` | (from kubeconfig) | Override K8s API server URL (e.g., `https://host.docker.internal:26443`) |
@@ -309,7 +311,7 @@ docker exec deer-flow-gateway curl -s $SANDBOX_URL/v1/sandbox
## Security Considerations
1. **HostPath Volumes**: The provisioner mounts host directories into sandbox Pods. Ensure these paths contain only trusted data.
1. **HostPath Volumes**: The provisioner mounts host directories into sandbox Pods by default. Ensure these paths contain only trusted data. For production, prefer PVC-based volumes (set `SKILLS_PVC_NAME` and `USERDATA_PVC_NAME`) to avoid node-specific data loss risks.
2. **Resource Limits**: Each sandbox Pod has CPU, memory, and storage limits to prevent resource exhaustion.
@@ -322,7 +324,7 @@ docker exec deer-flow-gateway curl -s $SANDBOX_URL/v1/sandbox
## Future Enhancements
- [ ] Support for custom resource requests/limits per sandbox
- [ ] PersistentVolume support for larger data requirements
- [x] PersistentVolume support for larger data requirements
- [ ] Automatic cleanup of stale sandboxes (timeout-based)
- [ ] Metrics and monitoring (Prometheus integration)
- [ ] Multi-cluster support (route to different K8s clusters)
+62 -28
View File
@@ -60,6 +60,8 @@ SANDBOX_IMAGE = os.environ.get(
)
SKILLS_HOST_PATH = os.environ.get("SKILLS_HOST_PATH", "/skills")
THREADS_HOST_PATH = os.environ.get("THREADS_HOST_PATH", "/.deer-flow/threads")
SKILLS_PVC_NAME = os.environ.get("SKILLS_PVC_NAME", "")
USERDATA_PVC_NAME = os.environ.get("USERDATA_PVC_NAME", "")
SAFE_THREAD_ID_PATTERN = r"^[A-Za-z0-9_\-]+$"
# Path to the kubeconfig *inside* the provisioner container.
@@ -243,6 +245,64 @@ def _sandbox_url(node_port: int) -> str:
return f"http://{NODE_HOST}:{node_port}"
def _build_volumes(thread_id: str) -> list[k8s_client.V1Volume]:
    """Assemble the sandbox Pod's volume list.

    Each volume is backed by a PersistentVolumeClaim when the matching
    *_PVC_NAME env var is set (production: survives Pod rescheduling),
    and falls back to a hostPath directory otherwise (local development).

    Args:
        thread_id: Already-validated thread identifier; used to build the
            per-thread hostPath for user-data in the non-PVC case.

    Returns:
        Two volumes, in order: "skills" (read-only content) and "user-data"
        (per-thread writable storage).
    """
    # Skills: shared read-only content, so the PVC source itself is read_only.
    skills = (
        k8s_client.V1Volume(
            name="skills",
            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                claim_name=SKILLS_PVC_NAME,
                read_only=True,
            ),
        )
        if SKILLS_PVC_NAME
        else k8s_client.V1Volume(
            name="skills",
            host_path=k8s_client.V1HostPathVolumeSource(
                path=SKILLS_HOST_PATH,
                type="Directory",
            ),
        )
    )
    # User data: a single shared PVC (per-thread isolation comes from the
    # mount's subPath), or a per-thread hostPath created on demand.
    user_data = (
        k8s_client.V1Volume(
            name="user-data",
            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                claim_name=USERDATA_PVC_NAME,
            ),
        )
        if USERDATA_PVC_NAME
        else k8s_client.V1Volume(
            name="user-data",
            host_path=k8s_client.V1HostPathVolumeSource(
                path=join_host_path(THREADS_HOST_PATH, thread_id, "user-data"),
                type="DirectoryOrCreate",
            ),
        )
    )
    return [skills, user_data]
def _build_volume_mounts(thread_id: str) -> list[k8s_client.V1VolumeMount]:
    """Assemble the sandbox container's volume mounts.

    When the user-data volume is PVC-backed, a per-thread subPath
    (threads/{thread_id}/user-data) keeps threads isolated within the
    single shared claim; otherwise no subPath is set (sub_path=None is
    the constructor default, matching the hostPath layout).

    Args:
        thread_id: Already-validated thread identifier used in the subPath.

    Returns:
        Two mounts, in order: read-only "skills" at /mnt/skills and
        writable "user-data" at /mnt/user-data.
    """
    skills_mount = k8s_client.V1VolumeMount(
        name="skills",
        mount_path="/mnt/skills",
        read_only=True,
    )
    user_sub_path = f"threads/{thread_id}/user-data" if USERDATA_PVC_NAME else None
    user_mount = k8s_client.V1VolumeMount(
        name="user-data",
        mount_path="/mnt/user-data",
        read_only=False,
        sub_path=user_sub_path,
    )
    return [skills_mount, user_mount]
def _build_pod(sandbox_id: str, thread_id: str) -> k8s_client.V1Pod:
"""Construct a Pod manifest for a single sandbox."""
thread_id = _validate_thread_id(thread_id)
@@ -302,40 +362,14 @@ def _build_pod(sandbox_id: str, thread_id: str) -> k8s_client.V1Pod:
"ephemeral-storage": "500Mi",
},
),
volume_mounts=[
k8s_client.V1VolumeMount(
name="skills",
mount_path="/mnt/skills",
read_only=True,
),
k8s_client.V1VolumeMount(
name="user-data",
mount_path="/mnt/user-data",
read_only=False,
),
],
volume_mounts=_build_volume_mounts(thread_id),
security_context=k8s_client.V1SecurityContext(
privileged=False,
allow_privilege_escalation=True,
),
)
],
volumes=[
k8s_client.V1Volume(
name="skills",
host_path=k8s_client.V1HostPathVolumeSource(
path=SKILLS_HOST_PATH,
type="Directory",
),
),
k8s_client.V1Volume(
name="user-data",
host_path=k8s_client.V1HostPathVolumeSource(
path=join_host_path(THREADS_HOST_PATH, thread_id, "user-data"),
type="DirectoryOrCreate",
),
),
],
volumes=_build_volumes(thread_id),
restart_policy="Always",
),
)