nanoClaw/backend/tools/docker_executor.py

156 lines
5.6 KiB
Python

"""Docker-based code execution with isolation."""
import subprocess
import tempfile
from typing import Optional, Dict, Any
from pathlib import Path
from backend.config import config
class DockerExecutor:
"""Execute Python code in isolated Docker containers."""
def __init__(
self,
image: str = "python:3.12-slim",
network: str = "none",
user: str = "nobody",
workdir: str = "/workspace",
memory_limit: Optional[str] = None,
cpu_shares: Optional[int] = None,
):
self.image = image
self.network = network
self.user = user
self.workdir = workdir
self.memory_limit = memory_limit
self.cpu_shares = cpu_shares
def execute(
self,
code: str,
timeout: int,
strictness: str,
extra_env: Optional[Dict[str, str]] = None,
mount_src: Optional[str] = None,
mount_dst: Optional[str] = None,
) -> Dict[str, Any]:
"""
Execute Python code in a Docker container.
Args:
code: Python code to execute.
timeout: Maximum execution time in seconds.
strictness: Strictness level (lenient/standard/strict) for logging.
extra_env: Additional environment variables.
mount_src: Host path to mount into container (optional).
mount_dst: Container mount path (defaults to workdir).
Returns:
Dictionary with keys:
success: bool
output: str if success else empty
error: str if not success else empty
container_id: str for debugging
"""
# Create temporary file with code inside a temporary directory
# so we can mount it into container
with tempfile.TemporaryDirectory() as tmpdir:
code_path = Path(tmpdir) / "code.py"
code_path.write_text(code, encoding="utf-8")
# Build docker run command
cmd = [
"docker", "run",
"--rm",
f"--network={self.network}",
f"--user={self.user}",
f"--workdir={self.workdir}",
f"--env=PYTHONIOENCODING=utf-8",
]
# Add memory limit if specified
if self.memory_limit:
cmd.append(f"--memory={self.memory_limit}")
# Add CPU shares if specified
if self.cpu_shares:
cmd.append(f"--cpu-shares={self.cpu_shares}")
# Add timeout via --stop-timeout (seconds before SIGKILL)
# Docker's timeout is different; we'll use subprocess timeout instead.
# We'll rely on subprocess timeout, but also set --stop-timeout as backup.
stop_timeout = timeout + 2 # give 2 seconds grace
cmd.append(f"--stop-timeout={stop_timeout}")
# Mount the temporary directory as /workspace (read-only)
cmd.extend(["-v", f"{tmpdir}:{self.workdir}:ro"])
# Additional mount if provided
if mount_src and mount_dst:
cmd.extend(["-v", f"{mount_src}:{mount_dst}:ro"])
# Add environment variables
env = extra_env or {}
for k, v in env.items():
cmd.extend(["-e", f"{k}={v}"])
# Finally, image and command to run
cmd.append(self.image)
cmd.extend(["python", "-c", code])
# Execute docker run with timeout
try:
result = subprocess.run(
cmd,
capture_output=True,
timeout=timeout,
encoding="utf-8",
errors="ignore",
)
if result.returncode == 0:
return {
"success": True,
"output": result.stdout,
"error": "",
"container_id": "", # not available with --rm
"strictness": strictness,
"timeout": timeout,
}
else:
return {
"success": False,
"output": "",
"error": result.stderr or f"Container exited with code {result.returncode}",
"container_id": "",
"strictness": strictness,
"timeout": timeout,
}
except subprocess.TimeoutExpired:
return {
"success": False,
"output": "",
"error": f"Execution timeout ({timeout}s limit in '{strictness}' mode)",
"container_id": "",
"strictness": strictness,
"timeout": timeout,
}
except Exception as e:
return {
"success": False,
"output": "",
"error": f"Docker execution error: {str(e)}",
"container_id": "",
"strictness": strictness,
"timeout": timeout,
}
# Singleton instance
_default_executor = DockerExecutor()
def execute_in_docker(code: str, timeout: int, strictness: str, **kwargs) -> Dict[str, Any]:
"""Convenience function using default executor."""
return _default_executor.execute(code, timeout, strictness, **kwargs)