From 24e849723005fd1971b1ba5f9767874d9ec06da7 Mon Sep 17 00:00:00 2001 From: ViperEkura <3081035982@qq.com> Date: Sat, 28 Mar 2026 13:27:09 +0800 Subject: [PATCH] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E5=AD=90=E4=BB=A3?= =?UTF-8?q?=E7=90=86=E7=8E=AF=E5=A2=83=E9=9A=94=E7=A6=BB=E3=80=81=E7=BB=9F?= =?UTF-8?q?=E8=AE=A1=E6=97=B6=E9=97=B4=E6=88=B3=E9=97=AE=E9=A2=98=EF=BC=8C?= =?UTF-8?q?=E6=8F=90=E5=8F=96=E5=AD=90=E4=BB=A3=E7=90=86=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 9 +++++ backend/config.py | 7 ++++ backend/tools/builtin/agent.py | 46 ++++++++++++++++++++------ backend/utils/helpers.py | 4 +-- docs/Design.md | 8 +++++ docs/ToolSystemDesign.md | 21 +++++++++--- frontend/src/components/StatsPanel.vue | 18 +++++++--- 7 files changed, 92 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 7fa28ff..b625da9 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,13 @@ frontend_port: 4000 # Max agentic loop iterations (tool call rounds) max_iterations: 15 +# Sub-agent settings (multi_agent tool) +sub_agent: + max_iterations: 3 # Max tool-call rounds per sub-agent + max_tokens: 4096 # Max tokens per LLM call inside a sub-agent + max_agents: 5 # Max number of concurrent sub-agents per request + max_concurrency: 3 # ThreadPoolExecutor max workers + # Available models # Each model must have its own id, name, api_url, api_key models: @@ -117,6 +124,7 @@ backend/ │ ├── data.py # 计算器、文本、JSON 处理 │ ├── weather.py # 天气查询(模拟) │ ├── file_ops.py # 文件操作(6 个工具,project_id 自动注入) +│ ├── agent.py # 多智能体(子 Agent 并发执行,工具权限隔离) │ └── code.py # Python 代码执行(沙箱) └── utils/ # 辅助函数 ├── helpers.py # 通用函数(ok/err/build_messages 等) @@ -207,6 +215,7 @@ frontend/ | **代码执行** | execute_python | 沙箱环境执行 Python | | **文件操作** | file_read, file_write, file_delete, file_list, file_exists, file_mkdir | project_id 自动注入 | | **天气** | get_weather | 天气查询(模拟) | +| **智能体** | multi_agent | 派生子 Agent 并发执行(禁止递归,工具权限与主 Agent 一致) 
| ## 文档 diff --git a/backend/config.py b/backend/config.py index 232c1ee..bfebf1a 100644 --- a/backend/config.py +++ b/backend/config.py @@ -39,3 +39,10 @@ TOOL_MAX_WORKERS = _cfg.get("tool_max_workers", 4) # Max character length for a single tool result content (truncated if exceeded) TOOL_RESULT_MAX_LENGTH = _cfg.get("tool_result_max_length", 4096) + +# Sub-agent settings (multi_agent tool) +_sa = _cfg.get("sub_agent", {}) +SUB_AGENT_MAX_ITERATIONS = _sa.get("max_iterations", 3) +SUB_AGENT_MAX_TOKENS = _sa.get("max_tokens", 4096) +SUB_AGENT_MAX_AGENTS = _sa.get("max_agents", 5) +SUB_AGENT_MAX_CONCURRENCY = _sa.get("max_concurrency", 3) diff --git a/backend/tools/builtin/agent.py b/backend/tools/builtin/agent.py index 0b3f7a1..503a8ef 100644 --- a/backend/tools/builtin/agent.py +++ b/backend/tools/builtin/agent.py @@ -4,12 +4,25 @@ Provides: - multi_agent: Spawn sub-agents with independent LLM conversation loops """ import json +import logging from concurrent.futures import ThreadPoolExecutor, as_completed from typing import List, Dict, Any, Optional from backend.tools.factory import tool from backend.tools.core import registry from backend.tools.executor import ToolExecutor +from backend.config import ( + DEFAULT_MODEL, + SUB_AGENT_MAX_ITERATIONS, + SUB_AGENT_MAX_TOKENS, + SUB_AGENT_MAX_AGENTS, + SUB_AGENT_MAX_CONCURRENCY, +) + +logger = logging.getLogger(__name__) + +# Sub-agents are forbidden from using multi_agent to prevent infinite recursion +BLOCKED_TOOLS = {"multi_agent"} def _to_executor_calls(tool_calls: list, id_prefix: str = "tc") -> list: @@ -68,13 +81,16 @@ def _run_sub_agent( "error": "LLM client not available", } - # Build tool list – filter to requested tools or use all + # Build tool list – filter to requested tools, then remove blocked all_tools = registry.list_all() if tool_names: allowed = set(tool_names) tools = [t for t in all_tools if t["function"]["name"] in allowed] else: - tools = all_tools + tools = list(all_tools) + + # Remove blocked 
tools to prevent recursion + tools = [t for t in tools if t["function"]["name"] not in BLOCKED_TOOLS] executor = ToolExecutor(registry=registry) context = {"model": model} @@ -128,7 +144,17 @@ def _run_sub_agent( }) tc_list = message["tool_calls"] executor_calls = _to_executor_calls(tc_list) - tool_results = executor.process_tool_calls(executor_calls, context) + # Execute tools inside app_context – file ops and other DB- + # dependent tools require an active Flask context and session. + with app.app_context(): + if len(executor_calls) > 1: + tool_results = executor.process_tool_calls_parallel( + executor_calls, context + ) + else: + tool_results = executor.process_tool_calls( + executor_calls, context + ) messages.extend(tool_results) else: # Final text response @@ -159,7 +185,7 @@ def _run_sub_agent( "Spawn multiple sub-agents to work on tasks concurrently. " "Each agent runs its own independent conversation with the LLM and can use tools. " "Useful for parallel research, multi-file analysis, or dividing complex tasks into sub-tasks. " - "Each agent is limited to 3 iterations and 4096 tokens to control cost." + "Resource limits (iterations, tokens, concurrency) are configured in config.yml -> sub_agent." 
), parameters={ "type": "object", @@ -221,19 +247,18 @@ def multi_agent(arguments: dict) -> dict: tasks = arguments["tasks"] - if len(tasks) > 5: - return {"success": False, "error": "Maximum 5 concurrent agents allowed"} + if len(tasks) > SUB_AGENT_MAX_AGENTS: + return {"success": False, "error": f"Maximum {SUB_AGENT_MAX_AGENTS} concurrent agents allowed"} # Get current conversation context for model/project info app = current_app._get_current_object() # Use injected model/project_id from executor context, fall back to defaults - from backend.config import DEFAULT_MODEL model = arguments.get("_model") or DEFAULT_MODEL project_id = arguments.get("_project_id") - # Execute agents concurrently (max 3 at a time) - concurrency = min(len(tasks), 3) + # Execute agents concurrently + concurrency = min(len(tasks), SUB_AGENT_MAX_CONCURRENCY) results = [None] * len(tasks) with ThreadPoolExecutor(max_workers=concurrency) as pool: @@ -244,9 +269,10 @@ def multi_agent(arguments: dict) -> dict: task["instruction"], task.get("tools"), model, - 4096, + SUB_AGENT_MAX_TOKENS, project_id, app, + SUB_AGENT_MAX_ITERATIONS, ): i for i, task in enumerate(tasks) } diff --git a/backend/utils/helpers.py b/backend/utils/helpers.py index 6691002..2acb3c2 100644 --- a/backend/utils/helpers.py +++ b/backend/utils/helpers.py @@ -1,6 +1,6 @@ """Common helper functions""" import json -from datetime import date, datetime +from datetime import date, datetime, timezone from typing import Any from flask import jsonify from backend import db @@ -97,7 +97,7 @@ def message_to_dict(msg: Message) -> dict: def record_token_usage(user_id, model, prompt_tokens, completion_tokens): """Record token usage""" - today = date.today() + today = datetime.now(timezone.utc).date() usage = TokenUsage.query.filter_by( user_id=user_id, date=today, model=model ).first() diff --git a/docs/Design.md b/docs/Design.md index 0647108..fe16f1a 100644 --- a/docs/Design.md +++ b/docs/Design.md @@ -66,6 +66,7 @@ backend/ │ ├── 
data.py # 计算器、文本、JSON │ ├── weather.py # 天气查询 │ ├── file_ops.py # 文件操作(project_id 自动注入) +│ ├── agent.py # 多智能体(子 Agent 并发执行,工具权限隔离) │ └── code.py # 代码执行 │ ├── utils/ # 辅助函数 @@ -1020,6 +1021,13 @@ frontend_port: 4000 # 智能体循环最大迭代次数(工具调用轮次上限,默认 5) max_iterations: 15 +# 子代理资源配置(multi_agent 工具) +sub_agent: + max_iterations: 3 # 每个子代理的最大工具调用轮数 + max_tokens: 4096 # 每次调用的最大 token 数 + max_agents: 5 # 每次请求最多派生的子代理数 + max_concurrency: 3 # 并发线程数 + # 可用模型列表(每个模型必须指定 api_url 和 api_key) # 支持任何 OpenAI 兼容 API(DeepSeek、GLM、OpenAI、Moonshot、Qwen 等) models: diff --git a/docs/ToolSystemDesign.md b/docs/ToolSystemDesign.md index 1c76288..f56df60 100644 --- a/docs/ToolSystemDesign.md +++ b/docs/ToolSystemDesign.md @@ -249,14 +249,25 @@ file_read({"path": "src/main.py", "project_id": "xxx"}) | 工具名称 | 描述 | 参数 | |---------|------|------| -| `multi_agent` | 派生子 Agent 并发执行任务(最多 5 个) | `tasks`: 任务数组(name, instruction, tools)
`_model`: 模型名称(自动注入)
`_project_id`: 项目 ID(自动注入) | +| `multi_agent` | 派生子 Agent 并发执行任务(数量与并发上限见 `config.yml` → `sub_agent`) | `tasks`: 任务数组(name, instruction, tools)&lt;br&gt;
`_model`: 模型名称(自动注入)
`_project_id`: 项目 ID(自动注入) | **`multi_agent` 工作原理:** 1. 接收任务数组,每个任务指定 name、instruction 和可选的 tools 列表 -2. 为每个子 Agent 创建独立线程,各自拥有 LLM 对话循环(最多 3 轮迭代,4096 tokens) -3. 通过 Service Locator 获取 `llm_client` 实例 -4. 子 Agent 在 `app.app_context()` 中运行,可独立调用所有注册工具 -5. 返回 `{success, results: [{task_name, success, response/error}], total}` +2. 子 Agent **禁止使用 `multi_agent` 工具**(`BLOCKED_TOOLS`),防止无限递归 +3. 子 Agent 工具权限与主 Agent 一致(除 multi_agent 外的所有已注册工具),支持并行工具执行 +4. 为每个子 Agent 创建独立线程,各自拥有 LLM 对话循环 +5. 子 Agent 在 `app.app_context()` 中运行 LLM 调用和工具执行,确保数据库等依赖正常工作 +6. 通过 Service Locator 获取 `llm_client` 实例 +7. 返回 `{success, results: [{task_name, success, response/error}], total}` + +**资源配置**(`config.yml` → `sub_agent`): + +| 配置项 | 默认值 | 说明 | +|--------|--------|------| +| `max_iterations` | 3 | 每个子代理的最大工具调用轮数 | +| `max_tokens` | 4096 | 每次调用的最大 token 数 | +| `max_agents` | 5 | 每次请求最多派生的子代理数 | +| `max_concurrency` | 3 | ThreadPoolExecutor 并发线程数 | --- diff --git a/frontend/src/components/StatsPanel.vue b/frontend/src/components/StatsPanel.vue index 66ea9f4..7d640d1 100644 --- a/frontend/src/components/StatsPanel.vue +++ b/frontend/src/components/StatsPanel.vue @@ -132,8 +132,15 @@ const sortedDaily = computed(() => { const chartData = computed(() => { if (period.value === 'daily' && stats.value?.hourly) { const hourly = stats.value.hourly + // Backend returns UTC hours — convert to local timezone for display. + const offset = -new Date().getTimezoneOffset() / 60 // e.g. 
+8 for UTC+8 + const localHourly = {} + for (const [utcH, val] of Object.entries(hourly)) { + const localH = ((parseInt(utcH) + offset) % 24 + 24) % 24 + localHourly[localH] = val + } let minH = 24, maxH = -1 - for (const h of Object.keys(hourly)) { + for (const h of Object.keys(localHourly)) { const hour = parseInt(h) if (hour < minH) minH = hour if (hour > maxH) maxH = hour @@ -145,16 +152,19 @@ const chartData = computed(() => { const h = start + i return { label: `${h}:00`, - value: hourly[String(h)]?.total || 0, + value: localHourly[String(h)]?.total || 0, } }) } const data = sortedDaily.value return Object.entries(data).map(([date, val]) => { - const d = new Date(date) + // date is "YYYY-MM-DD" from backend — parse directly to avoid + // new Date() timezone shift (parsed as UTC midnight then + // getMonth/getDate applies local offset, potentially off by one day). + const [year, month, day] = date.split('-') return { - label: `${d.getMonth() + 1}/${d.getDate()}`, + label: `${parseInt(month)}/${parseInt(day)}`, value: val.total, prompt: val.prompt || 0, completion: val.completion || 0,