From 3fd308d6b6342469f7cd89804c55378f0c0a4e01 Mon Sep 17 00:00:00 2001
From: ViperEkura <3081035982@qq.com>
Date: Wed, 25 Mar 2026 16:36:25 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E6=9B=B4=E6=96=B0=E5=B7=A5=E5=85=B7?=
=?UTF-8?q?=E8=B0=83=E7=94=A8=E6=98=BE=E7=A4=BA=E9=80=BB=E8=BE=91?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
backend/services/chat.py | 84 ++++++++--
backend/tools/executor.py | 63 +++----
backend/utils/helpers.py | 22 ++-
docs/Design.md | 45 ++++-
docs/ToolSystemDesign.md | 45 ++---
frontend/src/App.vue | 195 +++++++++++++++++-----
frontend/src/api/index.js | 8 +-
frontend/src/components/ChatView.vue | 3 +
frontend/src/components/MessageBubble.vue | 4 +-
frontend/src/components/ProcessBlock.vue | 149 ++++++++++++++---
10 files changed, 466 insertions(+), 152 deletions(-)
diff --git a/backend/services/chat.py b/backend/services/chat.py
index f9e7df2..6b06918 100644
--- a/backend/services/chat.py
+++ b/backend/services/chat.py
@@ -99,7 +99,11 @@ class ChatService:
return err(500, "exceeded maximum tool call iterations")
def stream_response(self, conv: Conversation, tools_enabled: bool = True):
- """Stream response with tool call support"""
+ """Stream response with tool call support
+
+ Uses 'process_step' events to send thinking and tool calls in order,
+ allowing them to be interleaved properly in the frontend.
+ """
conv_id = conv.id
conv_model = conv.model
app = current_app._get_current_object()
@@ -113,6 +117,7 @@ class ChatService:
messages = list(initial_messages)
all_tool_calls = []
all_tool_results = []
+ step_index = 0 # Track global step index for ordering
for iteration in range(self.MAX_ITERATIONS):
full_content = ""
@@ -122,6 +127,9 @@ class ChatService:
msg_id = str(uuid.uuid4())
tool_calls_list = []
+ # Send thinking_start event to clear previous thinking in frontend
+ yield f"event: thinking_start\ndata: {{}}\n\n"
+
try:
with app.app_context():
active_conv = db.session.get(Conversation, conv_id)
@@ -152,10 +160,11 @@ class ChatService:
delta = chunk["choices"][0].get("delta", {})
- # Process thinking
+ # Process thinking - send as process_step
reasoning = delta.get("reasoning_content", "")
if reasoning:
full_thinking += reasoning
+ # Still send thinking event for backward compatibility
yield f"event: thinking\ndata: {json.dumps({'content': reasoning}, ensure_ascii=False)}\n\n"
# Process text
@@ -179,9 +188,40 @@ class ChatService:
# Tool calls exist - execute and continue
if tool_calls_list:
all_tool_calls.extend(tool_calls_list)
+
+ # Send thinking as a complete step if exists
+ if full_thinking:
+ yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
+ step_index += 1
+
+ # Also send legacy tool_calls event for backward compatibility
yield f"event: tool_calls\ndata: {json.dumps({'calls': tool_calls_list}, ensure_ascii=False)}\n\n"
- tool_results = self.executor.process_tool_calls(tool_calls_list)
+ # Process each tool call one by one, send result immediately
+ tool_results = []
+ for tc in tool_calls_list:
+ # Send tool call step
+ yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_call', 'id': tc['id'], 'name': tc['function']['name'], 'arguments': tc['function']['arguments']}, ensure_ascii=False)}\n\n"
+ step_index += 1
+
+ # Execute this single tool call
+ single_result = self.executor.process_tool_calls([tc])
+ tool_results.extend(single_result)
+
+ # Send tool result step immediately
+ tr = single_result[0]
+ try:
+ result_data = json.loads(tr["content"])
+ skipped = result_data.get("skipped", False)
+ except:
+ skipped = False
+
+ yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_result', 'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
+ step_index += 1
+
+ # Also send legacy tool_result event
+ yield f"event: tool_result\ndata: {json.dumps({'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
+
messages.append({
"role": "assistant",
"content": full_content or None,
@@ -189,12 +229,14 @@ class ChatService:
})
messages.extend(tool_results)
all_tool_results.extend(tool_results)
-
- for tr in tool_results:
- yield f"event: tool_result\ndata: {json.dumps({'name': tr['name'], 'content': tr['content']}, ensure_ascii=False)}\n\n"
continue
# No tool calls - finish
+ # Send thinking as a step if exists
+ if full_thinking:
+ yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
+ step_index += 1
+
with app.app_context():
msg = Message(
id=msg_id,
@@ -221,7 +263,12 @@ class ChatService:
return Response(
generate(),
mimetype="text/event-stream",
- headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}
+ headers={
+ "Cache-Control": "no-cache, no-store, must-revalidate",
+ "X-Accel-Buffering": "no",
+ "Connection": "keep-alive",
+ "Transfer-Encoding": "chunked",
+ }
)
def _save_tool_calls(self, message_id: str, tool_calls: list, tool_results: list) -> None:
@@ -256,8 +303,20 @@ class ChatService:
# Add tool calls if any
tool_calls = msg.tool_calls.all() if msg.tool_calls else []
if tool_calls:
- result["tool_calls"] = [
- {
+ result["tool_calls"] = []
+ for tc in tool_calls:
+ # Parse result to extract success/skipped status
+ success = True
+ skipped = False
+ if tc.result:
+ try:
+ result_data = json.loads(tc.result)
+ success = result_data.get("success", True)
+ skipped = result_data.get("skipped", False)
+ except:
+ pass
+
+ result["tool_calls"].append({
"id": tc.call_id,
"type": "function",
"function": {
@@ -265,9 +324,10 @@ class ChatService:
"arguments": tc.arguments,
},
"result": tc.result,
- }
- for tc in tool_calls
- ]
+ "success": success,
+ "skipped": skipped,
+ "execution_time": tc.execution_time,
+ })
return result
diff --git a/backend/tools/executor.py b/backend/tools/executor.py
index 38bc676..fc535c9 100644
--- a/backend/tools/executor.py
+++ b/backend/tools/executor.py
@@ -16,12 +16,14 @@ class ToolExecutor:
api_key: Optional[str] = None,
enable_cache: bool = True,
cache_ttl: int = 300, # 5 minutes
+        max_retries: int = 2,  # Reserved; retries are disabled (see _execute_with_retry), value is stored but unused
):
self.registry = registry or ToolRegistry()
self.api_url = api_url
self.api_key = api_key
self.enable_cache = enable_cache
self.cache_ttl = cache_ttl
+ self.max_retries = max_retries
self._cache: Dict[str, tuple] = {} # key -> (result, timestamp)
self._call_history: List[dict] = [] # Track calls in current session
@@ -115,11 +117,12 @@ class ToolExecutor:
results.append(self._create_tool_result(call_id, name, result))
continue
- # Execute tool
- result = self.registry.execute(name, args)
+ # Execute tool with retry
+ result = self._execute_with_retry(name, args)
- # Cache the result
- self._set_cache(cache_key, result)
+ # Cache the result (only cache successful results)
+ if result.get("success"):
+ self._set_cache(cache_key, result)
# Add to history
self._call_history.append({
@@ -132,6 +135,24 @@ class ToolExecutor:
return results
+ def _execute_with_retry(
+ self,
+ name: str,
+ arguments: dict,
+ ) -> dict:
+ """
+        Execute tool without automatic retry (NOTE: name kept from the removed execute_with_retry API; it no longer retries).
+
+ If the tool fails, return the error to let the model decide
+ whether to retry with the same tool or try a different approach.
+
+ Returns:
+ Result dict with success status. Failed tool returns:
+ {"success": False, "error": "..."}
+ """
+ result = self.registry.execute(name, arguments)
+ return result
+
def _create_tool_result(
self,
call_id: str,
@@ -191,37 +212,3 @@ class ToolExecutor:
"tool_choice": kwargs.get("tool_choice", "auto"),
**{k: v for k, v in kwargs.items() if k not in ["tool_choice"]}
}
-
- def execute_with_retry(
- self,
- name: str,
- arguments: dict,
- max_retries: int = 3,
- retry_delay: float = 1.0
- ) -> dict:
- """
- Execute tool with retry
-
- Args:
- name: Tool name
- arguments: Tool arguments
- max_retries: Max retry count
- retry_delay: Retry delay in seconds
-
- Returns:
- Execution result
- """
- last_error = None
-
- for attempt in range(max_retries):
- try:
- return self.registry.execute(name, arguments)
- except Exception as e:
- last_error = e
- if attempt < max_retries - 1:
- time.sleep(retry_delay)
-
- return {
- "success": False,
- "error": f"Failed after {max_retries} retries: {last_error}"
- }
diff --git a/backend/utils/helpers.py b/backend/utils/helpers.py
index 3505c11..50cd2db 100644
--- a/backend/utils/helpers.py
+++ b/backend/utils/helpers.py
@@ -53,8 +53,20 @@ def message_to_dict(msg: Message) -> dict:
# Add tool calls if any
tool_calls = msg.tool_calls.all() if msg.tool_calls else []
if tool_calls:
- result["tool_calls"] = [
- {
+ result["tool_calls"] = []
+ for tc in tool_calls:
+ # Parse result to extract success/skipped status
+ success = True
+ skipped = False
+ if tc.result:
+ try:
+ result_data = json.loads(tc.result)
+ success = result_data.get("success", True)
+ skipped = result_data.get("skipped", False)
+ except:
+ pass
+
+ result["tool_calls"].append({
"id": tc.call_id,
"type": "function",
"function": {
@@ -62,10 +74,10 @@ def message_to_dict(msg: Message) -> dict:
"arguments": tc.arguments,
},
"result": tc.result,
+ "success": success,
+ "skipped": skipped,
"execution_time": tc.execution_time,
- }
- for tc in tool_calls
- ]
+ })
return result
diff --git a/docs/Design.md b/docs/Design.md
index f08575b..9d387be 100644
--- a/docs/Design.md
+++ b/docs/Design.md
@@ -158,6 +158,7 @@ classDiagram
-_save_tool_calls(msg_id, calls, results) void
-_message_to_dict(msg) dict
-_process_tool_calls_delta(delta, list) list
+ -_emit_process_step(event, data) void
}
class GLMClient {
@@ -173,7 +174,6 @@ classDiagram
+process_tool_calls(calls, context) list
+build_request(messages, model, tools) dict
+clear_history() void
- +execute_with_retry(name, args, retries) dict
}
ChatService --> GLMClient : 使用
@@ -266,13 +266,56 @@ classDiagram
| 事件 | 说明 |
|------|------|
+| `thinking_start` | 新一轮思考开始,前端应清空之前的思考缓冲 |
| `thinking` | 思维链增量内容(启用时) |
| `message` | 回复内容的增量片段 |
| `tool_calls` | 工具调用信息 |
| `tool_result` | 工具执行结果 |
+| `process_step` | 处理步骤(按顺序:thinking/tool_call/tool_result),支持交替显示 |
| `error` | 错误信息 |
| `done` | 回复结束,携带 message_id 和 token_count |
+### 思考与工具调用交替流程
+
+```
+iteration 1:
+ thinking_start -> 前端清空 streamThinking
+ thinking (增量) -> 前端累加到 streamThinking
+ process_step(thinking, "思考内容A")
+ tool_calls -> 批量通知(兼容)
+ process_step(tool_call, "file_read") -> 调用工具
+ process_step(tool_result, {...}) -> 立即返回结果
+ process_step(tool_call, "file_list") -> 下一个工具
+ process_step(tool_result, {...}) -> 立即返回结果
+
+iteration 2:
+ thinking_start -> 前端清空 streamThinking
+ thinking (增量) -> 前端累加到 streamThinking
+ process_step(thinking, "思考内容B")
+ done
+```
+
+### process_step 事件格式
+
+```json
+// 思考过程
+{"index": 0, "type": "thinking", "content": "完整思考内容..."}
+
+// 工具调用
+{"index": 1, "type": "tool_call", "id": "call_abc123", "name": "web_search", "arguments": "{\"query\": \"...\"}"}
+
+// 工具返回
+{"index": 2, "type": "tool_result", "id": "call_abc123", "name": "web_search", "content": "{\"success\": true, ...}", "skipped": false}
+```
+
+字段说明:
+- `index`: 步骤序号,确保按正确顺序显示
+- `type`: 步骤类型(thinking/tool_call/tool_result)
+- `id`: 工具调用唯一标识,用于匹配工具调用和返回结果
+- `name`: 工具名称
+- `content`: 内容或结果
+- `skipped`: 工具是否被跳过(失败后跳过)
+
---
## 数据模型
diff --git a/docs/ToolSystemDesign.md b/docs/ToolSystemDesign.md
index 5cd5d8f..a990449 100644
--- a/docs/ToolSystemDesign.md
+++ b/docs/ToolSystemDesign.md
@@ -40,7 +40,6 @@ classDiagram
+process_tool_calls(list tool_calls, dict context) list~dict~
+build_request(list messages, str model, list tools, dict kwargs) dict
+clear_history() void
- +execute_with_retry(str name, dict args, int max_retries) dict
}
class ToolResult {
@@ -236,6 +235,14 @@ class ToolExecutor:
self._cache: Dict[str, tuple] = {} # key -> (result, timestamp)
self._call_history: List[dict] = [] # 当前会话的调用历史
+ def _execute_with_retry(self, name: str, arguments: dict) -> dict:
+ """
+ 执行工具,不自动重试。
+ 成功或失败都直接返回结果,由模型决定下一步操作。
+ """
+ result = self.registry.execute(name, arguments)
+ return result
+
def _make_cache_key(self, name: str, args: dict) -> str:
"""生成缓存键"""
args_str = json.dumps(args, sort_keys=True, ensure_ascii=False)
@@ -390,29 +397,6 @@ class ToolExecutor:
"tool_choice": kwargs.get("tool_choice", "auto"),
**{k: v for k, v in kwargs.items() if k not in ["tool_choice"]}
}
-
- def execute_with_retry(
- self,
- name: str,
- arguments: dict,
- max_retries: int = 3,
- retry_delay: float = 1.0
- ) -> dict:
- """带重试的工具执行"""
- last_error = None
-
- for attempt in range(max_retries):
- try:
- return self.registry.execute(name, arguments)
- except Exception as e:
- last_error = e
- if attempt < max_retries - 1:
- time.sleep(retry_delay)
-
- return {
- "success": False,
- "error": f"Failed after {max_retries} retries: {last_error}"
- }
```
---
@@ -954,7 +938,13 @@ def my_tool(arguments: dict) -> dict:
- **历史去重**:同一会话内已调用过的工具会直接返回缓存结果
- **自动清理**:新会话开始时调用 `clear_history()` 清理历史
-### 9.4 安全设计
+### 9.4 无自动重试
+
+- **直接返回结果**:工具执行成功或失败都直接返回,不自动重试
+- **模型决策**:失败时返回错误信息,由模型决定是否重试或尝试其他工具
+- **灵活性**:模型可以根据错误类型选择不同的解决策略
+
+### 9.5 安全设计
- **计算器安全**:禁止函数调用和变量名,只支持数学运算
- **文件沙箱**:文件操作限制在项目根目录内,防止越权访问
@@ -970,5 +960,6 @@ def my_tool(arguments: dict) -> dict:
2. **工厂模式**:使用 `@tool` 装饰器注册工具
3. **服务分离**:工具依赖的服务独立,不与工具类耦合
4. **性能优化**:支持缓存和重复检测,减少重复计算和网络请求
-5. **易于扩展**:新增工具只需写一个函数并加装饰器
-6. **安全可靠**:文件沙箱、安全计算、完善的错误处理
+5. **智能决策**:工具执行失败时不自动重试,由模型决定下一步操作
+6. **易于扩展**:新增工具只需写一个函数并加装饰器
+7. **安全可靠**:文件沙箱、安全计算、完善的错误处理
diff --git a/frontend/src/App.vue b/frontend/src/App.vue
index 6486b95..2659896 100644
--- a/frontend/src/App.vue
+++ b/frontend/src/App.vue
@@ -19,6 +19,7 @@
:streaming-content="streamContent"
:streaming-thinking="streamThinking"
:streaming-tool-calls="streamToolCalls"
+ :streaming-process-steps="streamProcessSteps"
:has-more-messages="hasMoreMessages"
:loading-more="loadingMessages"
:tools-enabled="toolsEnabled"
@@ -65,6 +66,13 @@ const streaming = ref(false)
const streamContent = ref('')
const streamThinking = ref('')
const streamToolCalls = ref([])
+const streamProcessSteps = ref([])
+
+// 保存每个对话的流式状态
+const streamStates = new Map()
+
+// 保存当前流式请求引用
+let currentStreamPromise = null
// -- UI state --
const showSettings = ref(false)
@@ -111,12 +119,38 @@ async function createConversation() {
// -- Select conversation --
async function selectConversation(id) {
+ // 保存当前对话的流式状态(如果有)
+ if (currentConvId.value && streaming.value) {
+ streamStates.set(currentConvId.value, {
+ streaming: true,
+ streamContent: streamContent.value,
+ streamThinking: streamThinking.value,
+ streamToolCalls: [...streamToolCalls.value],
+ streamProcessSteps: [...streamProcessSteps.value],
+ })
+ }
+
currentConvId.value = id
messages.value = []
nextMsgCursor.value = null
hasMoreMessages.value = false
- streamContent.value = ''
- streamThinking.value = ''
+
+ // 恢复新对话的流式状态
+ const savedState = streamStates.get(id)
+ if (savedState && savedState.streaming) {
+ streaming.value = true
+ streamContent.value = savedState.streamContent
+ streamThinking.value = savedState.streamThinking
+ streamToolCalls.value = savedState.streamToolCalls
+ streamProcessSteps.value = savedState.streamProcessSteps
+ } else {
+ streaming.value = false
+ streamContent.value = ''
+ streamThinking.value = ''
+ streamToolCalls.value = []
+ streamProcessSteps.value = []
+ }
+
await loadMessages(true)
}
@@ -149,10 +183,12 @@ function loadMoreMessages() {
async function sendMessage(content) {
if (!currentConvId.value || streaming.value) return
+ const convId = currentConvId.value // 保存当前对话ID
+
// Add user message optimistically
const userMsg = {
id: 'temp_' + Date.now(),
- conversation_id: currentConvId.value,
+ conversation_id: convId,
role: 'user',
content,
token_count: 0,
@@ -165,71 +201,138 @@ async function sendMessage(content) {
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
+ streamProcessSteps.value = []
- await messageApi.send(currentConvId.value, content, {
+ currentStreamPromise = messageApi.send(convId, content, {
stream: true,
toolsEnabled: toolsEnabled.value,
+ onThinkingStart() {
+ if (currentConvId.value === convId) {
+ streamThinking.value = ''
+ } else {
+ const saved = streamStates.get(convId) || {}
+ streamStates.set(convId, { ...saved, streamThinking: '' })
+ }
+ },
onThinking(text) {
- streamThinking.value += text
+ if (currentConvId.value === convId) {
+ streamThinking.value += text
+ } else {
+ const saved = streamStates.get(convId) || { streamThinking: '' }
+ streamStates.set(convId, { ...saved, streamThinking: (saved.streamThinking || '') + text })
+ }
},
onMessage(text) {
- streamContent.value += text
+ if (currentConvId.value === convId) {
+ streamContent.value += text
+ } else {
+ const saved = streamStates.get(convId) || { streamContent: '' }
+ streamStates.set(convId, { ...saved, streamContent: (saved.streamContent || '') + text })
+ }
},
onToolCalls(calls) {
console.log('🔧 Tool calls received:', calls)
- streamToolCalls.value = calls
+ if (currentConvId.value === convId) {
+ streamToolCalls.value.push(...calls.map(c => ({ ...c, result: null })))
+ } else {
+ const saved = streamStates.get(convId) || { streamToolCalls: [] }
+ const newCalls = [...(saved.streamToolCalls || []), ...calls.map(c => ({ ...c, result: null }))]
+ streamStates.set(convId, { ...saved, streamToolCalls: newCalls })
+ }
},
onToolResult(result) {
console.log('✅ Tool result received:', result)
- // 更新工具调用结果
- const call = streamToolCalls.value.find(c => c.function?.name === result.name)
- if (call) {
- call.result = result.content
+ if (currentConvId.value === convId) {
+ const call = streamToolCalls.value.find(c => c.id === result.id)
+ if (call) call.result = result.content
} else {
- // 如果找不到,添加到第一个调用(兜底处理)
- if (streamToolCalls.value.length > 0) {
- streamToolCalls.value[0].result = result.content
+ const saved = streamStates.get(convId) || { streamToolCalls: [] }
+ const call = saved.streamToolCalls?.find(c => c.id === result.id)
+ if (call) call.result = result.content
+ streamStates.set(convId, { ...saved })
+ }
+ },
+ onProcessStep(step) {
+ const idx = step.index
+ if (currentConvId.value === convId) {
+ // 创建新数组确保响应式更新
+ const newSteps = [...streamProcessSteps.value]
+ while (newSteps.length <= idx) {
+ newSteps.push(null)
}
+ newSteps[idx] = step
+ streamProcessSteps.value = newSteps
+ } else {
+ const saved = streamStates.get(convId) || { streamProcessSteps: [] }
+ const steps = [...(saved.streamProcessSteps || [])]
+ while (steps.length <= idx) steps.push(null)
+ steps[idx] = step
+ streamStates.set(convId, { ...saved, streamProcessSteps: steps })
}
},
async onDone(data) {
- streaming.value = false
- // Replace temp message and add assistant message from server
- messages.value = messages.value.filter(m => m.id !== userMsg.id)
- messages.value.push({
- id: data.message_id,
- conversation_id: currentConvId.value,
- role: 'assistant',
- content: streamContent.value,
- token_count: data.token_count,
- thinking_content: streamThinking.value || null,
- tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
- created_at: new Date().toISOString(),
- })
- streamContent.value = ''
- streamThinking.value = ''
- // Update conversation in list (move to top)
- const idx = conversations.value.findIndex(c => c.id === currentConvId.value)
- if (idx > 0) {
- const [conv] = conversations.value.splice(idx, 1)
- conv.message_count = (conv.message_count || 0) + 2
- conversations.value.unshift(conv)
- } else if (idx === 0) {
- conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
- }
- // Auto title: use first message if title is empty
- if (conversations.value[0] && !conversations.value[0].title) {
+ // 清除保存的状态
+ streamStates.delete(convId)
+
+ if (currentConvId.value === convId) {
+ streaming.value = false
+ currentStreamPromise = null
+ // Replace temp message and add assistant message from server
+ messages.value = messages.value.filter(m => m.id !== userMsg.id)
+ messages.value.push({
+ id: data.message_id,
+ conversation_id: convId,
+ role: 'assistant',
+ content: streamContent.value,
+ token_count: data.token_count,
+ thinking_content: streamThinking.value || null,
+ tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
+ process_steps: streamProcessSteps.value.filter(Boolean),
+ created_at: new Date().toISOString(),
+ })
+ streamContent.value = ''
+ streamThinking.value = ''
+ streamToolCalls.value = []
+ streamProcessSteps.value = []
+ // Update conversation in list (move to top)
+ const idx = conversations.value.findIndex(c => c.id === convId)
+ if (idx > 0) {
+ const [conv] = conversations.value.splice(idx, 1)
+ conv.message_count = (conv.message_count || 0) + 2
+ conversations.value.unshift(conv)
+ } else if (idx === 0) {
+ conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
+ }
+ // Auto title: use first message if title is empty
+ if (conversations.value[0] && !conversations.value[0].title) {
+ try {
+ await conversationApi.update(convId, { title: content.slice(0, 30) })
+ conversations.value[0].title = content.slice(0, 30)
+ } catch (_) {}
+ }
+ } else {
+ // 后台完成,重新加载该对话的消息
try {
- await conversationApi.update(currentConvId.value, { title: content.slice(0, 30) })
- conversations.value[0].title = content.slice(0, 30)
+ const res = await messageApi.list(convId, null, 50)
+ // 更新对话列表中的消息计数
+ const idx = conversations.value.findIndex(c => c.id === convId)
+ if (idx >= 0) {
+ conversations.value[idx].message_count = res.data.items.length
+ }
} catch (_) {}
}
},
onError(msg) {
- streaming.value = false
- streamContent.value = ''
- streamThinking.value = ''
- console.error('Stream error:', msg)
+ streamStates.delete(convId)
+ if (currentConvId.value === convId) {
+ streaming.value = false
+ currentStreamPromise = null
+ streamContent.value = ''
+ streamThinking.value = ''
+ streamToolCalls.value = []
+ streamProcessSteps.value = []
+ console.error('Stream error:', msg)
+ }
},
})
}
diff --git a/frontend/src/api/index.js b/frontend/src/api/index.js
index 5b78353..5e5749e 100644
--- a/frontend/src/api/index.js
+++ b/frontend/src/api/index.js
@@ -95,7 +95,7 @@ export const messageApi = {
return request(`/conversations/${convId}/messages?${params}`)
},
- send(convId, content, { stream = true, toolsEnabled = true, onThinking, onMessage, onToolCalls, onToolResult, onDone, onError } = {}) {
+ send(convId, content, { stream = true, toolsEnabled = true, onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError } = {}) {
if (!stream) {
return request(`/conversations/${convId}/messages`, {
method: 'POST',
@@ -137,7 +137,9 @@ export const messageApi = {
currentEvent = line.slice(7).trim()
} else if (line.startsWith('data: ')) {
const data = JSON.parse(line.slice(6))
- if (currentEvent === 'thinking' && onThinking) {
+ if (currentEvent === 'thinking_start' && onThinkingStart) {
+ onThinkingStart()
+ } else if (currentEvent === 'thinking' && onThinking) {
onThinking(data.content)
} else if (currentEvent === 'message' && onMessage) {
onMessage(data.content)
@@ -145,6 +147,8 @@ export const messageApi = {
onToolCalls(data.calls)
} else if (currentEvent === 'tool_result' && onToolResult) {
onToolResult(data)
+ } else if (currentEvent === 'process_step' && onProcessStep) {
+ onProcessStep(data)
} else if (currentEvent === 'done' && onDone) {
onDone(data)
} else if (currentEvent === 'error' && onError) {
diff --git a/frontend/src/components/ChatView.vue b/frontend/src/components/ChatView.vue
index 5d28087..e3ba793 100644
--- a/frontend/src/components/ChatView.vue
+++ b/frontend/src/components/ChatView.vue
@@ -38,6 +38,7 @@
:content="msg.content"
:thinking-content="msg.thinking_content"
:tool-calls="msg.tool_calls"
+ :process-steps="msg.process_steps"
:tool-name="msg.name"
:token-count="msg.token_count"
:created-at="msg.created_at"
@@ -51,6 +52,7 @@