feat: 更新工具调用显示逻辑

This commit is contained in:
ViperEkura 2026-03-25 16:36:25 +08:00
parent 7742a5c182
commit 3fd308d6b6
10 changed files with 466 additions and 152 deletions

View File

@ -99,7 +99,11 @@ class ChatService:
return err(500, "exceeded maximum tool call iterations")
def stream_response(self, conv: Conversation, tools_enabled: bool = True):
"""Stream response with tool call support"""
"""Stream response with tool call support
Uses 'process_step' events to send thinking and tool calls in order,
allowing them to be interleaved properly in the frontend.
"""
conv_id = conv.id
conv_model = conv.model
app = current_app._get_current_object()
@ -113,6 +117,7 @@ class ChatService:
messages = list(initial_messages)
all_tool_calls = []
all_tool_results = []
step_index = 0 # Track global step index for ordering
for iteration in range(self.MAX_ITERATIONS):
full_content = ""
@ -122,6 +127,9 @@ class ChatService:
msg_id = str(uuid.uuid4())
tool_calls_list = []
# Send thinking_start event to clear previous thinking in frontend
yield f"event: thinking_start\ndata: {{}}\n\n"
try:
with app.app_context():
active_conv = db.session.get(Conversation, conv_id)
@ -152,10 +160,11 @@ class ChatService:
delta = chunk["choices"][0].get("delta", {})
# Process thinking
# Process thinking - send as process_step
reasoning = delta.get("reasoning_content", "")
if reasoning:
full_thinking += reasoning
# Still send thinking event for backward compatibility
yield f"event: thinking\ndata: {json.dumps({'content': reasoning}, ensure_ascii=False)}\n\n"
# Process text
@ -179,9 +188,40 @@ class ChatService:
# Tool calls exist - execute and continue
if tool_calls_list:
all_tool_calls.extend(tool_calls_list)
# Send thinking as a complete step if exists
if full_thinking:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
step_index += 1
# Also send legacy tool_calls event for backward compatibility
yield f"event: tool_calls\ndata: {json.dumps({'calls': tool_calls_list}, ensure_ascii=False)}\n\n"
tool_results = self.executor.process_tool_calls(tool_calls_list)
# Process each tool call one by one, send result immediately
tool_results = []
for tc in tool_calls_list:
# Send tool call step
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_call', 'id': tc['id'], 'name': tc['function']['name'], 'arguments': tc['function']['arguments']}, ensure_ascii=False)}\n\n"
step_index += 1
# Execute this single tool call
single_result = self.executor.process_tool_calls([tc])
tool_results.extend(single_result)
# Send tool result step immediately
tr = single_result[0]
try:
result_data = json.loads(tr["content"])
skipped = result_data.get("skipped", False)
except:
skipped = False
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_result', 'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
step_index += 1
# Also send legacy tool_result event
yield f"event: tool_result\ndata: {json.dumps({'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
messages.append({
"role": "assistant",
"content": full_content or None,
@ -189,12 +229,14 @@ class ChatService:
})
messages.extend(tool_results)
all_tool_results.extend(tool_results)
for tr in tool_results:
yield f"event: tool_result\ndata: {json.dumps({'name': tr['name'], 'content': tr['content']}, ensure_ascii=False)}\n\n"
continue
# No tool calls - finish
# Send thinking as a step if exists
if full_thinking:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
step_index += 1
with app.app_context():
msg = Message(
id=msg_id,
@ -221,7 +263,12 @@ class ChatService:
return Response(
generate(),
mimetype="text/event-stream",
headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"X-Accel-Buffering": "no",
"Connection": "keep-alive",
"Transfer-Encoding": "chunked",
}
)
def _save_tool_calls(self, message_id: str, tool_calls: list, tool_results: list) -> None:
@ -256,8 +303,20 @@ class ChatService:
# Add tool calls if any
tool_calls = msg.tool_calls.all() if msg.tool_calls else []
if tool_calls:
result["tool_calls"] = [
{
result["tool_calls"] = []
for tc in tool_calls:
# Parse result to extract success/skipped status
success = True
skipped = False
if tc.result:
try:
result_data = json.loads(tc.result)
success = result_data.get("success", True)
skipped = result_data.get("skipped", False)
except:
pass
result["tool_calls"].append({
"id": tc.call_id,
"type": "function",
"function": {
@ -265,9 +324,10 @@ class ChatService:
"arguments": tc.arguments,
},
"result": tc.result,
}
for tc in tool_calls
]
"success": success,
"skipped": skipped,
"execution_time": tc.execution_time,
})
return result

View File

@ -16,12 +16,14 @@ class ToolExecutor:
api_key: Optional[str] = None,
enable_cache: bool = True,
cache_ttl: int = 300, # 5 minutes
max_retries: int = 2, # Kept for API compatibility; automatic retries are no longer performed (see _execute_with_retry)
):
self.registry = registry or ToolRegistry()
self.api_url = api_url
self.api_key = api_key
self.enable_cache = enable_cache
self.cache_ttl = cache_ttl
self.max_retries = max_retries
self._cache: Dict[str, tuple] = {} # key -> (result, timestamp)
self._call_history: List[dict] = [] # Track calls in current session
@ -115,11 +117,12 @@ class ToolExecutor:
results.append(self._create_tool_result(call_id, name, result))
continue
# Execute tool
result = self.registry.execute(name, args)
# Execute tool with retry
result = self._execute_with_retry(name, args)
# Cache the result
self._set_cache(cache_key, result)
# Cache the result (only cache successful results)
if result.get("success"):
self._set_cache(cache_key, result)
# Add to history
self._call_history.append({
@ -132,6 +135,24 @@ class ToolExecutor:
return results
def _execute_with_retry(
self,
name: str,
arguments: dict,
) -> dict:
"""
Execute the tool exactly once, with no automatic retry; the name is
kept only for backward compatibility with the old retrying version.
If the tool fails, the error is returned as-is so the model can decide
whether to retry with the same tool or try a different approach.
Returns:
Result dict with success status. Failed tool returns:
{"success": False, "error": "..."}
"""
result = self.registry.execute(name, arguments)
return result
def _create_tool_result(
self,
call_id: str,
@ -191,37 +212,3 @@ class ToolExecutor:
"tool_choice": kwargs.get("tool_choice", "auto"),
**{k: v for k, v in kwargs.items() if k not in ["tool_choice"]}
}
def execute_with_retry(
self,
name: str,
arguments: dict,
max_retries: int = 3,
retry_delay: float = 1.0
) -> dict:
"""
Execute tool with retry
Args:
name: Tool name
arguments: Tool arguments
max_retries: Max retry count
retry_delay: Retry delay in seconds
Returns:
Execution result
"""
last_error = None
for attempt in range(max_retries):
try:
return self.registry.execute(name, arguments)
except Exception as e:
last_error = e
if attempt < max_retries - 1:
time.sleep(retry_delay)
return {
"success": False,
"error": f"Failed after {max_retries} retries: {last_error}"
}

View File

@ -53,8 +53,20 @@ def message_to_dict(msg: Message) -> dict:
# Add tool calls if any
tool_calls = msg.tool_calls.all() if msg.tool_calls else []
if tool_calls:
result["tool_calls"] = [
{
result["tool_calls"] = []
for tc in tool_calls:
# Parse result to extract success/skipped status
success = True
skipped = False
if tc.result:
try:
result_data = json.loads(tc.result)
success = result_data.get("success", True)
skipped = result_data.get("skipped", False)
except:
pass
result["tool_calls"].append({
"id": tc.call_id,
"type": "function",
"function": {
@ -62,10 +74,10 @@ def message_to_dict(msg: Message) -> dict:
"arguments": tc.arguments,
},
"result": tc.result,
"success": success,
"skipped": skipped,
"execution_time": tc.execution_time,
}
for tc in tool_calls
]
})
return result

View File

@ -158,6 +158,7 @@ classDiagram
-_save_tool_calls(msg_id, calls, results) void
-_message_to_dict(msg) dict
-_process_tool_calls_delta(delta, list) list
-_emit_process_step(event, data) void
}
class GLMClient {
@ -173,7 +174,6 @@ classDiagram
+process_tool_calls(calls, context) list
+build_request(messages, model, tools) dict
+clear_history() void
+execute_with_retry(name, args, retries) dict
}
ChatService --> GLMClient : 使用
@ -266,13 +266,56 @@ classDiagram
| 事件 | 说明 |
|------|------|
| `thinking_start` | 新一轮思考开始,前端应清空之前的思考缓冲 |
| `thinking` | 思维链增量内容(启用时) |
| `message` | 回复内容的增量片段 |
| `tool_calls` | 工具调用信息 |
| `tool_result` | 工具执行结果 |
| `process_step` | 处理步骤按顺序thinking/tool_call/tool_result支持交替显示 |
| `error` | 错误信息 |
| `done` | 回复结束,携带 message_id 和 token_count |
### 思考与工具调用交替流程
```
iteration 1:
thinking_start -> 前端清空 streamThinking
thinking (增量) -> 前端累加到 streamThinking
process_step(thinking, "思考内容A")
tool_calls -> 批量通知(兼容)
process_step(tool_call, "file_read") -> 调用工具
process_step(tool_result, {...}) -> 立即返回结果
process_step(tool_call, "file_list") -> 下一个工具
process_step(tool_result, {...}) -> 立即返回结果
iteration 2:
thinking_start -> 前端清空 streamThinking
thinking (增量) -> 前端累加到 streamThinking
process_step(thinking, "思考内容B")
done
```
### process_step 事件格式
```json
// 思考过程
{"index": 0, "type": "thinking", "content": "完整思考内容..."}
// 工具调用
{"index": 1, "type": "tool_call", "id": "call_abc123", "name": "web_search", "arguments": "{\"query\": \"...\"}"}
// 工具返回
{"index": 2, "type": "tool_result", "id": "call_abc123", "name": "web_search", "content": "{\"success\": true, ...}", "skipped": false}
```
字段说明:
- `index`: 步骤序号,确保按正确顺序显示
- `type`: 步骤类型thinking/tool_call/tool_result
- `id`: 工具调用唯一标识,用于匹配工具调用和返回结果
- `name`: 工具名称
- `content`: 内容或结果
- `skipped`: 工具是否被跳过(失败后跳过)
---
## 数据模型

View File

@ -40,7 +40,6 @@ classDiagram
+process_tool_calls(list tool_calls, dict context) list~dict~
+build_request(list messages, str model, list tools, dict kwargs) dict
+clear_history() void
+execute_with_retry(str name, dict args, int max_retries) dict
}
class ToolResult {
@ -236,6 +235,14 @@ class ToolExecutor:
self._cache: Dict[str, tuple] = {} # key -> (result, timestamp)
self._call_history: List[dict] = [] # 当前会话的调用历史
def _execute_with_retry(self, name: str, arguments: dict) -> dict:
"""
执行工具,不自动重试。
成功或失败都直接返回结果,由模型决定下一步操作。
"""
result = self.registry.execute(name, arguments)
return result
def _make_cache_key(self, name: str, args: dict) -> str:
"""生成缓存键"""
args_str = json.dumps(args, sort_keys=True, ensure_ascii=False)
@ -390,29 +397,6 @@ class ToolExecutor:
"tool_choice": kwargs.get("tool_choice", "auto"),
**{k: v for k, v in kwargs.items() if k not in ["tool_choice"]}
}
def execute_with_retry(
self,
name: str,
arguments: dict,
max_retries: int = 3,
retry_delay: float = 1.0
) -> dict:
"""带重试的工具执行"""
last_error = None
for attempt in range(max_retries):
try:
return self.registry.execute(name, arguments)
except Exception as e:
last_error = e
if attempt < max_retries - 1:
time.sleep(retry_delay)
return {
"success": False,
"error": f"Failed after {max_retries} retries: {last_error}"
}
```
---
@ -954,7 +938,13 @@ def my_tool(arguments: dict) -> dict:
- **历史去重**:同一会话内已调用过的工具会直接返回缓存结果
- **自动清理**:新会话开始时调用 `clear_history()` 清理历史
### 9.4 安全设计
### 9.4 无自动重试
- **直接返回结果**:工具执行成功或失败都直接返回,不自动重试
- **模型决策**:失败时返回错误信息,由模型决定是否重试或尝试其他工具
- **灵活性**:模型可以根据错误类型选择不同的解决策略
### 9.5 安全设计
- **计算器安全**:禁止函数调用和变量名,只支持数学运算
- **文件沙箱**:文件操作限制在项目根目录内,防止越权访问
@ -970,5 +960,6 @@ def my_tool(arguments: dict) -> dict:
2. **工厂模式**:使用 `@tool` 装饰器注册工具
3. **服务分离**:工具依赖的服务独立,不与工具类耦合
4. **性能优化**:支持缓存和重复检测,减少重复计算和网络请求
5. **易于扩展**:新增工具只需写一个函数并加装饰器
6. **安全可靠**:文件沙箱、安全计算、完善的错误处理
5. **智能决策**:工具执行失败时不自动重试,由模型决定下一步操作
6. **易于扩展**:新增工具只需写一个函数并加装饰器
7. **安全可靠**:文件沙箱、安全计算、完善的错误处理

View File

@ -19,6 +19,7 @@
:streaming-content="streamContent"
:streaming-thinking="streamThinking"
:streaming-tool-calls="streamToolCalls"
:streaming-process-steps="streamProcessSteps"
:has-more-messages="hasMoreMessages"
:loading-more="loadingMessages"
:tools-enabled="toolsEnabled"
@ -65,6 +66,13 @@ const streaming = ref(false)
const streamContent = ref('')
const streamThinking = ref('')
const streamToolCalls = ref([])
const streamProcessSteps = ref([])
// Saved streaming state per conversation id, for streams still running after the user switches away
const streamStates = new Map()
// Promise of the in-flight send request, if any
let currentStreamPromise = null
// -- UI state --
const showSettings = ref(false)
@ -111,12 +119,38 @@ async function createConversation() {
// -- Select conversation --
async function selectConversation(id) {
// Stash the in-flight stream state before switching conversations
if (currentConvId.value && streaming.value) {
streamStates.set(currentConvId.value, {
streaming: true,
streamContent: streamContent.value,
streamThinking: streamThinking.value,
streamToolCalls: [...streamToolCalls.value],
streamProcessSteps: [...streamProcessSteps.value],
})
}
currentConvId.value = id
messages.value = []
nextMsgCursor.value = null
hasMoreMessages.value = false
streamContent.value = ''
streamThinking.value = ''
// Restore previously saved stream state for the selected conversation, if present
const savedState = streamStates.get(id)
if (savedState && savedState.streaming) {
streaming.value = true
streamContent.value = savedState.streamContent
streamThinking.value = savedState.streamThinking
streamToolCalls.value = savedState.streamToolCalls
streamProcessSteps.value = savedState.streamProcessSteps
} else {
streaming.value = false
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
}
await loadMessages(true)
}
@ -149,10 +183,12 @@ function loadMoreMessages() {
async function sendMessage(content) {
if (!currentConvId.value || streaming.value) return
const convId = currentConvId.value // capture the id so callbacks still target this conversation after a switch
// Add user message optimistically
const userMsg = {
id: 'temp_' + Date.now(),
conversation_id: currentConvId.value,
conversation_id: convId,
role: 'user',
content,
token_count: 0,
@ -165,71 +201,138 @@ async function sendMessage(content) {
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
await messageApi.send(currentConvId.value, content, {
currentStreamPromise = messageApi.send(convId, content, {
stream: true,
toolsEnabled: toolsEnabled.value,
onThinkingStart() {
if (currentConvId.value === convId) {
streamThinking.value = ''
} else {
const saved = streamStates.get(convId) || {}
streamStates.set(convId, { ...saved, streamThinking: '' })
}
},
onThinking(text) {
streamThinking.value += text
if (currentConvId.value === convId) {
streamThinking.value += text
} else {
const saved = streamStates.get(convId) || { streamThinking: '' }
streamStates.set(convId, { ...saved, streamThinking: (saved.streamThinking || '') + text })
}
},
onMessage(text) {
streamContent.value += text
if (currentConvId.value === convId) {
streamContent.value += text
} else {
const saved = streamStates.get(convId) || { streamContent: '' }
streamStates.set(convId, { ...saved, streamContent: (saved.streamContent || '') + text })
}
},
onToolCalls(calls) {
console.log('🔧 Tool calls received:', calls)
streamToolCalls.value = calls
if (currentConvId.value === convId) {
streamToolCalls.value.push(...calls.map(c => ({ ...c, result: null })))
} else {
const saved = streamStates.get(convId) || { streamToolCalls: [] }
const newCalls = [...(saved.streamToolCalls || []), ...calls.map(c => ({ ...c, result: null }))]
streamStates.set(convId, { ...saved, streamToolCalls: newCalls })
}
},
onToolResult(result) {
console.log('✅ Tool result received:', result)
//
const call = streamToolCalls.value.find(c => c.function?.name === result.name)
if (call) {
call.result = result.content
if (currentConvId.value === convId) {
const call = streamToolCalls.value.find(c => c.id === result.id)
if (call) call.result = result.content
} else {
//
if (streamToolCalls.value.length > 0) {
streamToolCalls.value[0].result = result.content
const saved = streamStates.get(convId) || { streamToolCalls: [] }
const call = saved.streamToolCalls?.find(c => c.id === result.id)
if (call) call.result = result.content
streamStates.set(convId, { ...saved })
}
},
onProcessStep(step) {
const idx = step.index
if (currentConvId.value === convId) {
// Place the step at its index, padding with nulls so ordering survives any gaps
const newSteps = [...streamProcessSteps.value]
while (newSteps.length <= idx) {
newSteps.push(null)
}
newSteps[idx] = step
streamProcessSteps.value = newSteps
} else {
const saved = streamStates.get(convId) || { streamProcessSteps: [] }
const steps = [...(saved.streamProcessSteps || [])]
while (steps.length <= idx) steps.push(null)
steps[idx] = step
streamStates.set(convId, { ...saved, streamProcessSteps: steps })
}
},
async onDone(data) {
streaming.value = false
// Replace temp message and add assistant message from server
messages.value = messages.value.filter(m => m.id !== userMsg.id)
messages.value.push({
id: data.message_id,
conversation_id: currentConvId.value,
role: 'assistant',
content: streamContent.value,
token_count: data.token_count,
thinking_content: streamThinking.value || null,
tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
created_at: new Date().toISOString(),
})
streamContent.value = ''
streamThinking.value = ''
// Update conversation in list (move to top)
const idx = conversations.value.findIndex(c => c.id === currentConvId.value)
if (idx > 0) {
const [conv] = conversations.value.splice(idx, 1)
conv.message_count = (conv.message_count || 0) + 2
conversations.value.unshift(conv)
} else if (idx === 0) {
conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
}
// Auto title: use first message if title is empty
if (conversations.value[0] && !conversations.value[0].title) {
// Stream finished: discard any saved background state for this conversation
streamStates.delete(convId)
if (currentConvId.value === convId) {
streaming.value = false
currentStreamPromise = null
// Replace temp message and add assistant message from server
messages.value = messages.value.filter(m => m.id !== userMsg.id)
messages.value.push({
id: data.message_id,
conversation_id: convId,
role: 'assistant',
content: streamContent.value,
token_count: data.token_count,
thinking_content: streamThinking.value || null,
tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
process_steps: streamProcessSteps.value.filter(Boolean),
created_at: new Date().toISOString(),
})
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
// Update conversation in list (move to top)
const idx = conversations.value.findIndex(c => c.id === convId)
if (idx > 0) {
const [conv] = conversations.value.splice(idx, 1)
conv.message_count = (conv.message_count || 0) + 2
conversations.value.unshift(conv)
} else if (idx === 0) {
conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
}
// Auto title: use first message if title is empty
if (conversations.value[0] && !conversations.value[0].title) {
try {
await conversationApi.update(convId, { title: content.slice(0, 30) })
conversations.value[0].title = content.slice(0, 30)
} catch (_) {}
}
} else {
// Finished while another conversation is active: refresh its message count in the sidebar
try {
await conversationApi.update(currentConvId.value, { title: content.slice(0, 30) })
conversations.value[0].title = content.slice(0, 30)
const res = await messageApi.list(convId, null, 50)
//
const idx = conversations.value.findIndex(c => c.id === convId)
if (idx >= 0) {
conversations.value[idx].message_count = res.data.items.length
}
} catch (_) {}
}
},
onError(msg) {
streaming.value = false
streamContent.value = ''
streamThinking.value = ''
console.error('Stream error:', msg)
streamStates.delete(convId)
if (currentConvId.value === convId) {
streaming.value = false
currentStreamPromise = null
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
console.error('Stream error:', msg)
}
},
})
}

View File

@ -95,7 +95,7 @@ export const messageApi = {
return request(`/conversations/${convId}/messages?${params}`)
},
send(convId, content, { stream = true, toolsEnabled = true, onThinking, onMessage, onToolCalls, onToolResult, onDone, onError } = {}) {
send(convId, content, { stream = true, toolsEnabled = true, onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError } = {}) {
if (!stream) {
return request(`/conversations/${convId}/messages`, {
method: 'POST',
@ -137,7 +137,9 @@ export const messageApi = {
currentEvent = line.slice(7).trim()
} else if (line.startsWith('data: ')) {
const data = JSON.parse(line.slice(6))
if (currentEvent === 'thinking' && onThinking) {
if (currentEvent === 'thinking_start' && onThinkingStart) {
onThinkingStart()
} else if (currentEvent === 'thinking' && onThinking) {
onThinking(data.content)
} else if (currentEvent === 'message' && onMessage) {
onMessage(data.content)
@ -145,6 +147,8 @@ export const messageApi = {
onToolCalls(data.calls)
} else if (currentEvent === 'tool_result' && onToolResult) {
onToolResult(data)
} else if (currentEvent === 'process_step' && onProcessStep) {
onProcessStep(data)
} else if (currentEvent === 'done' && onDone) {
onDone(data)
} else if (currentEvent === 'error' && onError) {

View File

@ -38,6 +38,7 @@
:content="msg.content"
:thinking-content="msg.thinking_content"
:tool-calls="msg.tool_calls"
:process-steps="msg.process_steps"
:tool-name="msg.name"
:token-count="msg.token_count"
:created-at="msg.created_at"
@ -51,6 +52,7 @@
<ProcessBlock
:thinking-content="streamingThinking"
:tool-calls="streamingToolCalls"
:process-steps="streamingProcessSteps"
:streaming="streaming"
/>
<div class="message-content streaming-content" v-html="renderedStreamContent || '<span class=\'placeholder\'>...</span>'"></div>
@ -84,6 +86,7 @@ const props = defineProps({
streamingContent: { type: String, default: '' },
streamingThinking: { type: String, default: '' },
streamingToolCalls: { type: Array, default: () => [] },
streamingProcessSteps: { type: Array, default: () => [] },
hasMoreMessages: { type: Boolean, default: false },
loadingMore: { type: Boolean, default: false },
toolsEnabled: { type: Boolean, default: true },

View File

@ -4,9 +4,10 @@
<div v-else class="avatar">claw</div>
<div class="message-body">
<ProcessBlock
v-if="thinkingContent || (toolCalls && toolCalls.length > 0)"
v-if="thinkingContent || (toolCalls && toolCalls.length > 0) || (processSteps && processSteps.length > 0)"
:thinking-content="thinkingContent"
:tool-calls="toolCalls"
:process-steps="processSteps"
/>
<div v-if="role === 'tool'" class="tool-result-content">
<div class="tool-badge">工具返回结果: {{ toolName }}</div>
@ -43,6 +44,7 @@ const props = defineProps({
content: { type: String, default: '' },
thinkingContent: { type: String, default: '' },
toolCalls: { type: Array, default: () => [] },
processSteps: { type: Array, default: () => [] },
toolName: { type: String, default: '' },
tokenCount: { type: Number, default: 0 },
createdAt: { type: String, default: '' },

View File

@ -12,7 +12,7 @@
</button>
<div v-if="allExpanded" class="process-list">
<div v-for="(item, index) in processItems" :key="index" class="process-item" :class="item.type">
<div v-for="item in processItems" :key="item.key" class="process-item" :class="[item.type, { loading: item.loading }]">
<div class="process-header" @click="toggleItem(item.index)">
<div class="process-icon">
<svg v-if="item.type === 'thinking'" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
@ -27,14 +27,15 @@
</svg>
</div>
<span class="process-label">{{ item.label }}</span>
<span v-if="item.type === 'tool_result'" class="process-summary" :class="{ success: item.isSuccess, error: !item.isSuccess }">{{ item.summary }}</span>
<span v-if="item.loading" class="loading-dots">...</span>
<span v-else-if="item.type === 'tool_result'" class="process-summary" :class="{ success: item.isSuccess, error: !item.isSuccess }">{{ item.summary }}</span>
<span class="process-time">{{ item.time }}</span>
<svg class="item-arrow" :class="{ open: isItemExpanded(item.index) }" width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<svg v-if="!item.loading" class="item-arrow" :class="{ open: isItemExpanded(item.index) }" width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<polyline points="6 9 12 15 18 9"></polyline>
</svg>
</div>
<div v-if="isItemExpanded(item.index)" class="process-content">
<div v-if="isItemExpanded(item.index) && !item.loading" class="process-content">
<div v-if="item.type === 'thinking'" class="thinking-text">{{ item.content }}</div>
<div v-else-if="item.type === 'tool_call'" class="tool-call-detail">
@ -64,6 +65,7 @@ import { ref, computed, watch } from 'vue'
const props = defineProps({
thinkingContent: { type: String, default: '' },
toolCalls: { type: Array, default: () => [] },
processSteps: { type: Array, default: () => [] },
streaming: { type: Boolean, default: false }
})
@ -72,32 +74,106 @@ const itemExpanded = ref({}) // 存储每个项目的展开状态
const processItems = computed(() => {
const items = []
let index = 0
let idx = 0
// Prefer the ordered processSteps stream (interleaved thinking/tool_call/tool_result) when available
if (props.processSteps && props.processSteps.length > 0) {
props.processSteps.forEach((step, stepIdx) => {
if (!step) return
if (step.type === 'thinking') {
items.push({
type: 'thinking',
label: '思考过程',
content: step.content,
time: '',
index: idx,
key: `thinking-${idx}`,
loading: false
})
idx++
} else if (step.type === 'tool_call') {
items.push({
type: 'tool_call',
label: `调用工具: ${step.name || '未知工具'}`,
toolName: step.name || '未知工具',
arguments: formatArgs(step.arguments),
id: step.id,
index: idx,
key: `tool_call-${step.id || idx}`,
loading: false
})
idx++
} else if (step.type === 'tool_result') {
const resultSummary = getResultSummary(step.content)
items.push({
type: 'tool_result',
label: `工具返回: ${step.name || '未知工具'}`,
content: formatResult(step.content),
summary: resultSummary.text,
isSuccess: resultSummary.success,
id: step.id,
index: idx,
key: `tool_result-${step.id || idx}`,
loading: false
})
idx++
}
})
// While streaming, mark a trailing tool_call with no result yet as loading
if (props.streaming && items.length > 0) {
const lastItem = items[items.length - 1]
//
if (lastItem.type === 'tool_call') {
lastItem.loading = true
lastItem.label = `执行工具: ${lastItem.toolName}`
}
}
return items
}
// Fallback: legacy rendering from thinkingContent / toolCalls when no processSteps exist
if (props.thinkingContent) {
items.push({
type: 'thinking',
label: '思考过程',
content: props.thinkingContent,
time: '',
index: index++
index: idx,
key: `thinking-${idx}`,
loading: false
})
idx++
} else if (props.streaming && items.length === 0) {
// Show a loading placeholder while waiting for the first thinking chunk
items.push({
type: 'thinking',
label: '思考中',
content: '',
time: '',
index: idx,
key: `thinking-loading`,
loading: true
})
idx++
}
//
if (props.toolCalls && props.toolCalls.length > 0) {
props.toolCalls.forEach((call, i) => {
//
items.push({
type: 'tool_call',
label: `调用工具: ${call.function?.name || '未知工具'}`,
toolName: call.function?.name || '未知工具',
arguments: formatArgs(call.function?.arguments),
index: index++
id: call.id,
index: idx,
key: `tool_call-${call.id || idx}`,
loading: false
})
idx++
//
if (call.result) {
const resultSummary = getResultSummary(call.result)
items.push({
@ -106,8 +182,16 @@ const processItems = computed(() => {
content: formatResult(call.result),
summary: resultSummary.text,
isSuccess: resultSummary.success,
index: index++
id: call.id,
index: idx,
key: `tool_result-${call.id || idx}`,
loading: false
})
idx++
} else if (props.streaming) {
//
items[items.length - 1].loading = true
items[items.length - 1].label = `执行工具: ${call.function?.name || '未知工具'}`
}
})
}
@ -306,6 +390,31 @@ watch(() => props.streaming, (streaming) => {
transform: rotate(180deg);
}
.loading-dots {
font-size: 16px;
font-weight: 700;
color: var(--accent-primary);
animation: pulse 1s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { opacity: 0.4; }
50% { opacity: 1; }
}
.process-item.loading .process-header {
background: var(--bg-hover);
}
.process-item.loading .process-icon {
animation: spin 1s linear infinite;
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
.process-content {
padding: 12px;
background: var(--bg-primary);