fix: 修复数据传递问题

This commit is contained in:
ViperEkura 2026-03-26 21:17:53 +08:00
parent 39fb220cf2
commit 6ffbb29ec7
11 changed files with 349 additions and 151 deletions

View File

@ -96,7 +96,16 @@ class Message(db.Model):
role = db.Column(db.String(16), nullable=False) # user, assistant, system, tool
# Unified JSON structure:
# User: {"text": "...", "attachments": [{"name": "a.py", "extension": "py", "content": "..."}]}
# Assistant: {"text": "...", "thinking": "...", "tool_calls": [{"id": "...", "name": "...", "arguments": "...", "result": "..."}]}
# Assistant: {
# "text": "...",
# "tool_calls": [...], // legacy flat structure
# "steps": [ // ordered steps for rendering (primary source of truth)
# {"id": "step-0", "index": 0, "type": "thinking", "content": "..."},
# {"id": "step-1", "index": 1, "type": "text", "content": "..."},
# {"id": "step-2", "index": 2, "type": "tool_call", "id_ref": "call_xxx", "name": "...", "arguments": "..."},
# {"id": "step-3", "index": 3, "type": "tool_result", "id_ref": "call_xxx", "name": "...", "content": "..."},
# ]
# }
content = db.Column(LongText, default="")
token_count = db.Column(db.Integer, default=0)
created_at = db.Column(db.DateTime, default=lambda: datetime.now(timezone.utc), index=True)

View File

@ -224,7 +224,7 @@ def upload_project_folder():
# Create project record
project = Project(
id=str(uuid.uuid4()),
user_id=user_id,
user_id=user.id,
name=project_name,
path=relative_path,
description=description

View File

@ -53,8 +53,9 @@ class ChatService:
messages = list(initial_messages)
all_tool_calls = []
all_tool_results = []
all_steps = [] # Collect all ordered steps for DB storage (thinking/text/tool_call/tool_result)
step_index = 0 # Track global step index for ordering
for iteration in range(self.MAX_ITERATIONS):
full_content = ""
full_thinking = ""
@ -62,10 +63,10 @@ class ChatService:
prompt_tokens = 0
msg_id = str(uuid.uuid4())
tool_calls_list = []
# Send thinking_start event to clear previous thinking in frontend
yield f"event: thinking_start\ndata: {{}}\n\n"
try:
with app.app_context():
active_conv = db.session.get(Conversation, conv_id)
@ -79,7 +80,8 @@ class ChatService:
stream=True,
)
resp.raise_for_status()
# Stream LLM response chunk by chunk
for line in resp.iter_lines():
if not line:
continue
@ -93,76 +95,109 @@ class ChatService:
chunk = json.loads(data_str)
except json.JSONDecodeError:
continue
delta = chunk["choices"][0].get("delta", {})
# Process thinking - send as process_step
# Accumulate thinking content for this iteration
reasoning = delta.get("reasoning_content", "")
if reasoning:
full_thinking += reasoning
# Still send thinking event for backward compatibility
yield f"event: thinking\ndata: {json.dumps({'content': reasoning}, ensure_ascii=False)}\n\n"
# Process text
# Accumulate text content for this iteration
text = delta.get("content", "")
if text:
full_content += text
yield f"event: message\ndata: {json.dumps({'content': text}, ensure_ascii=False)}\n\n"
# Process tool calls
# Accumulate tool calls from streaming deltas
tool_calls_list = self._process_tool_calls_delta(delta, tool_calls_list)
usage = chunk.get("usage", {})
if usage:
token_count = usage.get("completion_tokens", 0)
prompt_tokens = usage.get("prompt_tokens", 0)
except Exception as e:
yield f"event: error\ndata: {json.dumps({'content': str(e)}, ensure_ascii=False)}\n\n"
return
# Tool calls exist - execute and continue
# --- Tool calls exist: emit finalized steps, execute tools, continue loop ---
if tool_calls_list:
all_tool_calls.extend(tool_calls_list)
# Send thinking as a complete step if exists
# Record thinking as a finalized step (preserves order)
if full_thinking:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
step_data = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'thinking',
'content': full_thinking,
}
all_steps.append(step_data)
yield f"event: process_step\ndata: {json.dumps(step_data, ensure_ascii=False)}\n\n"
step_index += 1
# Send text as a step if exists (text before tool calls)
# Record text as a finalized step (text that preceded tool calls)
if full_content:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'text', 'content': full_content}, ensure_ascii=False)}\n\n"
step_data = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'text',
'content': full_content,
}
all_steps.append(step_data)
yield f"event: process_step\ndata: {json.dumps(step_data, ensure_ascii=False)}\n\n"
step_index += 1
# Also send legacy tool_calls event for backward compatibility
# Legacy tool_calls event for backward compatibility
yield f"event: tool_calls\ndata: {json.dumps({'calls': tool_calls_list}, ensure_ascii=False)}\n\n"
# Process each tool call one by one, send result immediately
# Execute each tool call, emit tool_call + tool_result as paired steps
tool_results = []
for tc in tool_calls_list:
# Send tool call step
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_call', 'id': tc['id'], 'name': tc['function']['name'], 'arguments': tc['function']['arguments']}, ensure_ascii=False)}\n\n"
# Emit tool_call step (before execution)
call_step = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'tool_call',
'id_ref': tc['id'],
'name': tc['function']['name'],
'arguments': tc['function']['arguments'],
}
all_steps.append(call_step)
yield f"event: process_step\ndata: {json.dumps(call_step, ensure_ascii=False)}\n\n"
step_index += 1
# Execute this single tool call (needs app context for db access)
# Execute the tool
with app.app_context():
single_result = self.executor.process_tool_calls([tc], context)
tool_results.extend(single_result)
# Send tool result step immediately
# Emit tool_result step (after execution)
tr = single_result[0]
try:
result_content = json.loads(tr["content"])
skipped = result_content.get("skipped", False)
except:
skipped = False
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'tool_result', 'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
result_step = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'tool_result',
'id_ref': tr['tool_call_id'],
'name': tr['name'],
'content': tr['content'],
'skipped': skipped,
}
all_steps.append(result_step)
yield f"event: process_step\ndata: {json.dumps(result_step, ensure_ascii=False)}\n\n"
step_index += 1
# Also send legacy tool_result event
# Legacy tool_result event for backward compatibility
yield f"event: tool_result\ndata: {json.dumps({'id': tr['tool_call_id'], 'name': tr['name'], 'content': tr['content'], 'skipped': skipped}, ensure_ascii=False)}\n\n"
# Append assistant message + tool results for the next iteration
messages.append({
"role": "assistant",
"content": full_content or None,
@ -171,28 +206,41 @@ class ChatService:
messages.extend(tool_results)
all_tool_results.extend(tool_results)
continue
# No tool calls - finish
# Send thinking as a step if exists
# --- No tool calls: final iteration — emit remaining steps and save ---
if full_thinking:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'thinking', 'content': full_thinking}, ensure_ascii=False)}\n\n"
step_data = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'thinking',
'content': full_thinking,
}
all_steps.append(step_data)
yield f"event: process_step\ndata: {json.dumps(step_data, ensure_ascii=False)}\n\n"
step_index += 1
# Send text as a step if exists
if full_content:
yield f"event: process_step\ndata: {json.dumps({'index': step_index, 'type': 'text', 'content': full_content}, ensure_ascii=False)}\n\n"
step_data = {
'id': f'step-{step_index}',
'index': step_index,
'type': 'text',
'content': full_content,
}
all_steps.append(step_data)
yield f"event: process_step\ndata: {json.dumps(step_data, ensure_ascii=False)}\n\n"
step_index += 1
suggested_title = None
with app.app_context():
# Build content JSON
# Build content JSON with ordered steps array for DB storage.
# 'steps' is the single source of truth for rendering order.
content_json = {
"text": full_content,
}
if full_thinking:
content_json["thinking"] = full_thinking
if all_tool_calls:
content_json["tool_calls"] = self._build_tool_calls_json(all_tool_calls, all_tool_results)
# Store ordered steps — the single source of truth for rendering order
content_json["steps"] = all_steps
msg = Message(
id=msg_id,
@ -208,15 +256,13 @@ class ChatService:
if user:
record_token_usage(user.id, conv_model, prompt_tokens, token_count)
# Check if we need to set title (first message in conversation)
# Auto-generate title from first user message if needed
conv = db.session.get(Conversation, conv_id)
if conv and (not conv.title or conv.title == "新对话"):
# Get user message content
user_msg = Message.query.filter_by(
conversation_id=conv_id, role="user"
).order_by(Message.created_at.asc()).first()
if user_msg and user_msg.content:
# Parse content JSON to get text
try:
content_data = json.loads(user_msg.content)
title_text = content_data.get("text", "")[:30]
@ -226,7 +272,6 @@ class ChatService:
suggested_title = title_text
else:
suggested_title = "新对话"
# Refresh conv to avoid stale state
db.session.refresh(conv)
conv.title = suggested_title
db.session.commit()

View File

@ -63,7 +63,12 @@ def to_dict(inst, **extra):
def message_to_dict(msg: Message) -> dict:
"""Convert message to dict, parsing JSON content"""
"""Convert message to dict, parsing JSON content.
For assistant messages, extracts the 'steps' array which preserves the
ordered sequence of thinking/text/tool_call/tool_result steps, so the
frontend can render them in the correct interleaved order.
"""
result = to_dict(msg)
# Parse content JSON
@ -71,16 +76,15 @@ def message_to_dict(msg: Message) -> dict:
try:
content_data = json.loads(msg.content)
if isinstance(content_data, dict):
# Extract all fields from JSON
result["text"] = content_data.get("text", "")
if content_data.get("attachments"):
result["attachments"] = content_data["attachments"]
if content_data.get("thinking"):
result["thinking"] = content_data["thinking"]
if content_data.get("tool_calls"):
result["tool_calls"] = content_data["tool_calls"]
# Extract ordered steps array for correct rendering order
if content_data.get("steps"):
result["process_steps"] = content_data["steps"]
else:
# Fallback: plain text
result["text"] = msg.content
except (json.JSONDecodeError, TypeError):
result["text"] = msg.content

View File

@ -175,7 +175,6 @@ classDiagram
```json
{
"text": "AI 回复的文本内容",
"thinking": "思考过程(可选)",
"tool_calls": [
{
"id": "call_xxx",
@ -189,10 +188,55 @@ classDiagram
"skipped": false,
"execution_time": 0.5
}
],
"steps": [
{
"id": "step-0",
"index": 0,
"type": "thinking",
"content": "第一轮思考过程..."
},
{
"id": "step-1",
"index": 1,
"type": "text",
"content": "工具调用前的文本..."
},
{
"id": "step-2",
"index": 2,
"type": "tool_call",
"id_ref": "call_abc123",
"name": "web_search",
"arguments": "{\"query\": \"...\"}"
},
{
"id": "step-3",
"index": 3,
"type": "tool_result",
"id_ref": "call_abc123",
"name": "web_search",
"content": "{\"success\": true, ...}",
"skipped": false
},
{
"id": "step-4",
"index": 4,
"type": "thinking",
"content": "第二轮思考过程..."
},
{
"id": "step-5",
"index": 5,
"type": "text",
"content": "最终回复文本..."
}
]
}
```
`steps` 字段是**渲染顺序的唯一数据源**,按 `index` 顺序排列。thinking、text、tool_call、tool_result 可以在多轮迭代中穿插出现。`id_ref` 用于 tool_call 和 tool_result 步骤之间的匹配(对应 LLM 返回的工具调用 ID)。`tool_calls` 字段保留用于向后兼容旧版前端。
### 服务层
```mermaid
@ -426,33 +470,53 @@ def process_tool_calls(self, tool_calls, context=None):
| `message` | 回复内容的增量片段 |
| `tool_calls` | 工具调用信息 |
| `tool_result` | 工具执行结果 |
| `process_step` | 处理步骤(按顺序:thinking/text/tool_call/tool_result支持穿插显示 |
| `process_step` | 有序处理步骤(thinking/text/tool_call/tool_result),支持穿插显示。携带 `id`、`index` 确保渲染顺序 |
| `error` | 错误信息 |
| `done` | 回复结束,携带 message_id 和 token_count |
### process_step 事件格式
每个 `process_step` 事件携带一个带 `id`、`index` 和 `type` 的步骤对象。步骤按 `index` 顺序排列,确保前端可以正确渲染穿插的思考、文本和工具调用。
```json
// 思考过程
{"index": 0, "type": "thinking", "content": "完整思考内容..."}
{"id": "step-0", "index": 0, "type": "thinking", "content": "完整思考内容..."}
// 回复文本(可穿插在任意步骤之间)
{"index": 1, "type": "text", "content": "回复文本内容..."}
{"id": "step-1", "index": 1, "type": "text", "content": "回复文本内容..."}
// 工具调用
{"index": 2, "type": "tool_call", "id": "call_abc123", "name": "web_search", "arguments": "{\"query\": \"...\"}"}
// 工具调用id_ref 存储工具调用 ID用于与 tool_result 匹配)
{"id": "step-2", "index": 2, "type": "tool_call", "id_ref": "call_abc123", "name": "web_search", "arguments": "{\"query\": \"...\"}"}
// 工具返回
{"index": 3, "type": "tool_result", "id": "call_abc123", "name": "web_search", "content": "{\"success\": true, ...}", "skipped": false}
// 工具返回id_ref 与 tool_call 的 id_ref 匹配)
{"id": "step-3", "index": 3, "type": "tool_result", "id_ref": "call_abc123", "name": "web_search", "content": "{\"success\": true, ...}", "skipped": false}
```
字段说明:
- `index`: 步骤序号,确保按正确顺序显示
- `type`: 步骤类型thinking/tool_call/tool_result
- `id`: 工具调用唯一标识,用于匹配工具调用和返回结果
- `name`: 工具名称
- `content`: 内容或结果
- `skipped`: 工具是否被跳过(失败后跳过)
| 字段 | 说明 |
|------|------|
| `id` | 步骤唯一标识(格式 `step-{index}`),用于前端 key |
| `index` | 步骤序号,确保按正确顺序显示 |
| `type` | 步骤类型:`thinking` / `text` / `tool_call` / `tool_result` |
| `id_ref` | 工具调用引用 ID仅 tool_call/tool_result用于匹配调用与结果 |
| `name` | 工具名称(仅 tool_call/tool_result |
| `arguments` | 工具调用参数 JSON 字符串(仅 tool_call |
| `content` | 内容thinking 的思考内容、text 的文本、tool_result 的返回结果) |
| `skipped` | 工具是否被跳过(仅 tool_result |
### 多轮迭代中的步骤顺序
一次完整的 LLM 交互可能经历多轮工具调用循环,每轮产生的步骤按以下顺序追加:
```
迭代 1: thinking → text → tool_call → tool_result
迭代 2: thinking → text → tool_call → tool_result
...
最终轮: thinking → text(无工具调用,结束)
```
所有步骤通过全局递增的 `index` 保证顺序。后端在完成所有迭代后,将这些步骤存入 `content_json["steps"]` 数组写入数据库。前端页面刷新时从 API 加载消息,`message_to_dict` 提取 `steps` 字段映射为 `process_steps` 返回,ProcessBlock 组件按 `index` 顺序渲染。
---
@ -509,10 +573,12 @@ def process_tool_calls(self, tool_calls, context=None):
| `id` | String(64) | UUID 主键 |
| `conversation_id` | String(64) | 外键关联 Conversation |
| `role` | String(16) | user/assistant/system/tool |
| `content` | LongText | JSON 格式内容(见上方结构说明) |
| `content` | LongText | JSON 格式内容(见上方结构说明),assistant 消息包含 `steps` 有序步骤数组 |
| `token_count` | Integer | Token 数量 |
| `created_at` | DateTime | 创建时间 |
`message_to_dict()` 辅助函数负责解析 `content` JSON,并提取 `steps` 字段映射为 `process_steps` 返回给前端,确保页面刷新后仍能按正确顺序渲染穿插的思考、文本和工具调用。
### TokenUsageToken 使用统计)
| 字段 | 类型 | 说明 |

View File

@ -2,6 +2,7 @@
<div class="app">
<Sidebar
:conversations="conversations"
:projects="projects"
:current-id="currentConvId"
:loading="loadingConvs"
:has-more="hasMoreConvs"
@ -11,6 +12,7 @@
@create-project="showCreateModal = true"
@browse-project="browseProject"
@create-in-project="createConversationInProject"
@delete-project="deleteProject"
@toggle-settings="togglePanel('settings')"
@toggle-stats="togglePanel('stats')"
/>
@ -42,7 +44,6 @@
:messages="messages"
:streaming="streaming"
:streaming-content="streamContent"
:streaming-thinking="streamThinking"
:streaming-tool-calls="streamToolCalls"
:streaming-process-steps="streamProcessSteps"
:has-more-messages="hasMoreMessages"
@ -128,6 +129,9 @@ const loadingConvs = ref(false)
const hasMoreConvs = ref(false)
const nextConvCursor = ref(null)
// -- Projects state --
const projects = ref([])
// -- Messages state --
const messages = shallowRef([])
const hasMoreMessages = ref(false)
@ -135,11 +139,14 @@ const loadingMessages = ref(false)
const nextMsgCursor = ref(null)
// -- Streaming state --
// These refs hold the real-time streaming data for the current conversation.
// When switching conversations, the current state is saved to streamStates Map
// and restored when switching back. On stream completion (onDone), the finalized
// processSteps are stored in the message object and later persisted to DB.
const streaming = ref(false)
const streamContent = ref('')
const streamThinking = ref('')
const streamToolCalls = shallowRef([])
const streamProcessSteps = shallowRef([])
const streamContent = ref('') // Accumulated text content during current iteration
const streamToolCalls = shallowRef([]) // All tool calls across iterations (legacy compat)
const streamProcessSteps = shallowRef([]) // Ordered steps: thinking/text/tool_call/tool_result
//
const streamStates = new Map()
@ -147,7 +154,6 @@ const streamStates = new Map()
function setStreamState(isActive) {
streaming.value = isActive
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
}
@ -253,7 +259,6 @@ async function selectConversation(id) {
streamStates.set(currentConvId.value, {
streaming: true,
streamContent: streamContent.value,
streamThinking: streamThinking.value,
streamToolCalls: [...streamToolCalls.value],
streamProcessSteps: [...streamProcessSteps.value],
messages: [...messages.value],
@ -269,7 +274,6 @@ async function selectConversation(id) {
if (savedState && savedState.streaming) {
streaming.value = true
streamContent.value = savedState.streamContent
streamThinking.value = savedState.streamThinking
streamToolCalls.value = savedState.streamToolCalls
streamProcessSteps.value = savedState.streamProcessSteps
messages.value = savedState.messages || []
@ -310,13 +314,6 @@ function loadMoreMessages() {
// -- Helpers: create stream callbacks for a conversation --
function createStreamCallbacks(convId, { updateConvList = true } = {}) {
return {
onThinkingStart() {
updateStreamField(convId, 'streamThinking', streamThinking, '')
updateStreamField(convId, 'streamContent', streamContent, '')
},
onThinking(text) {
updateStreamField(convId, 'streamThinking', streamThinking, prev => (prev || '') + text)
},
onMessage(text) {
updateStreamField(convId, 'streamContent', streamContent, prev => (prev || '') + text)
},
@ -335,6 +332,10 @@ function createStreamCallbacks(convId, { updateConvList = true } = {}) {
})
},
onProcessStep(step) {
// Insert step at its index position to preserve ordering.
// Uses sparse array strategy: fills gaps with null.
// Each step carries { id, index, type, content, ... }
// these are the same steps that get stored to DB as the 'steps' array.
updateStreamField(convId, 'streamProcessSteps', streamProcessSteps, prev => {
const steps = prev ? [...prev] : []
while (steps.length <= step.index) steps.push(null)
@ -347,12 +348,15 @@ function createStreamCallbacks(convId, { updateConvList = true } = {}) {
if (currentConvId.value === convId) {
streaming.value = false
// Build the final message object.
// process_steps is the primary ordered data for rendering (thinking/text/tool_call/tool_result).
// When page reloads, these steps are loaded from DB via the 'steps' field in content JSON.
messages.value = [...messages.value, {
id: data.message_id,
conversation_id: convId,
role: 'assistant',
text: streamContent.value,
thinking: streamThinking.value || null,
tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
process_steps: streamProcessSteps.value.filter(Boolean),
token_count: data.token_count,
@ -507,13 +511,13 @@ async function createProject() {
creatingProject.value = true
try {
await projectApi.create({
user_id: 1,
name: newProjectName.value.trim(),
description: newProjectDesc.value.trim(),
})
showCreateModal.value = false
newProjectName.value = ''
newProjectDesc.value = ''
await loadProjects()
} catch (e) {
console.error('Failed to create project:', e)
} finally {
@ -521,8 +525,47 @@ async function createProject() {
}
}
// -- Load projects --
async function loadProjects() {
try {
const res = await projectApi.list()
projects.value = res.data.projects || []
} catch (e) {
console.error('Failed to load projects:', e)
}
}
// -- Delete project --
async function deleteProject(project) {
if (!confirm(`确定删除项目「${project.name}」及其所有对话?`)) return
try {
await projectApi.delete(project.id)
// Remove conversations belonging to this project
conversations.value = conversations.value.filter(c => c.project_id !== project.id)
// If current conversation was in this project, switch away
if (currentConvId.value && conversations.value.length > 0) {
const currentConv = conversations.value.find(c => c.id === currentConvId.value)
if (!currentConv || currentConv.project_id === project.id) {
await selectConversation(conversations.value[0].id)
}
} else if (conversations.value.length === 0) {
currentConvId.value = null
messages.value = []
currentProject.value = null
}
if (currentProject.value?.id === project.id) {
currentProject.value = null
showFileExplorer.value = false
}
await loadProjects()
} catch (e) {
console.error('Failed to delete project:', e)
}
}
// -- Init --
onMounted(() => {
loadProjects()
loadConversations()
})
</script>

View File

@ -194,8 +194,8 @@ export const messageApi = {
}
export const projectApi = {
list(userId) {
return request(`/projects${buildQueryParams({ user_id: userId })}`)
list() {
return request('/projects')
},
create(data) {
@ -211,7 +211,6 @@ export const projectApi = {
uploadFolder(data) {
const formData = new FormData()
formData.append('user_id', String(data.user_id))
formData.append('name', data.name || '')
formData.append('description', data.description || '')
for (const file of data.files) {

View File

@ -27,12 +27,11 @@
v-for="msg in messages"
:key="msg.id"
:data-msg-id="msg.id"
v-memo="[msg.text, msg.thinking, msg.tool_calls, msg.process_steps, msg.attachments]"
v-memo="[msg.text, msg.tool_calls, msg.process_steps, msg.attachments]"
>
<MessageBubble
:role="msg.role"
:text="msg.text"
:thinking-content="msg.thinking"
:tool-calls="msg.tool_calls"
:process-steps="msg.process_steps"
:token-count="msg.token_count"
@ -48,7 +47,6 @@
<div class="avatar">claw</div>
<div class="message-body">
<ProcessBlock
:thinking-content="streamingThinking"
:tool-calls="streamingToolCalls"
:process-steps="streamingProcessSteps"
:streaming-content="streamingContent"
@ -90,7 +88,6 @@ const props = defineProps({
messages: { type: Array, required: true },
streaming: { type: Boolean, default: false },
streamingContent: { type: String, default: '' },
streamingThinking: { type: String, default: '' },
streamingToolCalls: { type: Array, default: () => [] },
streamingProcessSteps: { type: Array, default: () => [] },
hasMoreMessages: { type: Boolean, default: false },

View File

@ -3,7 +3,7 @@
<div v-if="role === 'user'" class="avatar">user</div>
<div v-else class="avatar">claw</div>
<div class="message-container">
<!-- 附件列表 -->
<!-- File attachments list -->
<div v-if="attachments && attachments.length > 0" class="attachments-list">
<div v-for="(file, index) in attachments" :key="index" class="attachment-item">
<span class="attachment-icon">{{ file.extension }}</span>
@ -11,18 +11,18 @@
</div>
</div>
<div ref="messageRef" class="message-body">
<!-- 新格式: processSteps 包含所有步骤 text统一通过 ProcessBlock 渲染 -->
<!-- Primary rendering path: processSteps contains all ordered steps -->
<!-- (thinking, text, tool_call, tool_result) from both streaming and DB load -->
<ProcessBlock
v-if="processSteps && processSteps.length > 0"
:process-steps="processSteps"
:thinking-content="thinkingContent"
:tool-calls="toolCalls"
/>
<!-- 旧格式: processSteps分开渲染 ProcessBlock + 文本 -->
<!-- Fallback path: old messages without processSteps in DB, -->
<!-- render toolCalls via ProcessBlock and text separately -->
<template v-else>
<ProcessBlock
v-if="thinkingContent || (toolCalls && toolCalls.length > 0)"
:thinking-content="thinkingContent"
v-if="toolCalls && toolCalls.length > 0"
:tool-calls="toolCalls"
/>
<div class="md-content message-content" v-html="renderedContent"></div>
@ -62,15 +62,18 @@ import { useCodeEnhancement } from '../composables/useCodeEnhancement'
import ProcessBlock from './ProcessBlock.vue'
const props = defineProps({
role: { type: String, required: true },
text: { type: String, default: '' },
thinkingContent: { type: String, default: '' },
toolCalls: { type: Array, default: () => [] },
role: { type: String, required: true }, // 'user' or 'assistant'
text: { type: String, default: '' }, // Plain text content (legacy / user messages)
toolCalls: { type: Array, default: () => [] }, // Tool calls array (legacy fallback)
// Ordered steps array primary rendering data source.
// During streaming: accumulated from process_step SSE events.
// On page load: loaded from DB via message_to_dict extracting 'steps' field.
// Each step: { id, index, type: 'thinking'|'text'|'tool_call'|'tool_result', content, ... }
processSteps: { type: Array, default: () => [] },
tokenCount: { type: Number, default: 0 },
createdAt: { type: String, default: '' },
deletable: { type: Boolean, default: false },
attachments: { type: Array, default: () => [] },
attachments: { type: Array, default: () => [] }, // User file attachments
})
defineEmits(['delete', 'regenerate'])

View File

@ -1,6 +1,6 @@
<template>
<div ref="processRef" class="process-block" :class="{ 'is-streaming': streaming }">
<!-- 流式加载还没有任何步骤时 -->
<!-- Placeholder while waiting for the first process step to arrive -->
<div v-if="streaming && processItems.length === 0" class="streaming-placeholder">
<div class="streaming-icon">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
@ -10,10 +10,10 @@
<span class="streaming-text">正在思考中<span class="dots">...</span></span>
</div>
<!-- 按序渲染步骤 -->
<!-- Render all steps in order: thinking, text, tool_call, tool_result interleaved -->
<template v-else>
<template v-for="item in processItems" :key="item.key">
<!-- 思考过程 -->
<!-- Thinking block -->
<div v-if="item.type === 'thinking'" class="step-item thinking">
<div class="step-header" @click="toggleItem(item.key)">
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
@ -30,7 +30,7 @@
</div>
</div>
<!-- 工具调用 -->
<!-- Tool call block -->
<div v-else-if="item.type === 'tool_call'" class="step-item tool_call" :class="{ loading: item.loading }">
<div class="step-header" @click="toggleItem(item.key)">
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
@ -56,11 +56,11 @@
</div>
</div>
<!-- 文本内容 - 直接渲染 markdown -->
<!-- Text content render as markdown -->
<div v-else-if="item.type === 'text'" class="step-item text-content md-content" v-html="item.rendered"></div>
</template>
<!-- 流式进行中指示器 -->
<!-- Active streaming indicator (cursor) -->
<div v-if="streaming" class="streaming-indicator">
<svg class="spinner" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M21 12a9 9 0 1 1-6.219-8.56"/>
@ -78,7 +78,6 @@ import { formatJson, truncate } from '../utils/format'
import { useCodeEnhancement } from '../composables/useCodeEnhancement'
const props = defineProps({
thinkingContent: { type: String, default: '' },
toolCalls: { type: Array, default: () => [] },
processSteps: { type: Array, default: () => [] },
streamingContent: { type: String, default: '' },
@ -87,7 +86,7 @@ const props = defineProps({
const expandedKeys = ref({})
//
// Auto-collapse all items when a new stream starts
watch(() => props.streaming, (v) => {
if (v) expandedKeys.value = {}
})
@ -110,30 +109,45 @@ function getResultSummary(result) {
}
}
// Build ordered process items from all available data (thinking, tool calls, text).
// During streaming, processSteps accumulate completed iterations while streamingContent
// represents the text being generated in the current (latest) iteration.
// When loaded from DB, steps use 'id_ref' for tool_call/tool_result matching;
// during streaming they use 'id'. Both fields are normalized here.
const processItems = computed(() => {
const items = []
// 使 processSteps
// Build items from processSteps finalized steps sent by backend or loaded from DB.
// Steps are ordered: each iteration produces thinking text tool_call tool_result.
if (props.processSteps && props.processSteps.length > 0) {
for (const step of props.processSteps) {
if (!step) continue
if (step.type === 'thinking') {
items.push({ type: 'thinking', content: step.content, summary: truncate(step.content), key: `thinking-${step.index}` })
items.push({
type: 'thinking',
content: step.content,
summary: truncate(step.content),
key: step.id || `thinking-${step.index}`,
})
} else if (step.type === 'tool_call') {
// Normalize: DB-loaded steps use 'id_ref', streaming steps use 'id'
const toolId = step.id_ref || step.id
items.push({
type: 'tool_call',
toolName: step.name || '未知工具',
arguments: formatJson(step.arguments),
summary: truncate(step.arguments),
id: step.id,
key: `tool_call-${step.id || step.index}`,
id: toolId,
key: step.id || `tool_call-${toolId || step.index}`,
loading: false,
result: null,
})
} else if (step.type === 'tool_result') {
// Merge result back into its corresponding tool_call item by matching tool ID
const toolId = step.id_ref || step.id
const summary = getResultSummary(step.content)
const match = items.findLast(it => it.type === 'tool_call' && it.id === step.id)
const match = items.findLast(it => it.type === 'tool_call' && it.id === toolId)
if (match) {
match.result = formatJson(step.content)
match.resultSummary = summary.text
@ -145,12 +159,12 @@ const processItems = computed(() => {
type: 'text',
content: step.content,
rendered: renderMarkdown(step.content),
key: `text-${step.index}`,
key: step.id || `text-${step.index}`,
})
}
}
// tool_call loading
// Mark the last tool_call as loading if it has no result yet (still executing)
if (props.streaming && items.length > 0) {
const last = items[items.length - 1]
if (last.type === 'tool_call' && !last.result) {
@ -158,26 +172,18 @@ const processItems = computed(() => {
}
}
// text
// Append the currently streaming text as a live text item.
// This text belongs to the latest LLM iteration that hasn't finished yet.
if (props.streaming && props.streamingContent) {
const hasTextStep = items.some(it => it.type === 'text')
if (!hasTextStep) {
items.push({
type: 'text',
content: props.streamingContent,
rendered: renderMarkdown(props.streamingContent) || '<span class="placeholder">...</span>',
key: 'text-streaming',
})
}
items.push({
type: 'text',
content: props.streamingContent,
rendered: renderMarkdown(props.streamingContent) || '<span class="placeholder">...</span>',
key: 'text-streaming',
})
}
} else {
// 退 thinking + toolCalls
if (props.thinkingContent) {
items.push({ type: 'thinking', content: props.thinkingContent, summary: truncate(props.thinkingContent), key: 'thinking-0' })
} else if (props.streaming && items.length === 0) {
items.push({ type: 'thinking', content: '', key: 'thinking-loading' })
}
// Fallback: legacy mode for old messages without processSteps stored in DB
if (props.toolCalls && props.toolCalls.length > 0) {
props.toolCalls.forEach((call, i) => {
const toolName = call.function?.name || '未知工具'
@ -197,7 +203,7 @@ const processItems = computed(() => {
})
}
//
// Append streaming text in legacy mode
if (props.streaming && props.streamingContent) {
items.push({
type: 'text',
@ -211,10 +217,10 @@ const processItems = computed(() => {
return items
})
// processBlock
// Enhance code blocks inside process items (syntax highlighting, copy buttons)
const { debouncedEnhance } = useCodeEnhancement(processRef, processItems, { deep: true })
// 使 DOM
// Throttle code enhancement during streaming to reduce DOM operations
watch(() => props.streamingContent?.length, () => {
if (props.streaming) debouncedEnhance()
})
@ -225,7 +231,7 @@ watch(() => props.streamingContent?.length, () => {
width: 100%;
}
/* 流式占位 */
/* Streaming placeholder while waiting for first step */
.streaming-placeholder {
padding: 16px 20px;
display: flex;
@ -263,7 +269,7 @@ watch(() => props.streamingContent?.length, () => {
50% { opacity: 1; }
}
/* 步骤通用 */
/* Step items (shared) */
.step-item {
margin-bottom: 8px;
}
@ -272,7 +278,7 @@ watch(() => props.streamingContent?.length, () => {
margin-bottom: 0;
}
/* 思考过程 */
/* Thinking and tool call step headers */
.thinking .step-header,
.tool_call .step-header {
display: flex;
@ -361,7 +367,7 @@ watch(() => props.streamingContent?.length, () => {
background: var(--bg-hover);
}
/* 步骤展开内容 */
/* Expandable step content panel */
.step-content {
padding: 12px;
margin-top: 4px;
@ -404,7 +410,7 @@ watch(() => props.streamingContent?.length, () => {
word-break: break-word;
}
/* 文本内容直接渲染 */
/* Text content — rendered as markdown */
.text-content {
padding: 0;
font-size: 15px;
@ -418,7 +424,7 @@ watch(() => props.streamingContent?.length, () => {
color: var(--text-tertiary);
}
/* 流式指示器 */
/* Streaming cursor indicator */
.streaming-indicator {
display: flex;
align-items: center;

View File

@ -35,6 +35,16 @@
<path d="M22 19a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V5a2 2 0 0 1 2-2h5l2 3h9a2 2 0 0 1 2 2z"/>
</svg>
</button>
<button
class="btn-group-action btn-delete-project"
title="删除项目"
@click.stop="$emit('deleteProject', { id: group.id, name: group.name })"
>
<svg width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<polyline points="3 6 5 6 21 6"></polyline>
<path d="M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"/>
</svg>
</button>
</div>
<div v-show="expandedGroups[group.id]">
<div
@ -60,8 +70,8 @@
</div>
</div>
<!-- Standalone conversations -->
<div v-if="groupedData.standalone.length > 0" class="project-group">
<!-- Standalone conversations (always visible) -->
<div class="project-group">
<div class="project-header" @click="toggleGroup('__standalone__')">
<svg class="chevron" :class="{ collapsed: !expandedGroups['__standalone__'] }" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<polyline points="6 9 12 15 18 9"/>
@ -131,12 +141,13 @@ import { formatTime } from '../utils/format'
const props = defineProps({
conversations: { type: Array, required: true },
projects: { type: Array, default: () => [] },
currentId: { type: String, default: null },
loading: { type: Boolean, default: false },
hasMore: { type: Boolean, default: false },
})
const emit = defineEmits(['select', 'delete', 'loadMore', 'createProject', 'browseProject', 'createInProject', 'toggleSettings', 'toggleStats'])
const emit = defineEmits(['select', 'delete', 'loadMore', 'createProject', 'browseProject', 'createInProject', 'toggleSettings', 'toggleStats', 'deleteProject'])
const expandedGroups = reactive({})
@ -144,6 +155,16 @@ const groupedData = computed(() => {
const groups = {}
const standalone = []
// First, initialize groups from projects list (includes projects with 0 conversations)
for (const p of props.projects) {
groups[p.id] = {
id: p.id,
name: p.name,
conversations: [],
}
}
// Then merge conversations into groups
for (const conv of props.conversations) {
if (conv.project_id) {
if (!groups[conv.project_id]) {
@ -162,7 +183,7 @@ const groupedData = computed(() => {
for (const id of Object.keys(groups)) {
if (!(id in expandedGroups)) expandedGroups[id] = true
}
if (standalone.length > 0 && !('__standalone__' in expandedGroups)) {
if (!('__standalone__' in expandedGroups)) {
expandedGroups['__standalone__'] = true
}
@ -320,6 +341,11 @@ function onScroll(e) {
opacity: 1;
}
.btn-delete-project:hover {
color: var(--danger-color);
background: var(--danger-bg);
}
.conversation-item {
display: flex;
align-items: center;