chore: 精简代码并更新文档

This commit is contained in:
ViperEkura 2026-03-26 13:26:15 +08:00
parent 8325100c90
commit 31cfcd3ed2
13 changed files with 237 additions and 617 deletions

View File

@ -17,6 +17,8 @@
### 1. 安装依赖
```bash
conda create -n claw python=3.12
conda activate claw
pip install -e .
```
@ -50,19 +52,13 @@ db_type: sqlite
db_sqlite_file: nano_claw.db
```
### 3. 数据库迁移(首次运行或升级)
```bash
python -m backend.migrations.add_project_support
```
### 4. 启动后端
### 3. 启动后端
```bash
python -m backend.run
```
### 5. 启动前端
### 4. 启动前端
```bash
cd frontend
@ -124,7 +120,7 @@ frontend/
| `POST` | `/api/conversations` | 创建会话 |
| `GET` | `/api/conversations` | 会话列表 |
| `GET` | `/api/conversations/:id/messages` | 消息列表 |
| `POST` | `/api/conversations/:id/messages` | 发送消息(SSE) |
| `POST` | `/api/conversations/:id/messages` | 发送消息(SSE 流式) |
| `GET` | `/api/projects` | 项目列表 |
| `POST` | `/api/projects` | 创建项目 |
| `POST` | `/api/projects/upload` | 上传文件夹 |

View File

@ -1,20 +1,10 @@
from backend import db
from datetime import datetime, timezone
from flask import current_app
from sqlalchemy import Text
from sqlalchemy.dialects.mysql import LONGTEXT as MYSQL_LONGTEXT
def get_longtext_type():
    """Get appropriate text type for long content based on database dialect.

    Reads SQLALCHEMY_DATABASE_URI from the active Flask app config; a
    "mysql" URI maps to the MySQL LONGTEXT column type, any other dialect
    falls back to the generic SQLAlchemy Text type.

    NOTE(review): requires an active application context (uses current_app).
    """
    db_uri = current_app.config.get("SQLALCHEMY_DATABASE_URI", "")
    if db_uri.startswith("mysql"):
        return MYSQL_LONGTEXT
    return Text  # SQLite and PostgreSQL use Text
# For model definitions, we'll use a callable that returns the right type
class LongText(db.TypeDecorator):
"""Cross-database LONGTEXT type that works with MySQL, SQLite, and PostgreSQL."""
impl = Text

View File

@ -68,11 +68,9 @@ def message_list(conv_id):
db.session.commit()
tools_enabled = d.get("tools_enabled", True)
project_id = d.get("project_id")
if d.get("stream", False):
return _chat_service.stream_response(conv, tools_enabled, project_id)
return _chat_service.sync_response(conv, tools_enabled, project_id)
return _chat_service.stream_response(conv, tools_enabled, project_id)
@bp.route("/api/conversations/<conv_id>/messages/<msg_id>", methods=["DELETE"])

View File

@ -9,9 +9,6 @@ from backend.utils.helpers import (
get_or_create_default_user,
record_token_usage,
build_messages,
ok,
err,
to_dict,
)
from backend.services.glm_client import GLMClient
@ -26,114 +23,6 @@ class ChatService:
self.executor = ToolExecutor(registry=registry)
def sync_response(self, conv: Conversation, tools_enabled: bool = True, project_id: str = None):
    """Synchronous (non-streaming) chat completion with tool-call support.

    Repeatedly calls the upstream model (up to MAX_ITERATIONS), executing any
    requested tool calls between rounds, until the model returns a final
    message without tool calls.

    Args:
        conv: Conversation object (supplies model, max_tokens, temperature,
            thinking_enabled, and the message history).
        tools_enabled: Whether to expose the registered tools to the model.
        project_id: Project ID for workspace isolation; passed into the tool
            execution context when set.

    Returns:
        ok(...) payload with the persisted assistant message, token usage and
        a suggested conversation title, or err(500, ...) on upstream failure
        or when the tool-call iteration limit is exceeded.
    """
    tools = registry.list_all() if tools_enabled else None
    messages = build_messages(conv, project_id)
    # Clear tool call history for new request
    self.executor.clear_history()
    # Build context for tool execution
    context = {"project_id": project_id} if project_id else None
    all_tool_calls = []
    all_tool_results = []
    for _ in range(self.MAX_ITERATIONS):
        try:
            resp = self.glm_client.call(
                model=conv.model,
                messages=messages,
                max_tokens=conv.max_tokens,
                temperature=conv.temperature,
                thinking_enabled=conv.thinking_enabled,
                tools=tools,
            )
            resp.raise_for_status()
            result = resp.json()
        except Exception as e:
            # Any HTTP / parsing failure is surfaced as an upstream error.
            return err(500, f"upstream error: {e}")
        choice = result["choices"][0]
        message = choice["message"]
        # No tool calls - return final result
        if not message.get("tool_calls"):
            usage = result.get("usage", {})
            prompt_tokens = usage.get("prompt_tokens", 0)
            completion_tokens = usage.get("completion_tokens", 0)
            # Build content JSON (text plus optional thinking / tool-call trace)
            content_json = {
                "text": message.get("content", ""),
            }
            if message.get("reasoning_content"):
                content_json["thinking"] = message["reasoning_content"]
            if all_tool_calls:
                content_json["tool_calls"] = self._build_tool_calls_json(all_tool_calls, all_tool_results)
            # Create and persist the assistant message
            msg = Message(
                id=str(uuid.uuid4()),
                conversation_id=conv.id,
                role="assistant",
                content=json.dumps(content_json, ensure_ascii=False),
                token_count=completion_tokens,
            )
            db.session.add(msg)
            db.session.commit()
            user = get_or_create_default_user()
            record_token_usage(user.id, conv.model, prompt_tokens, completion_tokens)
            # Set title if needed (first message): derive it from the first
            # user message's text, truncated to 30 characters.
            suggested_title = None
            if not conv.title or conv.title == "新对话":
                user_msg = Message.query.filter_by(
                    conversation_id=conv.id, role="user"
                ).order_by(Message.created_at.asc()).first()
                if user_msg and user_msg.content:
                    # Parse content JSON to get text; fall back to raw content
                    try:
                        content_data = json.loads(user_msg.content)
                        title_text = content_data.get("text", "")[:30]
                    except (json.JSONDecodeError, TypeError):
                        title_text = user_msg.content.strip()[:30]
                    if title_text:
                        suggested_title = title_text
                    else:
                        suggested_title = "新对话"
                    conv.title = suggested_title
                    db.session.commit()
            return ok({
                "message": self._message_to_dict(msg),
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": usage.get("total_tokens", 0)
                },
                "suggested_title": suggested_title,
            })
        # Process tool calls: record them, feed the assistant turn plus tool
        # results back into the message list, and loop for another round.
        tool_calls = message["tool_calls"]
        all_tool_calls.extend(tool_calls)
        messages.append(message)
        tool_results = self.executor.process_tool_calls(tool_calls, context)
        all_tool_results.extend(tool_results)
        messages.extend(tool_results)
    return err(500, "exceeded maximum tool call iterations")
def stream_response(self, conv: Conversation, tools_enabled: bool = True, project_id: str = None):
"""Stream response with tool call support
@ -379,31 +268,6 @@ class ChatService:
})
return result
def _message_to_dict(self, msg: Message) -> dict:
    """Convert a Message row to a dict, parsing its JSON content.

    The stored content is expected to be a JSON object with a "text" key and
    optional "attachments" / "thinking" / "tool_calls" keys, which are lifted
    to the top level of the result. Non-JSON or non-dict content is exposed
    verbatim as "text". The result always contains a "text" key.
    """
    result = to_dict(msg)
    # Parse content JSON; tolerate legacy plain-string content.
    if msg.content:
        try:
            content_data = json.loads(msg.content)
            if isinstance(content_data, dict):
                result["text"] = content_data.get("text", "")
                if content_data.get("attachments"):
                    result["attachments"] = content_data["attachments"]
                if content_data.get("thinking"):
                    result["thinking"] = content_data["thinking"]
                if content_data.get("tool_calls"):
                    result["tool_calls"] = content_data["tool_calls"]
            else:
                result["text"] = msg.content
        except (json.JSONDecodeError, TypeError):
            result["text"] = msg.content
    # Guarantee the "text" key even for empty content.
    if "text" not in result:
        result["text"] = ""
    return result
def _process_tool_calls_delta(self, delta: dict, tool_calls_list: list) -> list:
"""Process tool calls from streaming delta"""

View File

@ -12,18 +12,12 @@ class ToolExecutor:
def __init__(
self,
registry: Optional[ToolRegistry] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
enable_cache: bool = True,
cache_ttl: int = 300, # 5 minutes
max_retries: int = 2, # Max retries per tool
):
self.registry = registry or ToolRegistry()
self.api_url = api_url
self.api_key = api_key
self.enable_cache = enable_cache
self.cache_ttl = cache_ttl
self.max_retries = max_retries
self._cache: Dict[str, tuple] = {} # key -> (result, timestamp)
self._call_history: List[dict] = [] # Track calls in current session
@ -124,7 +118,7 @@ class ToolExecutor:
continue
# Execute tool with retry
result = self._execute_with_retry(name, args)
result = self._execute_tool(name, args)
# Cache the result (only cache successful results)
if result.get("success"):
@ -141,23 +135,13 @@ class ToolExecutor:
return results
def _execute_with_retry(
def _execute_tool(
self,
name: str,
arguments: dict,
) -> dict:
"""
Execute tool without automatic retry.
If the tool fails, return the error to let the model decide
whether to retry with the same tool or try a different approach.
Returns:
Result dict with success status. Failed tool returns:
{"success": False, "error": "..."}
"""
result = self.registry.execute(name, arguments)
return result
"""Execute a tool and return the result."""
return self.registry.execute(name, arguments)
def _create_tool_result(
self,

View File

@ -194,10 +194,8 @@ classDiagram
-GLMClient glm_client
-ToolExecutor executor
+Integer MAX_ITERATIONS
+sync_response(conv, tools_enabled, project_id) Response
+stream_response(conv, tools_enabled, project_id) Response
-_build_tool_calls_json(calls, results) list
-_message_to_dict(msg) dict
-_process_tool_calls_delta(delta, list) list
}
@ -369,7 +367,7 @@ def process_tool_calls(self, tool_calls, context=None):
| 方法 | 路径 | 说明 |
|------|------|------|
| `GET` | `/api/conversations/:id/messages` | 获取消息列表(游标分页) |
| `POST` | `/api/conversations/:id/messages` | 发送消息(支持 SSE 流式) |
| `POST` | `/api/conversations/:id/messages` | 发送消息(SSE 流式) |
| `DELETE` | `/api/conversations/:id/messages/:mid` | 删除消息 |
| `POST` | `/api/conversations/:id/regenerate/:mid` | 重新生成消息 |

View File

@ -1188,6 +1188,7 @@
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@ -1300,6 +1301,7 @@
"integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.4.4",
@ -1374,6 +1376,7 @@
"resolved": "https://registry.npmmirror.com/vue/-/vue-3.5.30.tgz",
"integrity": "sha512-hTHLc6VNZyzzEH/l7PFGjpcTvUgiaPK5mdLkbjrTeWSRcEfxFrv56g/XckIYlE9ckuobsdwqd5mk2g1sBkMewg==",
"license": "MIT",
"peer": true,
"dependencies": {
"@vue/compiler-dom": "3.5.30",
"@vue/compiler-sfc": "3.5.30",

View File

@ -188,35 +188,9 @@ function loadMoreMessages() {
if (hasMoreMessages.value) loadMessages(false)
}
// -- Send message (streaming) --
async function sendMessage(data) {
if (!currentConvId.value || streaming.value) return
const convId = currentConvId.value // ID
const text = data.text || ''
const attachments = data.attachments || null
// Add user message optimistically
const userMsg = {
id: 'temp_' + Date.now(),
conversation_id: convId,
role: 'user',
text,
attachments: attachments ? attachments.map(a => ({ name: a.name, extension: a.extension })) : null,
token_count: 0,
created_at: new Date().toISOString(),
}
messages.value.push(userMsg)
streaming.value = true
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
currentStreamPromise = messageApi.send(convId, { text, attachments, projectId: currentProject.value?.id }, {
stream: true,
toolsEnabled: toolsEnabled.value,
// -- Helpers: create stream callbacks for a conversation --
function createStreamCallbacks(convId, { updateConvList = true } = {}) {
return {
onThinkingStart() {
if (currentConvId.value === convId) {
streamThinking.value = ''
@ -242,7 +216,6 @@ async function sendMessage(data) {
}
},
onToolCalls(calls) {
console.log('🔧 Tool calls received:', calls)
if (currentConvId.value === convId) {
streamToolCalls.value.push(...calls.map(c => ({ ...c, result: null })))
} else {
@ -252,7 +225,6 @@ async function sendMessage(data) {
}
},
onToolResult(result) {
console.log('✅ Tool result received:', result)
if (currentConvId.value === convId) {
const call = streamToolCalls.value.find(c => c.id === result.id)
if (call) call.result = result.content
@ -266,11 +238,8 @@ async function sendMessage(data) {
onProcessStep(step) {
const idx = step.index
if (currentConvId.value === convId) {
//
const newSteps = [...streamProcessSteps.value]
while (newSteps.length <= idx) {
newSteps.push(null)
}
while (newSteps.length <= idx) newSteps.push(null)
newSteps[idx] = step
streamProcessSteps.value = newSteps
} else {
@ -282,13 +251,11 @@ async function sendMessage(data) {
}
},
async onDone(data) {
//
streamStates.delete(convId)
if (currentConvId.value === convId) {
streaming.value = false
currentStreamPromise = null
//
messages.value.push({
id: data.message_id,
conversation_id: convId,
@ -305,35 +272,27 @@ async function sendMessage(data) {
streamToolCalls.value = []
streamProcessSteps.value = []
// Update conversation in list (move to top)
const idx = conversations.value.findIndex(c => c.id === convId)
if (idx > 0) {
const [conv] = conversations.value.splice(idx, 1)
conv.message_count = (conv.message_count || 0) + 2
if (data.suggested_title) {
conv.title = data.suggested_title
}
conversations.value.unshift(conv)
} else if (idx === 0) {
conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
if (data.suggested_title) {
conversations.value[0].title = data.suggested_title
if (updateConvList) {
const idx = conversations.value.findIndex(c => c.id === convId)
if (idx > 0) {
const [conv] = conversations.value.splice(idx, 1)
conv.message_count = (conv.message_count || 0) + 2
if (data.suggested_title) conv.title = data.suggested_title
conversations.value.unshift(conv)
} else if (idx === 0) {
conversations.value[0].message_count = (conversations.value[0].message_count || 0) + 2
if (data.suggested_title) conversations.value[0].title = data.suggested_title
}
}
} else {
//
try {
const res = await messageApi.list(convId, null, 50)
//
const idx = conversations.value.findIndex(c => c.id === convId)
if (idx >= 0) {
conversations.value[idx].message_count = res.data.items.length
//
if (res.data.items.length > 0) {
const convRes = await conversationApi.get(convId)
if (convRes.data.title) {
conversations.value[idx].title = convRes.data.title
}
if (convRes.data.title) conversations.value[idx].title = convRes.data.title
}
}
} catch (_) {}
@ -351,6 +310,37 @@ async function sendMessage(data) {
console.error('Stream error:', msg)
}
},
}
}
// -- Send message (streaming) --
// Optimistically appends the user message, resets the stream buffers, then
// opens an SSE stream via messageApi.send with the shared stream callbacks.
async function sendMessage(data) {
  // Ignore sends while no conversation is selected or a stream is in flight.
  if (!currentConvId.value || streaming.value) return
  const convId = currentConvId.value
  const text = data.text || ''
  const attachments = data.attachments || null
  // Temporary client-side message shown until the server echoes the real one.
  const userMsg = {
    id: 'temp_' + Date.now(),
    conversation_id: convId,
    role: 'user',
    text,
    attachments: attachments ? attachments.map(a => ({ name: a.name, extension: a.extension })) : null,
    token_count: 0,
    created_at: new Date().toISOString(),
  }
  messages.value.push(userMsg)
  // Reset streaming state for the incoming assistant reply.
  streaming.value = true
  streamContent.value = ''
  streamThinking.value = ''
  streamToolCalls.value = []
  streamProcessSteps.value = []
  // updateConvList: true — move this conversation to the top when done.
  currentStreamPromise = messageApi.send(convId, { text, attachments, projectId: currentProject.value?.id }, {
    toolsEnabled: toolsEnabled.value,
    ...createStreamCallbacks(convId, { updateConvList: true }),
  })
}
@ -370,12 +360,9 @@ async function regenerateMessage(msgId) {
if (!currentConvId.value || streaming.value) return
const convId = currentConvId.value
//
const msgIndex = messages.value.findIndex(m => m.id === msgId)
if (msgIndex === -1) return
//
messages.value = messages.value.slice(0, msgIndex)
streaming.value = true
@ -387,75 +374,7 @@ async function regenerateMessage(msgId) {
currentStreamPromise = messageApi.regenerate(convId, msgId, {
toolsEnabled: toolsEnabled.value,
projectId: currentProject.value?.id,
onThinkingStart() {
if (currentConvId.value === convId) {
streamThinking.value = ''
}
},
onThinking(text) {
if (currentConvId.value === convId) {
streamThinking.value += text
}
},
onMessage(text) {
if (currentConvId.value === convId) {
streamContent.value += text
}
},
onToolCalls(calls) {
if (currentConvId.value === convId) {
streamToolCalls.value.push(...calls.map(c => ({ ...c, result: null })))
}
},
onToolResult(result) {
if (currentConvId.value === convId) {
const call = streamToolCalls.value.find(c => c.id === result.id)
if (call) call.result = result.content
}
},
onProcessStep(step) {
const idx = step.index
if (currentConvId.value === convId) {
const newSteps = [...streamProcessSteps.value]
while (newSteps.length <= idx) {
newSteps.push(null)
}
newSteps[idx] = step
streamProcessSteps.value = newSteps
}
},
async onDone(data) {
if (currentConvId.value === convId) {
streaming.value = false
currentStreamPromise = null
messages.value.push({
id: data.message_id,
conversation_id: convId,
role: 'assistant',
text: streamContent.value,
thinking: streamThinking.value || null,
tool_calls: streamToolCalls.value.length > 0 ? streamToolCalls.value : null,
process_steps: streamProcessSteps.value.filter(Boolean),
token_count: data.token_count,
created_at: new Date().toISOString(),
})
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
}
},
onError(msg) {
if (currentConvId.value === convId) {
streaming.value = false
currentStreamPromise = null
streamContent.value = ''
streamThinking.value = ''
streamToolCalls.value = []
streamProcessSteps.value = []
console.error('Regenerate error:', msg)
}
},
...createStreamCallbacks(convId, { updateConvList: false }),
})
}

View File

@ -16,6 +16,79 @@ async function request(url, options = {}) {
return data
}
/**
* Shared SSE stream processor - parses SSE events and dispatches to callbacks
* @param {string} url - API URL (without BASE prefix)
* @param {object} body - Request body
* @param {object} callbacks - Event handlers: { onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError }
* @returns {{ abort: () => void }}
*/
/**
 * Shared SSE stream processor - parses SSE events and dispatches to callbacks.
 * @param {string} url - API URL (without BASE prefix)
 * @param {object} body - Request body (JSON-encoded into the POST)
 * @param {object} callbacks - Event handlers: { onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError }
 * @returns {Promise & { abort: () => void }} promise that settles when the stream ends; call .abort() to cancel
 */
function createSSEStream(url, body, { onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError }) {
  const controller = new AbortController()
  const promise = (async () => {
    try {
      const res = await fetch(`${BASE}${url}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(body),
        signal: controller.signal,
      })
      if (!res.ok) {
        // Surface the server's error message when the body is JSON, else HTTP status.
        const err = await res.json().catch(() => ({}))
        throw new Error(err.message || `HTTP ${res.status}`)
      }
      const reader = res.body.getReader()
      const decoder = new TextDecoder()
      let buffer = ''
      // BUGFIX: currentEvent must persist across reads — an "event:" line and
      // its "data:" line can arrive in different network chunks; previously it
      // was reset for every chunk, silently dropping those events.
      let currentEvent = ''
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        const lines = buffer.split('\n')
        // Keep the trailing partial line for the next read.
        buffer = lines.pop() || ''
        for (const line of lines) {
          if (line.startsWith('event: ')) {
            currentEvent = line.slice(7).trim()
          } else if (line.startsWith('data: ')) {
            const data = JSON.parse(line.slice(6))
            if (currentEvent === 'thinking_start' && onThinkingStart) {
              onThinkingStart()
            } else if (currentEvent === 'thinking' && onThinking) {
              onThinking(data.content)
            } else if (currentEvent === 'message' && onMessage) {
              onMessage(data.content)
            } else if (currentEvent === 'tool_calls' && onToolCalls) {
              onToolCalls(data.calls)
            } else if (currentEvent === 'tool_result' && onToolResult) {
              onToolResult(data)
            } else if (currentEvent === 'process_step' && onProcessStep) {
              onProcessStep(data)
            } else if (currentEvent === 'done' && onDone) {
              onDone(data)
            } else if (currentEvent === 'error' && onError) {
              onError(data.content)
            }
          }
        }
      }
    } catch (e) {
      // AbortError means the caller cancelled on purpose — not an error.
      if (e.name !== 'AbortError' && onError) {
        onError(e.message)
      }
    }
  })()
  promise.abort = () => controller.abort()
  return promise
}
export const modelApi = {
list() {
return request('/models')
@ -95,149 +168,25 @@ export const messageApi = {
return request(`/conversations/${convId}/messages?${params}`)
},
send(convId, data, { stream = true, toolsEnabled = true, onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError } = {}) {
if (!stream) {
return request(`/conversations/${convId}/messages`, {
method: 'POST',
body: { text: data.text, attachments: data.attachments, stream: false, tools_enabled: toolsEnabled, project_id: data.projectId },
})
}
const controller = new AbortController()
const promise = (async () => {
try {
const res = await fetch(`${BASE}/conversations/${convId}/messages`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ text: data.text, attachments: data.attachments, stream: true, tools_enabled: toolsEnabled, project_id: data.projectId }),
signal: controller.signal,
})
if (!res.ok) {
const err = await res.json().catch(() => ({}))
throw new Error(err.message || `HTTP ${res.status}`)
}
const reader = res.body.getReader()
const decoder = new TextDecoder()
let buffer = ''
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n')
buffer = lines.pop() || ''
let currentEvent = ''
for (const line of lines) {
if (line.startsWith('event: ')) {
currentEvent = line.slice(7).trim()
} else if (line.startsWith('data: ')) {
const data = JSON.parse(line.slice(6))
if (currentEvent === 'thinking_start' && onThinkingStart) {
onThinkingStart()
} else if (currentEvent === 'thinking' && onThinking) {
onThinking(data.content)
} else if (currentEvent === 'message' && onMessage) {
onMessage(data.content)
} else if (currentEvent === 'tool_calls' && onToolCalls) {
onToolCalls(data.calls)
} else if (currentEvent === 'tool_result' && onToolResult) {
onToolResult(data)
} else if (currentEvent === 'process_step' && onProcessStep) {
onProcessStep(data)
} else if (currentEvent === 'done' && onDone) {
onDone(data)
} else if (currentEvent === 'error' && onError) {
onError(data.content)
}
}
}
}
} catch (e) {
if (e.name !== 'AbortError' && onError) {
onError(e.message)
}
}
})()
promise.abort = () => controller.abort()
return promise
send(convId, data, callbacks) {
return createSSEStream(`/conversations/${convId}/messages`, {
text: data.text,
attachments: data.attachments,
stream: true,
tools_enabled: callbacks.toolsEnabled !== false,
project_id: data.projectId,
}, callbacks)
},
delete(convId, msgId) {
return request(`/conversations/${convId}/messages/${msgId}`, { method: 'DELETE' })
},
regenerate(convId, msgId, { toolsEnabled = true, projectId, onThinkingStart, onThinking, onMessage, onToolCalls, onToolResult, onProcessStep, onDone, onError } = {}) {
const controller = new AbortController()
const promise = (async () => {
try {
const res = await fetch(`${BASE}/conversations/${convId}/regenerate/${msgId}`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ tools_enabled: toolsEnabled, project_id: projectId }),
signal: controller.signal,
})
if (!res.ok) {
const err = await res.json().catch(() => ({}))
throw new Error(err.message || `HTTP ${res.status}`)
}
const reader = res.body.getReader()
const decoder = new TextDecoder()
let buffer = ''
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n')
buffer = lines.pop() || ''
let currentEvent = ''
for (const line of lines) {
if (line.startsWith('event: ')) {
currentEvent = line.slice(7).trim()
} else if (line.startsWith('data: ')) {
const data = JSON.parse(line.slice(6))
if (currentEvent === 'thinking_start' && onThinkingStart) {
onThinkingStart()
} else if (currentEvent === 'thinking' && onThinking) {
onThinking(data.content)
} else if (currentEvent === 'message' && onMessage) {
onMessage(data.content)
} else if (currentEvent === 'tool_calls' && onToolCalls) {
onToolCalls(data.calls)
} else if (currentEvent === 'tool_result' && onToolResult) {
onToolResult(data)
} else if (currentEvent === 'process_step' && onProcessStep) {
onProcessStep(data)
} else if (currentEvent === 'done' && onDone) {
onDone(data)
} else if (currentEvent === 'error' && onError) {
onError(data.content)
}
}
}
}
} catch (e) {
if (e.name !== 'AbortError' && onError) {
onError(e.message)
}
}
})()
promise.abort = () => controller.abort()
return promise
regenerate(convId, msgId, callbacks) {
return createSSEStream(`/conversations/${convId}/regenerate/${msgId}`, {
tools_enabled: callbacks.toolsEnabled !== false,
project_id: callbacks.projectId,
}, callbacks)
},
}

View File

@ -56,7 +56,7 @@
:process-steps="streamingProcessSteps"
:streaming="streaming"
/>
<div class="message-content streaming-content" v-html="renderedStreamContent || '<span class=\'placeholder\'>...</span>'"></div>
<div class="md-content streaming-content" v-html="renderedStreamContent || '<span class=\'placeholder\'>...</span>'"></div>
<div class="streaming-indicator">
<svg class="spinner" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M21 12a9 9 0 1 1-6.219-8.56"/>
@ -370,10 +370,6 @@ defineExpose({ scrollToBottom })
}
.streaming-content {
font-size: 15px;
line-height: 1.7;
color: var(--text-primary);
word-break: break-word;
}
.streaming-indicator {
@ -387,85 +383,8 @@ defineExpose({ scrollToBottom })
color: var(--text-tertiary);
}
.streaming-content :deep(p) {
margin: 0 0 8px;
}
.streaming-content :deep(p:last-child) {
margin-bottom: 0;
}
.streaming-content :deep(pre) {
background: var(--bg-code);
border: 1px solid var(--border-light);
border-radius: 8px;
padding: 16px;
overflow-x: auto;
margin: 8px 0;
max-width: 100%;
}
.streaming-content :deep(pre code) {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 13px;
line-height: 1.5;
}
.streaming-content :deep(code) {
background: var(--accent-primary-light);
color: var(--accent-primary);
padding: 2px 6px;
border-radius: 4px;
font-size: 13px;
font-family: 'JetBrains Mono', 'Fira Code', monospace;
}
.streaming-content :deep(pre code) {
background: none;
color: inherit;
padding: 0;
}
.streaming-content :deep(ul),
.streaming-content :deep(ol) {
padding-left: 20px;
margin: 8px 0;
}
.streaming-content :deep(blockquote) {
border-left: 3px solid rgba(59, 130, 246, 0.4);
padding-left: 12px;
color: var(--text-secondary);
margin: 8px 0;
}
.streaming-content :deep(table) {
border-collapse: collapse;
margin: 8px 0;
width: 100%;
}
.streaming-content :deep(th),
.streaming-content :deep(td) {
border: 1px solid var(--border-medium);
padding: 8px 12px;
text-align: left;
}
.streaming-content :deep(th) {
background: var(--bg-code);
}
.streaming-content :deep(.placeholder) {
color: var(--text-tertiary);
}
.streaming-content :deep(.math-block),
.message-content :deep(.math-block) {
display: block;
text-align: center;
padding: 12px 0;
margin: 8px 0;
overflow-x: auto;
}
</style>

View File

@ -21,7 +21,7 @@
<div class="tool-badge">工具返回结果: {{ toolName }}</div>
<pre>{{ content }}</pre>
</div>
<div v-else class="message-content" v-html="renderedContent"></div>
<div v-else class="md-content message-content" v-html="renderedContent"></div>
</div>
<div class="message-footer">
<span class="token-count" v-if="tokenCount">{{ tokenCount }} tokens</span>
@ -196,10 +196,6 @@ function copyContent() {
}
.message-content {
font-size: 15px;
line-height: 1.7;
color: var(--text-primary);
word-break: break-word;
}
.tool-result-content {
@ -232,75 +228,6 @@ function copyContent() {
word-break: break-word;
}
.message-content :deep(p) {
margin: 0 0 8px;
}
.message-content :deep(p:last-child) {
margin-bottom: 0;
}
.message-content :deep(pre) {
background: var(--bg-code);
border: 1px solid var(--border-light);
border-radius: 8px;
padding: 16px;
overflow-x: auto;
margin: 8px 0;
position: relative;
}
.message-content :deep(pre code) {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 13px;
line-height: 1.5;
}
.message-content :deep(code) {
background: var(--accent-primary-light);
color: var(--accent-primary);
padding: 2px 6px;
border-radius: 4px;
font-size: 13px;
font-family: 'JetBrains Mono', 'Fira Code', monospace;
}
.message-content :deep(pre code) {
background: none;
color: inherit;
padding: 0;
}
.message-content :deep(ul),
.message-content :deep(ol) {
padding-left: 20px;
margin: 8px 0;
}
.message-content :deep(blockquote) {
border-left: 3px solid rgba(59, 130, 246, 0.4);
padding-left: 12px;
color: var(--text-secondary);
margin: 8px 0;
}
.message-content :deep(table) {
border-collapse: collapse;
margin: 8px 0;
width: 100%;
}
.message-content :deep(th),
.message-content :deep(td) {
border: 1px solid var(--border-medium);
padding: 8px 12px;
text-align: left;
}
.message-content :deep(th) {
background: var(--bg-code);
}
.message-footer {
display: flex;
align-items: center;
@ -343,12 +270,4 @@ function copyContent() {
color: var(--danger-color);
background: var(--danger-bg);
}
.message-content :deep(.math-block) {
display: block;
text-align: center;
padding: 12px 0;
margin: 8px 0;
overflow-x: auto;
}
</style>

View File

@ -18,11 +18,10 @@
<textarea
ref="textareaRef"
v-model="text"
placeholder="输入消息... (Shift+Enter 换行)"
:placeholder="disabled ? 'AI 正在回复中...' : '输入消息... (Shift+Enter 换行)'"
rows="1"
@input="autoResize"
@keydown="onKeydown"
:disabled="disabled"
></textarea>
<div class="input-footer">
<input
@ -274,10 +273,6 @@ textarea::placeholder {
color: var(--text-tertiary);
}
textarea:disabled {
opacity: 0.5;
}
.input-footer {
display: flex;
justify-content: flex-end;

View File

@ -1,3 +1,89 @@
/* Markdown content shared styles */
.md-content {
font-size: 15px;
line-height: 1.7;
color: var(--text-primary);
word-break: break-word;
}
.md-content :deep(p) {
margin: 0 0 8px;
}
.md-content :deep(p:last-child) {
margin-bottom: 0;
}
.md-content :deep(pre) {
background: var(--bg-code);
border: 1px solid var(--border-light);
border-radius: 8px;
padding: 16px;
overflow-x: auto;
margin: 8px 0;
max-width: 100%;
position: relative;
}
.md-content :deep(pre code) {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 13px;
line-height: 1.5;
}
.md-content :deep(code) {
background: var(--accent-primary-light);
color: var(--accent-primary);
padding: 2px 6px;
border-radius: 4px;
font-size: 13px;
font-family: 'JetBrains Mono', 'Fira Code', monospace;
}
.md-content :deep(pre code) {
background: none;
color: inherit;
padding: 0;
}
.md-content :deep(ul),
.md-content :deep(ol) {
padding-left: 20px;
margin: 8px 0;
}
.md-content :deep(blockquote) {
border-left: 3px solid rgba(59, 130, 246, 0.4);
padding-left: 12px;
color: var(--text-secondary);
margin: 8px 0;
}
.md-content :deep(table) {
border-collapse: collapse;
margin: 8px 0;
width: 100%;
}
.md-content :deep(th),
.md-content :deep(td) {
border: 1px solid var(--border-medium);
padding: 8px 12px;
text-align: left;
}
.md-content :deep(th) {
background: var(--bg-code);
}
.md-content :deep(.math-block) {
display: block;
text-align: center;
padding: 12px 0;
margin: 8px 0;
overflow-x: auto;
}
/* 共享滚动条样式 */
.custom-scrollbar::-webkit-scrollbar {
width: 6px;