feat: 实现增量渲染逻辑

This commit is contained in:
ViperEkura 2026-04-12 23:42:55 +08:00
parent e93ec6d94d
commit c1788f1ba3
15 changed files with 1901 additions and 679 deletions

View File

@ -22,6 +22,7 @@ luxx/
│ ├── auth.py # 认证
│ ├── conversations.py # 会话管理
│ ├── messages.py # 消息处理
│ ├── providers.py # LLM 提供商管理
│ └── tools.py # 工具管理
├── services/ # 服务层
│ ├── chat.py # 聊天服务
@ -101,15 +102,63 @@ erDiagram
string id PK
string conversation_id FK
string role
longtext content
longtext content "JSON 格式"
int token_count
datetime created_at
}
USER ||--o{ CONVERSATION : "has"
CONVERSATION ||--o{ MESSAGE : "has"
USER ||--o{ LLM_PROVIDER : "configures"
LLM_PROVIDER {
int id PK
int user_id FK
string name
string provider_type
string base_url
string api_key
string default_model
boolean is_default
boolean enabled
datetime created_at
datetime updated_at
}
```
### Message Content JSON 结构
`content` 字段统一使用 JSON 格式存储:
**User 消息:**
```json
{
"text": "用户输入的文本内容",
"attachments": [
{"name": "utils.py", "extension": "py", "content": "..."}
]
}
```
**Assistant 消息:**
```json
{
"text": "AI 回复的文本内容",
"tool_calls": [...],
"steps": [
{"id": "step-0", "index": 0, "type": "thinking", "content": "..."},
{"id": "step-1", "index": 1, "type": "text", "content": "..."},
{"id": "step-2", "index": 2, "type": "tool_call", "id_ref": "call_xxx", "name": "...", "arguments": "..."},
{"id": "step-3", "index": 3, "type": "tool_result", "id_ref": "call_xxx", "name": "...", "content": "..."}
]
}
```
`steps` 字段是**渲染顺序的唯一数据源**,按 `index` 顺序排列。thinking、text、tool_call、tool_result 可以在多轮迭代中穿插出现。
### 5. 工具系统
```mermaid
@ -191,6 +240,9 @@ LLM API 客户端:
| `/conversations` | GET/POST | 会话列表/创建 |
| `/conversations/{id}` | GET/DELETE | 会话详情/删除 |
| `/messages/stream` | POST | 流式消息发送 |
| `/providers` | GET/POST | LLM 提供商列表/创建 |
| `/providers/{id}` | GET/PUT/DELETE | 提供商详情/更新/删除 |
| `/providers/{id}/test` | POST | 测试提供商连接 |
| `/tools` | GET | 可用工具列表 |
## 数据流
@ -227,12 +279,29 @@ sequenceDiagram
| 事件 | 说明 |
|------|------|
| `text` | 文本内容增量 |
| `tool_call` | 工具调用请求 |
| `tool_result` | 工具执行结果 |
| `process_step` | 结构化步骤(thinking/text/tool_call/tool_result),携带 `id`、`index` 确保渲染顺序 |
| `done` | 响应完成 |
| `error` | 错误信息 |
### process_step 事件格式
```json
{"type": "process_step", "step": {"id": "step-0", "index": 0, "type": "thinking", "content": "..."}}
{"type": "process_step", "step": {"id": "step-1", "index": 1, "type": "text", "content": "回复文本..."}}
{"type": "process_step", "step": {"id": "step-2", "index": 2, "type": "tool_call", "id_ref": "call_abc", "name": "web_search", "arguments": "{\"query\": \"...\"}"}}
{"type": "process_step", "step": {"id": "step-3", "index": 3, "type": "tool_result", "id_ref": "call_abc", "name": "web_search", "content": "{\"success\": true, ...}"}}
```
| 字段 | 说明 |
|------|------|
| `id` | 步骤唯一标识(格式 `step-{index}`) |
| `index` | 步骤序号,确保按正确顺序显示 |
| `type` | 步骤类型:`thinking` / `text` / `tool_call` / `tool_result` |
| `id_ref` | 工具调用引用 ID(仅 tool_call/tool_result) |
| `name` | 工具名称(仅 tool_call/tool_result) |
| `arguments` | 工具调用参数 JSON 字符串(仅 tool_call) |
| `content` | 内容(thinking 的思考内容、text 的文本、tool_result 的返回结果) |
## 配置示例
### config.yaml

View File

@ -1,7 +1,7 @@
# 配置文件
app:
secret_key: ${APP_SECRET_KEY}
debug: true
debug: false
host: 0.0.0.0
port: 8000

View File

@ -4,7 +4,11 @@
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>dashboard</title>
<title>Luxx Dashboard</title>
<!-- KaTeX CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.11/dist/katex.min.css">
<!-- Highlight.js CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/highlight.js@11.10.0/styles/github.min.css">
</head>
<body>
<div id="app"></div>

View File

@ -9,6 +9,9 @@
"version": "0.0.0",
"dependencies": {
"axios": "^1.15.0",
"katex": "^0.16.11",
"marked": "^15.0.0",
"marked-highlight": "^2.2.1",
"pinia": "^3.0.4",
"vue": "^3.5.32",
"vue-router": "^4.6.4"
@ -586,6 +589,15 @@
"node": ">= 0.8"
}
},
"node_modules/commander": {
"version": "8.3.0",
"resolved": "https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz",
"integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
"license": "MIT",
"engines": {
"node": ">= 12"
}
},
"node_modules/copy-anything": {
"version": "4.0.5",
"resolved": "https://registry.npmmirror.com/copy-anything/-/copy-anything-4.0.5.tgz",
@ -887,6 +899,22 @@
"url": "https://github.com/sponsors/mesqueeb"
}
},
"node_modules/katex": {
"version": "0.16.45",
"resolved": "https://registry.npmmirror.com/katex/-/katex-0.16.45.tgz",
"integrity": "sha512-pQpZbdBu7wCTmQUh7ufPmLr0pFoObnGUoL/yhtwJDgmmQpbkg/0HSVti25Fu4rmd1oCR6NGWe9vqTWuWv3GcNA==",
"funding": [
"https://opencollective.com/katex",
"https://github.com/sponsors/katex"
],
"license": "MIT",
"dependencies": {
"commander": "^8.3.0"
},
"bin": {
"katex": "cli.js"
}
},
"node_modules/lightningcss": {
"version": "1.32.0",
"resolved": "https://registry.npmmirror.com/lightningcss/-/lightningcss-1.32.0.tgz",
@ -1157,6 +1185,28 @@
"@jridgewell/sourcemap-codec": "^1.5.5"
}
},
"node_modules/marked": {
"version": "15.0.12",
"resolved": "https://registry.npmmirror.com/marked/-/marked-15.0.12.tgz",
"integrity": "sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA==",
"license": "MIT",
"peer": true,
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/marked-highlight": {
"version": "2.2.4",
"resolved": "https://registry.npmmirror.com/marked-highlight/-/marked-highlight-2.2.4.tgz",
"integrity": "sha512-PZxisNMJDduSjc0q6uvjsnqqHCXc9s0eyzxDO9sB1eNGJnd/H1/Fu+z6g/liC1dfJdFW4SftMwMlLvsBhUPrqQ==",
"license": "MIT",
"peerDependencies": {
"marked": ">=4 <19"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz",

View File

@ -10,6 +10,9 @@
},
"dependencies": {
"axios": "^1.15.0",
"katex": "^0.16.11",
"marked": "^15.0.0",
"marked-highlight": "^2.2.1",
"pinia": "^3.0.4",
"vue": "^3.5.32",
"vue-router": "^4.6.4"

View File

@ -0,0 +1,137 @@
<template>
<div class="message-bubble" :class="[role]">
<div v-if="role === 'user'" class="avatar">user</div>
<div v-else class="avatar">Luxx</div>
<div class="message-container">
<!-- File attachments list -->
<div v-if="attachments && attachments.length > 0" class="attachments-list">
<div v-for="(file, index) in attachments" :key="index" class="attachment-item">
<span class="attachment-icon">{{ file.extension }}</span>
<span class="attachment-name">{{ file.name }}</span>
</div>
</div>
<div ref="messageRef" class="message-body">
<!-- Primary rendering path: processSteps contains all ordered steps -->
<ProcessBlock
v-if="processSteps && processSteps.length > 0"
:process-steps="processSteps"
/>
<!-- Fallback path: old messages without processSteps in DB -->
<template v-else>
<ProcessBlock
v-if="toolCalls && toolCalls.length > 0"
:tool-calls="toolCalls"
/>
<div class="md-content message-content" v-html="renderedContent"></div>
</template>
</div>
<div class="message-footer">
<span class="token-count" v-if="tokenCount">{{ tokenCount }} tokens</span>
<span class="message-time">{{ formatTime(createdAt) }}</span>
<button v-if="role === 'assistant'" class="ghost-btn accent" @click="copyContent" title="复制">
<span v-html="copyIcon"></span>
</button>
<button v-if="deletable" class="ghost-btn danger" @click="$emit('delete')" title="删除">
<span v-html="trashIcon"></span>
</button>
</div>
</div>
</div>
</template>
<script setup>
import { computed, ref } from 'vue'
import { renderMarkdown } from '../utils/markdown.js'
import ProcessBlock from './ProcessBlock.vue'
const props = defineProps({
role: { type: String, required: true },
text: { type: String, default: '' },
toolCalls: { type: Array, default: () => [] },
processSteps: { type: Array, default: () => [] },
tokenCount: { type: Number, default: 0 },
createdAt: { type: String, default: '' },
deletable: { type: Boolean, default: false },
attachments: { type: Array, default: () => [] },
})
defineEmits(['delete'])
const messageRef = ref(null)
const renderedContent = computed(() => {
if (!props.text) return ''
return renderMarkdown(props.text)
})
// Format an ISO timestamp as an HH:MM string (zh-CN locale); '' for empty input.
function formatTime(time) {
  if (!time) return ''
  const opts = { hour: '2-digit', minute: '2-digit' }
  return new Date(time).toLocaleTimeString('zh-CN', opts)
}
// Copy the message to the clipboard. When structured processSteps exist,
// join only the `text` steps (what the user actually reads); otherwise fall
// back to the raw text prop. Clipboard failures are silently ignored.
function copyContent() {
  const steps = props.processSteps || []
  const textParts = steps
    .filter(s => s && s.type === 'text')
    .map(s => s.content)
  const payload = textParts.length > 0 ? textParts.join('\n\n') : (props.text || '')
  navigator.clipboard.writeText(payload).catch(() => {})
}
// Icons
const copyIcon = `<svg viewBox="0 0 24 24" width="14" height="14" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg>`
const trashIcon = `<svg viewBox="0 0 24 24" width="14" height="14" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><polyline points="3 6 5 6 21 6"></polyline><path d="M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"></path></svg>`
</script>
<style scoped>
.attachments-list {
display: flex;
flex-wrap: wrap;
gap: 6px;
margin-bottom: 8px;
width: 100%;
}
.attachment-item {
display: flex;
align-items: center;
gap: 6px;
padding: 4px 10px;
background: var(--bg-code);
border: 1px solid var(--border-light);
border-radius: 6px;
font-size: 12px;
color: var(--text-secondary);
}
.attachment-icon {
background: var(--attachment-bg);
color: var(--attachment-color);
padding: 2px 6px;
border-radius: 4px;
font-size: 11px;
font-weight: 600;
}
.attachment-name {
color: var(--text-primary);
font-weight: 500;
}
.message-footer {
display: flex;
align-items: center;
gap: 8px;
padding: 6px 0 0;
font-size: 12px;
}
.token-count,
.message-time {
font-size: 12px;
color: var(--text-tertiary);
}
</style>

View File

@ -0,0 +1,373 @@
<template>
<div ref="processRef" class="process-block" :class="{ 'is-streaming': streaming }">
<!-- Thinking Steps -->
<div
v-for="item in thinkingItems"
:key="item.key"
class="step-item thinking"
>
<div class="step-header" @click="toggleExpand(item.key)">
<span v-html="brainIcon"></span>
<span class="step-label">思考中</span>
<span class="step-brief">{{ item.brief || '正在思考...' }}</span>
<span v-if="streaming && item.key === lastThinkingKey" class="loading-dots">...</span>
<span class="arrow" :class="{ open: expandedKeys.has(item.key) }" v-html="chevronDown"></span>
</div>
<div v-if="expandedKeys.has(item.key)" class="step-content">
<div class="thinking-text">{{ item.content }}</div>
</div>
</div>
<!-- Tool Call Steps -->
<div
v-for="item in toolCallItems"
:key="item.key"
class="step-item tool_call"
:class="{ loading: item.loading }"
>
<div class="step-header" @click="toggleExpand(item.key)">
<span v-html="toolIcon"></span>
<span class="step-label">{{ item.name || '工具调用' }}</span>
<span class="step-brief">{{ item.brief || '' }}</span>
<span v-if="item.loading" class="loading-dots">...</span>
<span v-else-if="item.isSuccess === true" class="step-badge success">成功</span>
<span v-else-if="item.isSuccess === false" class="step-badge error">失败</span>
<span class="arrow" :class="{ open: expandedKeys.has(item.key) }" v-html="chevronDown"></span>
</div>
<div v-if="expandedKeys.has(item.key)" class="step-content">
<div class="tool-detail">
<span class="detail-label">参数</span>
<pre>{{ formatArgs(item.args) }}</pre>
</div>
<div v-if="item.resultSummary || item.fullResult" class="tool-detail" style="margin-top: 8px;">
<span class="detail-label">结果</span>
<pre>{{ item.fullResult || item.resultSummary }}</pre>
</div>
</div>
</div>
<!-- Text Steps -->
<div
v-for="item in textItems"
:key="item.key"
class="text-content"
v-html="renderMarkdown(item.content)"
></div>
<!-- Streaming indicator -->
<div v-if="streaming && !hasContent" class="streaming-indicator">
<span v-html="sparkleIcon"></span>
<span>AI 正在输入...</span>
</div>
</div>
</template>
<script setup>
import { ref, computed } from 'vue'
import { renderMarkdown } from '../utils/markdown.js'
const props = defineProps({
processSteps: { type: Array, default: () => [] },
toolCalls: { type: Array, default: () => [] },
streaming: { type: Boolean, default: false },
})
const processRef = ref(null)
const expandedKeys = ref(new Set())
// Build the ordered render list from processSteps (primary path) or from the
// legacy toolCalls prop (fallback for old persisted messages).
// Per the process_step event format, a step carries: id ("step-{index}"),
// index, type, and for tool steps: id_ref (provider call id, "call_xxx"),
// name, and arguments (JSON string).
const allItems = computed(() => {
  const items = []
  if (props.processSteps && props.processSteps.length > 0) {
    for (const step of props.processSteps) {
      if (step.type === 'thinking') {
        items.push({
          key: step.id || `thinking-${step.index}`,
          type: 'thinking',
          content: step.content || '',
          brief: step.content ? step.content.slice(0, 50) + (step.content.length > 50 ? '...' : '') : '',
        })
      } else if (step.type === 'tool_call') {
        items.push({
          key: step.id || `tool-${step.index}`,
          type: 'tool_call',
          // BUGFIX: match on the provider call id (id_ref, e.g. "call_xxx") —
          // tool_result steps reference id_ref, not the step id ("step-N").
          id: step.id_ref || step.id,
          name: step.name,
          // BUGFIX: the event field is `arguments` (JSON string); keep `args`
          // as a fallback for any older persisted shape.
          args: step.arguments ?? step.args,
          brief: step.name || '',
          loading: step.loading,
          isSuccess: step.isSuccess,
          resultSummary: step.resultSummary,
          fullResult: step.fullResult,
        })
      } else if (step.type === 'tool_result') {
        // Attach the result to its originating tool_call item.
        const toolId = step.id_ref || step.id
        const match = items.findLast(it => it.type === 'tool_call' && it.id === toolId)
        if (match) {
          match.resultSummary = step.content ? step.content.slice(0, 200) : ''
          match.fullResult = step.content || ''
          match.isSuccess = step.success !== false
          match.loading = false
        } else {
          // Orphan result (no matching tool_call seen): render it standalone.
          items.push({
            key: `result-${step.id || step.index}`,
            type: 'tool_call',
            id: step.id_ref || step.id,
            name: step.name || '工具结果',
            args: '{}',
            brief: step.name || '工具结果',
            loading: false,
            isSuccess: true,
            resultSummary: step.content ? step.content.slice(0, 200) : '',
            fullResult: step.content || ''
          })
        }
      } else if (step.type === 'text') {
        items.push({
          key: step.id || `text-${step.index}`,
          type: 'text',
          content: step.content || '',
        })
      }
    }
  } else if (props.toolCalls && props.toolCalls.length > 0) {
    // Legacy fallback: old messages persisted only a flat toolCalls array.
    for (const tc of props.toolCalls) {
      items.push({
        key: tc.id || `tool-${tc.index}`,
        type: 'tool_call',
        id: tc.id,
        name: tc.name,
        args: tc.arguments,
        brief: tc.name || '',
      })
    }
  }
  return items
})
const thinkingItems = computed(() => allItems.value.filter(i => i.type === 'thinking'))
const toolCallItems = computed(() => allItems.value.filter(i => i.type === 'tool_call'))
const textItems = computed(() => allItems.value.filter(i => i.type === 'text'))
const hasContent = computed(() => allItems.value.length > 0)
// Key of the most recent thinking step (the streaming "..." indicator is
// attached to it); null when no thinking step exists.
const lastThinkingKey = computed(() => {
  const last = allItems.value.findLast(i => i.type === 'thinking')
  return last ? last.key : null
})
// Toggle a step's expanded/collapsed state. A fresh Set is assigned so the
// change is reliably picked up by Vue's reactivity (same as the original,
// which mutated then reassigned).
function toggleExpand(key) {
  const next = new Set(expandedKeys.value)
  if (!next.delete(key)) next.add(key)
  expandedKeys.value = next
}
// Pretty-print tool-call arguments for the expanded detail panel.
// Accepts a JSON string or a plain object; returns the raw string when it
// is not valid JSON, and "{}" for empty input.
function formatArgs(args) {
  if (!args) return '{}'
  let value = args
  if (typeof value === 'string') {
    try {
      value = JSON.parse(value)
    } catch {
      // Not JSON — show the raw string untouched.
      return value
    }
  }
  return JSON.stringify(value, null, 2)
}
// Icons
const brainIcon = `<svg viewBox="0 0 24 24" width="16" height="16" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M12 5a3 3 0 1 0-5.997.125 4 4 0 0 0-2.526 5.77 4 4 0 0 0 .556 6.588A4 4 0 1 0 12 18Z"></path><path d="M12 5a3 3 0 1 1 5.997.125 4 4 0 0 1 2.526 5.77 4 4 0 0 1-.556 6.588A4 4 0 1 1 12 18Z"></path><path d="M15 13a4.5 4.5 0 0 1-3-4 4.5 4.5 0 0 1-3 4"></path><path d="M17.599 6.5a3 3 0 0 0 .399-1.375"></path><path d="M6.003 5.125A3 3 0 0 0 6.401 6.5"></path><path d="M3.477 10.896a4 4 0 0 1 .585-.396"></path><path d="M19.938 10.5a4 4 0 0 1 .585.396"></path><path d="M6 18a4 4 0 0 1-1.967-.516"></path><path d="M19.967 17.484A4 4 0 0 1 18 18"></path></svg>`
const toolIcon = `<svg viewBox="0 0 24 24" width="16" height="16" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M14.7 6.3a1 1 0 0 0 0 1.4l1.6 1.6a1 1 0 0 0 1.4 0l3.77-3.77a6 6 0 0 1-7.94 7.94l-6.91 6.91a2.12 2.12 0 0 1-3-3l6.91-6.91a6 6 0 0 1 7.94-7.94l-3.76 3.76z"></path></svg>`
const chevronDown = `<svg viewBox="0 0 24 24" width="16" height="16" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><polyline points="6 9 12 15 18 9"></polyline></svg>`
const sparkleIcon = `<svg viewBox="0 0 24 24" width="14" height="14" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="m12 3-1.912 5.813a2 2 0 0 1-1.275 1.275L3 12l5.813 1.912a2 2 0 0 1 1.275 1.275L12 21l1.912-5.813a2 2 0 0 1 1.275-1.275L21 12l-5.813-1.912a2 2 0 0 1-1.275-1.275L12 3Z"></path></svg>`
</script>
<style scoped>
.process-block {
width: 100%;
}
/* Step items (shared) */
.step-item {
margin-bottom: 8px;
}
.step-item:last-child {
margin-bottom: 0;
}
@keyframes pulse {
0%, 100% { opacity: 0.4; }
50% { opacity: 1; }
}
/* Step header (shared by thinking and tool_call) */
.thinking .step-header,
.tool_call .step-header {
display: flex;
align-items: center;
gap: 8px;
padding: 8px 12px;
background: var(--bg-secondary);
border: 1px solid var(--border-light);
border-radius: 8px;
cursor: pointer;
font-size: 13px;
transition: background 0.15s;
}
.thinking .step-header:hover,
.tool_call .step-header:hover {
background: var(--bg-hover);
}
.thinking .step-header svg:first-child {
color: #f59e0b;
}
.tool_call .step-header svg:first-child {
color: var(--tool-color);
}
.step-label {
font-weight: 500;
color: var(--text-primary);
flex-shrink: 0;
min-width: 130px;
max-width: 130px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.arrow {
margin-left: auto;
transition: transform 0.2s;
color: var(--text-tertiary);
flex-shrink: 0;
}
.step-badge {
font-size: 11px;
padding: 2px 8px;
border-radius: 10px;
font-weight: 500;
}
.step-badge.success {
background: var(--success-bg);
color: var(--success-color);
}
.step-badge.error {
background: var(--danger-bg);
color: var(--danger-color);
}
.step-brief {
font-size: 11px;
color: var(--text-tertiary);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
flex: 1;
min-width: 0;
}
.arrow.open {
transform: rotate(180deg);
}
.loading-dots {
font-size: 16px;
font-weight: 700;
color: var(--tool-color);
animation: pulse 1s ease-in-out infinite;
}
.tool_call.loading .step-header {
background: var(--bg-hover);
}
/* Expandable step content panel */
.step-content {
padding: 12px;
margin-top: 4px;
background: var(--bg-code);
border: 1px solid var(--border-light);
border-radius: 8px;
overflow: hidden;
}
.thinking-text {
font-size: 13px;
color: var(--text-secondary);
line-height: 1.6;
white-space: pre-wrap;
}
.tool-detail {
font-size: 13px;
}
.detail-label {
color: var(--text-tertiary);
font-size: 11px;
font-weight: 600;
display: block;
margin-bottom: 4px;
}
.tool-detail pre {
padding: 8px;
background: var(--bg-primary);
border-radius: 4px;
border: 1px solid var(--border-light);
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 12px;
line-height: 1.5;
color: var(--text-secondary);
overflow-x: auto;
white-space: pre-wrap;
word-break: break-word;
}
/* Text content */
.text-content {
padding: 0;
font-size: 15px;
line-height: 1.7;
color: var(--text-primary);
word-break: break-word;
contain: layout style;
}
.text-content :deep(.placeholder) {
color: var(--text-tertiary);
}
/* Streaming cursor indicator */
.streaming-indicator {
display: flex;
align-items: center;
gap: 8px;
font-size: 12px;
color: var(--text-tertiary);
}
/* Add separator only when there are step items above the indicator */
.process-block:has(.step-item) .streaming-indicator {
margin-top: 8px;
padding: 8px 0 0;
border-top: 1px solid var(--border-light);
}
</style>

View File

@ -36,101 +36,133 @@ api.interceptors.response.use(
}
)
/**
 * SSE streaming request helper.
 *
 * POSTs to `/api{url}` and parses the `text/event-stream` response line by
 * line, dispatching `process_step` / `done` / `error` events to the
 * supplied callbacks.
 *
 * @param {string} url - API path (without the `/api` prefix)
 * @param {object} body - JSON request body
 * @param {object} callbacks - event callbacks: { onProcessStep, onDone, onError }
 * @returns {Promise & { abort: () => void }} the in-flight promise,
 *          augmented with an `abort()` method that cancels the request
 */
export function createSSEStream(url, body, { onProcessStep, onDone, onError }) {
  const token = localStorage.getItem('access_token')
  const controller = new AbortController()
  const promise = (async () => {
    try {
      const res = await fetch(`/api${url}`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${token}`
        },
        body: JSON.stringify(body),
        signal: controller.signal
      })
      if (!res.ok) {
        const err = await res.json().catch(() => ({}))
        throw new Error(err.message || `HTTP ${res.status}`)
      }
      const reader = res.body.getReader()
      const decoder = new TextDecoder()
      let buffer = ''
      let completed = false
      // BUGFIX: the current event name must persist across reads — an
      // `event:` line and its `data:` line can arrive in different network
      // chunks; resetting per chunk dropped such events.
      let currentEvent = ''
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        const lines = buffer.split('\n')
        buffer = lines.pop() || ''  // keep the trailing partial line for the next read
        for (const line of lines) {
          if (line.startsWith('event: ')) {
            currentEvent = line.slice(7).trim()
          } else if (line.startsWith('data: ')) {
            // BUGFIX: a single malformed payload must not abort the stream.
            let data
            try {
              data = JSON.parse(line.slice(6))
            } catch {
              continue
            }
            if (currentEvent === 'process_step' && onProcessStep) {
              onProcessStep(data)
            } else if (currentEvent === 'done' && onDone) {
              completed = true
              onDone(data)
            } else if (currentEvent === 'error' && onError) {
              onError(data.content)
            }
          }
        }
      }
      if (!completed && onError) {
        onError('stream ended unexpectedly')
      }
    } catch (e) {
      if (e.name !== 'AbortError' && onError) {
        onError(e.message)
      }
    }
  })()
  promise.abort = () => controller.abort()
  return promise
}
// ============ Auth API ============
export const authAPI = {
  // Log in with user credentials
  login: (data) => api.post('/auth/login', data),
  // Register a new user
  register: (data) => api.post('/auth/register', data),
  // Log out the current session
  logout: () => api.post('/auth/logout'),
  // Fetch the currently authenticated user
  getMe: () => api.get('/auth/me')
}
// ============ Conversation API ============
export const conversationsAPI = {
  // List conversations (supports pagination params)
  list: (params) => api.get('/conversations/', { params }),
  // Create a conversation
  create: (data) => api.post('/conversations/', data),
  // Get conversation detail
  get: (id) => api.get(`/conversations/${id}`),
  // Update a conversation
  update: (id, data) => api.put(`/conversations/${id}`, data),
  // Delete a conversation
  delete: (id) => api.delete(`/conversations/${id}`)
}
// ============ Message API ============
export const messagesAPI = {
  // List messages in a conversation
  list: (conversationId, params) => api.get('/messages/', { params: { conversation_id: conversationId, ...params } }),
  // Send a message (non-streaming)
  send: (data) => api.post('/messages/', data),
  // Send a message (streaming over SSE).
  // BUGFIX: removed the stale pre-refactor fetch-based implementation that
  // preceded this key with an unbalanced brace (leftover merge/diff residue),
  // which both broke the syntax and shadowed this definition.
  sendStream: (data, callbacks) => {
    return createSSEStream('/messages/stream', {
      conversation_id: data.conversation_id,
      content: data.content,
      // NOTE(review): toolsEnabled is read from `callbacks`, not `data` —
      // confirm this is intentional at the call site.
      tools_enabled: callbacks.toolsEnabled !== false
    }, callbacks)
  },
  // Delete a message
  delete: (id) => api.delete(`/messages/${id}`)
}
// ============ Tool API ============
export const toolsAPI = {
  // List available tools
  list: (params) => api.get('/tools/', { params }),
  // Get tool detail by name
  get: (name) => api.get(`/tools/${name}`),
  // Execute a tool with the given payload
  execute: (name, data) => api.post(`/tools/${name}/execute`, data)
}
// ============ LLM Provider API ============
export const providersAPI = {
  // List configured providers
  list: () => api.get('/providers/'),
  // Create a provider
  create: (data) => api.post('/providers/', data),
  // Get provider detail
  get: (id) => api.get(`/providers/${id}`),
  // Update a provider
  update: (id, data) => api.put(`/providers/${id}`, data),
  // Delete a provider
  delete: (id) => api.delete(`/providers/${id}`),
  // Test provider connectivity
  test: (id) => api.post(`/providers/${id}/test`)
}
// 默认导出
export default api

View File

@ -1,294 +1,455 @@
/* ============ Global Reset & Base ============ */
:root {
--text: #6b6375;
--text-h: #08060d;
--bg: #fff;
--border: #e5e4e7;
--code-bg: #f4f3ec;
--accent: #2563eb;
--accent-bg: rgba(37, 99, 235, 0.1);
--accent-border: rgba(37, 99, 235, 0.5);
--social-bg: rgba(244, 243, 236, 0.5);
--shadow:
rgba(0, 0, 0, 0.1) 0 10px 15px -3px, rgba(0, 0, 0, 0.05) 0 4px 6px -2px;
/* 背景色 */
--bg-primary: #ffffff;
--bg-secondary: #f8fafc;
--bg-tertiary: #f0f4f8;
--bg-hover: rgba(37, 99, 235, 0.06);
--bg-active: rgba(37, 99, 235, 0.12);
--bg-input: #f8fafc;
--bg-code: #f1f5f9;
--bg-thinking: #f1f5f9;
--sans: system-ui, 'Segoe UI', Roboto, sans-serif;
--heading: system-ui, 'Segoe UI', Roboto, sans-serif;
--mono: ui-monospace, Consolas, monospace;
/* 文字颜色 */
--text-primary: #1e293b;
--text-secondary: #64748b;
--text-tertiary: #94a3b8;
/* 边框颜色 */
--border-light: rgba(0, 0, 0, 0.06);
--border-medium: rgba(0, 0, 0, 0.08);
--border-input: rgba(0, 0, 0, 0.08);
/* 主题色 */
--accent-primary: #2563eb;
--accent-primary-hover: #3b82f6;
--accent-primary-light: rgba(37, 99, 235, 0.08);
--accent-primary-medium: rgba(37, 99, 235, 0.15);
/* 工具调用颜色 */
--tool-color: #5478FF;
--tool-color-hover: #3d5ce0;
--tool-bg: rgba(84, 120, 255, 0.18);
--tool-bg-hover: rgba(84, 120, 255, 0.28);
--tool-border: rgba(84, 120, 255, 0.22);
/* 附件颜色 */
--attachment-color: #ca8a04;
--attachment-color-hover: #a16207;
--attachment-bg: rgba(202, 138, 4, 0.15);
/* 状态颜色 */
--success-color: #059669;
--success-bg: rgba(16, 185, 129, 0.1);
--danger-color: #ef4444;
--danger-bg: rgba(239, 68, 68, 0.08);
/* 滚动条颜色 */
--scrollbar-thumb: rgba(0, 0, 0, 0.08);
--scrollbar-thumb-sidebar: rgba(0, 0, 0, 0.1);
/* 遮罩背景 */
--overlay-bg: rgba(0, 0, 0, 0.3);
--avatar-gradient: linear-gradient(135deg, #3b82f6, #60a5fa);
/* 兼容旧变量 */
--text: var(--text-primary);
--text-h: var(--text-primary);
--bg: var(--bg-primary);
--border: var(--border-light);
--accent: var(--accent-primary);
--accent-bg: var(--accent-primary-light);
--accent-border: var(--accent-primary-medium);
--social-bg: rgba(244, 243, 236, 0.5);
--sans: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
--heading: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
--mono: ui-monospace, Consolas, 'JetBrains Mono', monospace;
font: 18px/145% var(--sans);
letter-spacing: 0.18px;
color-scheme: light dark;
color: var(--text);
background: var(--bg);
color: var(--text-primary);
background: var(--bg-primary);
font-synthesis: none;
text-rendering: optimizeLegibility;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
@media (max-width: 1024px) {
font-size: 16px;
}
}
@media (prefers-color-scheme: dark) {
:root {
--text: #9ca3af;
--text-h: #f3f4f6;
--bg: #16171d;
--border: #2e303a;
--code-bg: #1f2028;
--accent: #60a5fa;
--accent-bg: rgba(96, 165, 250, 0.15);
--accent-border: rgba(96, 165, 250, 0.5);
[data-theme="dark"] {
--bg-primary: #1a1a1a;
--bg-secondary: #141414;
--bg-tertiary: #0a0a0a;
--bg-hover: rgba(255, 255, 255, 0.08);
--bg-active: rgba(255, 255, 255, 0.12);
--bg-input: #141414;
--bg-code: #141414;
--bg-thinking: #141414;
--text-primary: #f0f0f0;
--text-secondary: #a0a0a0;
--text-tertiary: #606060;
--border-light: rgba(255, 255, 255, 0.08);
--border-medium: rgba(255, 255, 255, 0.12);
--border-input: rgba(255, 255, 255, 0.1);
--accent-primary: #3b82f6;
--accent-primary-hover: #60a5fa;
--accent-primary-light: rgba(59, 130, 246, 0.15);
--accent-primary-medium: rgba(59, 130, 246, 0.25);
--tool-color: #5478FF;
--tool-color-hover: #7a96ff;
--tool-bg: rgba(84, 120, 255, 0.28);
--tool-bg-hover: rgba(84, 120, 255, 0.40);
--tool-border: rgba(84, 120, 255, 0.32);
--attachment-color: #facc15;
--attachment-color-hover: #fde047;
--attachment-bg: rgba(250, 204, 21, 0.22);
--success-color: #34d399;
--success-bg: rgba(52, 211, 153, 0.15);
--danger-color: #f87171;
--danger-bg: rgba(248, 113, 113, 0.15);
--scrollbar-thumb: rgba(255, 255, 255, 0.1);
--scrollbar-thumb-sidebar: rgba(255, 255, 255, 0.15);
--overlay-bg: rgba(0, 0, 0, 0.6);
--avatar-gradient: linear-gradient(135deg, #3b82f6, #60a5fa);
/* 兼容旧变量 */
--text: var(--text-primary);
--text-h: var(--text-primary);
--bg: var(--bg-primary);
--border: var(--border-light);
--accent: var(--accent-primary);
--accent-bg: var(--accent-primary-light);
--accent-border: var(--accent-primary-medium);
--social-bg: rgba(47, 48, 58, 0.5);
--shadow:
rgba(0, 0, 0, 0.4) 0 10px 15px -3px, rgba(0, 0, 0, 0.25) 0 4px 6px -2px;
}
#social .button-icon {
filter: invert(1) brightness(2);
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
html, body {
height: 100%;
overflow: hidden;
}
body {
margin: 0;
}
h1,
h2 {
font-family: var(--heading);
font-weight: 500;
color: var(--text-h);
}
h1 {
font-size: 56px;
letter-spacing: -1.68px;
margin: 32px 0;
@media (max-width: 1024px) {
font-size: 36px;
margin: 20px 0;
}
}
h2 {
font-size: 24px;
line-height: 118%;
letter-spacing: -0.24px;
margin: 0 0 8px;
@media (max-width: 1024px) {
font-size: 20px;
}
}
p {
margin: 0;
}
code,
.counter {
font-family: var(--mono);
display: inline-flex;
border-radius: 4px;
color: var(--text-h);
}
code {
font-size: 15px;
line-height: 135%;
padding: 4px 8px;
background: var(--code-bg);
}
.counter {
font-size: 16px;
padding: 5px 10px;
border-radius: 5px;
color: var(--accent);
background: var(--accent-bg);
border: 2px solid transparent;
transition: border-color 0.3s;
margin-bottom: 24px;
&:hover {
border-color: var(--accent-border);
}
&:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
}
.hero {
position: relative;
.base,
.framework,
.vite {
inset-inline: 0;
margin: 0 auto;
}
.base {
width: 170px;
position: relative;
z-index: 0;
}
.framework,
.vite {
position: absolute;
}
.framework {
z-index: 1;
top: 34px;
height: 28px;
transform: perspective(2000px) rotateZ(300deg) rotateX(44deg) rotateY(39deg)
scale(1.4);
}
.vite {
z-index: 0;
top: 107px;
height: 26px;
width: auto;
transform: perspective(2000px) rotateZ(300deg) rotateX(40deg) rotateY(39deg)
scale(0.8);
}
}
#app {
width: 100%;
max-width: 100%;
margin: 0 auto;
min-height: 100vh;
display: flex;
flex-direction: column;
box-sizing: border-box;
height: 100%;
}
#center {
display: flex;
flex-direction: column;
gap: 25px;
place-content: center;
place-items: center;
flex-grow: 1;
@media (max-width: 1024px) {
padding: 32px 20px 24px;
gap: 18px;
}
/* ============ Scrollbar ============ */
::-webkit-scrollbar {
width: 6px;
height: 6px;
}
#next-steps {
display: flex;
border-top: 1px solid var(--border);
text-align: left;
& > div {
flex: 1 1 0;
padding: 32px;
@media (max-width: 1024px) {
padding: 24px 20px;
}
::-webkit-scrollbar-track {
background: transparent;
}
.icon {
margin-bottom: 16px;
width: 22px;
height: 22px;
::-webkit-scrollbar-thumb {
background: var(--scrollbar-thumb);
border-radius: 3px;
}
@media (max-width: 1024px) {
flex-direction: column;
text-align: center;
}
::-webkit-scrollbar-thumb:hover {
background: var(--border-medium);
}
#docs {
border-right: 1px solid var(--border);
@media (max-width: 1024px) {
border-right: none;
border-bottom: 1px solid var(--border);
}
}
#next-steps ul {
list-style: none;
padding: 0;
display: flex;
gap: 8px;
margin: 32px 0 0;
.logo {
height: 18px;
}
a {
color: var(--text-h);
font-size: 16px;
border-radius: 6px;
background: var(--social-bg);
display: flex;
padding: 6px 12px;
/* ============ Ghost Button ============ */
/* Borderless inline button; color variants only tint the hover state. */
.ghost-btn {
  background: none;
  border: none;
  color: var(--text-tertiary);
  cursor: pointer;
  padding: 4px 6px;
  border-radius: 4px;
  font-size: 14px;
  display: inline-flex;
  align-items: center;
  gap: 8px;
  justify-content: center;
  transition: all 0.15s;
}
.ghost-btn:hover {
  background: var(--bg-hover);
  color: var(--text-secondary);
}
/* destructive actions (delete etc.) */
.ghost-btn.danger:hover {
  background: var(--danger-bg);
  color: var(--danger-color);
}
/* confirming / positive actions */
.ghost-btn.success:hover {
  background: var(--success-bg);
  color: var(--success-color);
}
/* accent-highlighted actions */
.ghost-btn.accent:hover {
  background: var(--accent-primary-light);
  color: var(--accent-primary);
}
/* ============ Markdown Content ============ */
/* Container for rendered markdown (assistant replies). */
.md-content {
  font-size: 15px;
  line-height: 1.7;
  color: var(--text-primary);
  /* long tokens (URLs, code identifiers) wrap instead of overflowing */
  word-break: break-word;
}
/* Headings scale down relative to the surrounding chat text */
.md-content h1,
.md-content h2,
.md-content h3 {
  margin: 0.8em 0 0.4em;
  font-weight: 600;
}
.md-content h1 { font-size: 1.4em; }
.md-content h2 { font-size: 1.2em; }
.md-content h3 { font-size: 1.1em; }
.md-content p {
  margin: 0.4em 0;
}
/* Inline code */
.md-content code {
  background: var(--bg-code);
  padding: 2px 6px;
  border-radius: 4px;
  font-family: var(--mono);
  font-size: 0.9em;
}
/* Fenced code blocks scroll horizontally rather than wrapping */
.md-content pre {
  background: var(--bg-code);
  padding: 12px;
  border-radius: 8px;
  overflow-x: auto;
  margin: 0.5em 0;
}
/* Code inside a pre keeps the pre's background, not the inline-code chip */
.md-content pre code {
  background: transparent;
  padding: 0;
}
.md-content blockquote {
  border-left: 3px solid var(--border-medium);
  padding-left: 12px;
  margin: 0.5em 0;
  color: var(--text-secondary);
}
.md-content ul,
.md-content ol {
  padding-left: 20px;
  margin: 0.5em 0;
}
.md-content a {
color: var(--accent-primary);
text-decoration: none;
transition: box-shadow 0.3s;
&:hover {
box-shadow: var(--shadow);
}
.button-icon {
height: 18px;
width: 18px;
}
}
@media (max-width: 1024px) {
margin-top: 20px;
flex-wrap: wrap;
justify-content: center;
li {
flex: 1 1 calc(50% - 8px);
.md-content a:hover {
text-decoration: underline;
}
a {
.md-content img {
max-width: 100%;
border-radius: 8px;
}
.md-content table {
border-collapse: collapse;
width: 100%;
justify-content: center;
box-sizing: border-box;
}
}
margin: 0.5em 0;
}
#spacer {
height: 88px;
border-top: 1px solid var(--border);
@media (max-width: 1024px) {
height: 48px;
}
.md-content th,
.md-content td {
border: 1px solid var(--border-light);
padding: 8px 12px;
text-align: left;
}
.ticks {
position: relative;
.md-content th {
background: var(--bg-secondary);
}
/* ============ Message Bubble Shared ============ */
.message-bubble {
display: flex;
gap: 12px;
margin-bottom: 16px;
width: 100%;
&::before,
&::after {
content: '';
position: absolute;
top: -4.5px;
border: 5px solid transparent;
}
&::before {
left: 0;
border-left-color: var(--border);
.message-bubble.user {
flex-direction: row-reverse;
}
&::after {
right: 0;
border-right-color: var(--border);
.message-container {
display: flex;
flex-direction: column;
min-width: 200px;
width: 100%;
}
.message-bubble.user .message-container {
align-items: flex-end;
width: fit-content;
max-width: 85%;
}
.message-bubble.assistant .message-container {
align-items: flex-start;
flex: 1 1 auto;
width: 100%;
min-width: 0;
}
.message-bubble.assistant .message-body {
width: 100%;
}
/* Square avatar badge shown next to each message bubble */
.avatar {
  width: 32px;
  height: 32px;
  border-radius: 8px;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 11px;
  font-weight: 700;
  letter-spacing: -0.3px;
  /* never let the flex row squeeze the avatar */
  flex-shrink: 0;
}
/* Role-specific avatar colors */
.user .avatar {
  background: linear-gradient(135deg, #2563eb, #3b82f6);
  color: white;
  font-size: 12px;
}
.assistant .avatar {
  background: var(--avatar-gradient);
  color: white;
  font-size: 12px;
}
/* Card that wraps the message content */
.message-body {
  flex: 1;
  /* min-width: 0 lets long content shrink/wrap inside the flex row */
  min-width: 0;
  padding: 16px;
  border: 1px solid var(--border-light);
  border-radius: 12px;
  background: var(--bg-primary);
  transition: background 0.2s, border-color 0.2s;
}
/* ============ App Layout ============ */
/* Full-viewport shell: sidebar + main panel side by side, no page scroll */
.app {
  height: 100vh;
  display: flex;
  overflow: hidden;
}
/* Main panel takes remaining width; inner areas manage their own scrolling */
.main-panel {
  flex: 1 1 0;
  min-width: 0;
  overflow: hidden;
  transition: all 0.2s;
}
/* ============ Transitions ============ */
/* Vue <Transition name="fade"> hooks */
.fade-enter-active,
.fade-leave-active {
  transition: opacity 0.2s ease;
}
.fade-enter-from,
.fade-leave-to {
  opacity: 0;
}
/* ============ Modal Overlay ============ */
/* Dimmed backdrop that centers the modal */
.modal-overlay {
  position: fixed;
  inset: 0;
  background: var(--overlay-bg);
  display: flex;
  align-items: center;
  justify-content: center;
  z-index: 100;
}
/* Modal card: capped size, internal scrolling when content overflows */
.modal-content {
  background: var(--bg-primary);
  border-radius: 12px;
  max-width: 600px;
  width: 90%;
  max-height: 80vh;
  overflow: auto;
  box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
}
/* ============ Form ============ */
/* One labeled field (label + input/textarea/select + optional hint) */
.form-group {
  margin-bottom: 16px;
}
.form-group label {
  display: block;
  font-size: 13px;
  font-weight: 500;
  color: var(--text-secondary);
  margin-bottom: 8px;
}
/* Shared control styling across input types */
.form-group input,
.form-group textarea,
.form-group select {
  width: 100%;
  padding: 10px 12px;
  background: var(--bg-input);
  border: 1px solid var(--border-input);
  border-radius: 8px;
  color: var(--text-primary);
  font-size: 14px;
  /* default outline suppressed; focus is shown via border-color below */
  outline: none;
  transition: border-color 0.2s;
}
.form-group input:focus,
.form-group textarea:focus,
.form-group select:focus {
  border-color: var(--accent-primary);
}
.form-group textarea {
  resize: vertical;
  min-height: 80px;
}
/* Small helper text under a field */
.form-group .hint {
  font-size: 12px;
  color: var(--text-tertiary);
  margin-top: 4px;
}

View File

@ -0,0 +1,62 @@
import { marked } from 'marked'
import katex from 'katex'
// Render a TeX snippet to an HTML string via KaTeX.
// displayMode=true produces block math, false produces inline math.
// On any rendering failure the raw source text is returned unchanged.
function renderMath(text, displayMode) {
  let html
  try {
    html = katex.renderToString(text, {
      displayMode,
      throwOnError: false,
      strict: false,
    })
  } catch {
    // KaTeX should not throw with throwOnError:false, but be defensive.
    html = text
  }
  return html
}
// marked extension for inline math $...$
const mathExtension = {
  name: 'math',
  level: 'inline',
  // Tell marked where the next candidate delimiter is: a lone "$"
  // that is neither preceded nor followed by another "$" (so "$$"
  // is left for the block-math extension).
  start(src) {
    const idx = src.search(/(?<!\$)\$(?!\$)/)
    return idx === -1 ? undefined : idx
  },
  // Match "$...$" on a single line; returning undefined lets other
  // tokenizers handle the text instead.
  tokenizer(src) {
    const match = src.match(/^\$\s*([^\$\n]+?)\s*\$/)
    if (match) {
      return { type: 'math', raw: match[0], text: match[1].trim(), displayMode: false }
    }
  },
  // Inline math renders with displayMode=false (set by the tokenizer).
  renderer(token) {
    return renderMath(token.text, token.displayMode)
  },
}
// marked extension for block math $$...$$
const blockMathExtension = {
  name: 'blockMath',
  level: 'block',
  // Cheap scan for the opening "$$" delimiter.
  start(src) {
    const idx = src.indexOf('$$')
    return idx === -1 ? undefined : idx
  },
  // Match "$$...$$" (may span multiple lines); the trailing optional
  // newline is consumed so it is not re-tokenized as a paragraph break.
  tokenizer(src) {
    const match = src.match(/^\$\$\s*([\s\S]+?)\s*\$\$\n?/)
    if (match) {
      return { type: 'blockMath', raw: match[0], text: match[1].trim() }
    }
  },
  // Wrap in a div so block math can be styled/centered via CSS.
  renderer(token) {
    return `<div class="math-block">${renderMath(token.text, true)}</div>`
  },
}
// Register both math extensions (block listed first so "$$...$$" is
// claimed before the inline "$...$" tokenizer sees it), enable GFM
// features, and treat single newlines as <br> breaks.
marked.use({
  extensions: [blockMathExtension, mathExtension],
  breaks: true,
  gfm: true
})
// Convert a markdown string (with the math extensions registered
// above) to an HTML string.
export function renderMarkdown(text) {
  const html = marked.parse(text)
  return html
}

View File

@ -1,55 +1,131 @@
<template>
<div class="chat-view">
<div class="chat-container">
<div class="messages" ref="messagesContainer">
<div v-if="loading" class="loading">加载中...</div>
<div v-else-if="!messages.length" class="empty">
<p>开始对话吧</p>
<div class="chat-view main-panel">
<div v-if="!conversationId" class="welcome">
<div class="welcome-icon">
<svg viewBox="0 0 64 64" width="36" height="36">
<rect width="64" height="64" rx="14" fill="url(#favBg)"/>
<defs>
<linearGradient id="favBg" x1="0%" y1="0%" x2="100%" y2="100%">
<stop offset="0%" stop-color="#2563eb"/>
<stop offset="100%" stop-color="#60a5fa"/>
</linearGradient>
</defs>
<text x="32" y="40" text-anchor="middle" font-family="-apple-system,BlinkMacSystemFont,sans-serif" font-size="18" font-weight="800" fill="#fff" letter-spacing="-0.5">Luxx</text>
</svg>
</div>
<div v-for="msg in messages" :key="msg.id" :class="['message', msg.role]">
<div class="message-avatar">{{ msg.role === 'user' ? 'U' : 'A' }}</div>
<div class="message-content">
<div class="message-text">{{ msg.content }}</div>
<div class="message-time">{{ formatTime(msg.created_at) }}</div>
<h1>Chat</h1>
<p>选择一个对话开始或创建新对话</p>
</div>
<template v-else>
<div class="chat-header">
<div class="chat-title-area">
<h2 class="chat-title">{{ conversationTitle || '新对话' }}</h2>
</div>
</div>
<div v-if="streaming" class="message assistant streaming">
<div class="message-avatar">A</div>
<div class="message-content">
<div class="message-text">{{ streamContent }}<span class="cursor"></span></div>
<div ref="messagesContainer" class="messages-container">
<div v-if="loading" class="load-more-top">
<span>加载中...</span>
</div>
<div class="messages-list">
<div
v-for="msg in messages"
:key="msg.id"
:data-msg-id="msg.id"
>
<MessageBubble
:role="msg.role"
:text="msg.text || msg.content"
:tool-calls="msg.tool_calls"
:process-steps="msg.process_steps"
:token-count="msg.token_count"
:created-at="msg.created_at"
:deletable="msg.role === 'user'"
:attachments="msg.attachments"
@delete="deleteMessage(msg.id)"
/>
</div>
<!-- 流式消息 -->
<div v-if="streamingMessage" class="message-bubble assistant streaming">
<div class="avatar">Luxx</div>
<div class="message-body">
<ProcessBlock
:process-steps="streamingMessage.process_steps"
:streaming="true"
/>
</div>
</div>
</div>
</div>
<div class="input-area">
<div class="message-input">
<div class="input-container">
<textarea
ref="textareaRef"
v-model="inputMessage"
@keydown.enter.exact.prevent="sendMessage"
placeholder="输入消息..."
:placeholder="sending ? 'AI 正在回复中...' : '输入消息... (Shift+Enter 换行)'"
rows="1"
@input="autoResize"
@keydown="onKeydown"
></textarea>
<button @click="sendMessage" :disabled="!inputMessage.trim() || sending" class="send-btn">
{{ sending ? '发送中...' : '发送' }}
<div class="input-footer">
<div class="input-actions">
<button
class="btn-send"
:class="{ active: canSend }"
:disabled="!canSend || sending"
@click="sendMessage"
>
<span v-html="sendIcon"></span>
</button>
</div>
</div>
</div>
<div class="input-hint">AI 助手回复内容仅供参考</div>
</div>
</template>
</div>
</template>
<script setup>
import { ref, onMounted, nextTick } from 'vue'
import { ref, computed, onMounted, nextTick, watch } from 'vue'
import { useRoute } from 'vue-router'
import { conversationsAPI, messagesAPI } from '../services/api.js'
import ProcessBlock from '../components/ProcessBlock.vue'
import MessageBubble from '../components/MessageBubble.vue'
import { renderMarkdown } from '../utils/markdown.js'
const route = useRoute()
const messages = ref([])
const inputMessage = ref('')
const loading = ref(true)
const sending = ref(false)
const streaming = ref(false)
const streamContent = ref('')
const streamingMessage = ref(null)
const messagesContainer = ref(null)
const textareaRef = ref(null)
const conversationId = ref(route.params.id)
const conversationTitle = ref('')
const canSend = computed(() => inputMessage.value.trim().length > 0)
const sendIcon = `<svg viewBox="0 0 24 24" width="18" height="18" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><line x1="22" y1="2" x2="11" y2="13"></line><polygon points="22 2 15 22 11 13 2 9 22 2"></polygon></svg>`
// Grow the input textarea with its content, capped at 200px.
function autoResize() {
  const textarea = textareaRef.value
  if (!textarea) return
  // Collapse first so scrollHeight reflects the current content only.
  textarea.style.height = 'auto'
  const next = Math.min(textarea.scrollHeight, 200)
  textarea.style.height = next + 'px'
}
// Keyboard handling for the input: plain Enter sends the message,
// Shift+Enter falls through to insert a newline.
function onKeydown(e) {
  if (e.key !== 'Enter' || e.shiftKey) return
  e.preventDefault()
  sendMessage()
}
const loadMessages = async () => {
loading.value = true
@ -57,6 +133,9 @@ const loadMessages = async () => {
const res = await messagesAPI.list(conversationId.value)
if (res.success) {
messages.value = res.data.messages || []
if (messages.value.length > 0) {
conversationTitle.value = res.data.title || ''
}
}
} catch (e) {
console.error(e)
@ -66,6 +145,15 @@ const loadMessages = async () => {
}
}
// Delete a message on the server, then drop it from local state.
// Failures are logged and leave the local list untouched.
const deleteMessage = async (msgId) => {
  try {
    await messagesAPI.delete(msgId)
    const remaining = messages.value.filter((m) => m.id !== msgId)
    messages.value = remaining
  } catch (err) {
    console.error(err)
  }
}
const sendMessage = async () => {
if (!inputMessage.value.trim() || sending.value) return
@ -73,119 +161,92 @@ const sendMessage = async () => {
inputMessage.value = ''
sending.value = true
//
nextTick(() => {
autoResize()
})
//
messages.value.push({
id: Date.now(),
role: 'user',
content: content,
text: content,
attachments: [],
process_steps: [],
created_at: new Date().toISOString()
})
scrollToBottom()
try {
streaming.value = true
streamContent.value = ''
const response = await messagesAPI.sendStream({
conversation_id: conversationId.value,
content: content,
tools_enabled: true
})
const reader = response.body.getReader()
const decoder = new TextDecoder()
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value)
const lines = chunk.split('\n')
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6)
if (data === '[DONE]') continue
try {
const parsed = JSON.parse(data)
if (parsed.type === 'text') {
streamContent.value += parsed.content
} else if (parsed.type === 'tool_call') {
//
const data = parsed.data
if (data && Array.isArray(data) && data.length > 0) {
//
const hasFunctionName = data.some(tc => tc.function && tc.function.name)
if (hasFunctionName) {
streamContent.value += '\n\n[调用工具] '
data.forEach(tc => {
if (tc.function && tc.function.name) {
streamContent.value += `${tc.function.name} `
}
})
}
}
} else if (parsed.type === 'tool_result') {
//
streamContent.value += '\n\n[工具结果]\n'
if (Array.isArray(parsed.data)) {
parsed.data.forEach(tr => {
if (tr.content) {
try {
const result = JSON.parse(tr.content)
if (result.success && result.data && result.data.results) {
result.data.results.forEach(r => {
streamContent.value += `${r.title}\n${r.snippet}\n\n`
})
} else {
streamContent.value += tr.content.substring(0, 500)
}
} catch {
streamContent.value += tr.content.substring(0, 500)
}
} else {
streamContent.value += '无结果'
}
})
}
} else if (parsed.type === 'error') {
streamContent.value += '\n\n[错误] ' + (parsed.error || '未知错误')
}
} catch (e) {
console.error('Parse error:', e, data)
}
}
}
}
//
if (streamContent.value) {
messages.value.push({
//
streamingMessage.value = {
id: Date.now() + 1,
role: 'assistant',
content: streamContent.value,
process_steps: [],
created_at: new Date().toISOString()
}
// SSE
messagesAPI.sendStream(
{ conversation_id: conversationId.value, content },
{
onProcessStep: (step) => {
if (!streamingMessage.value) return
// id
const idx = streamingMessage.value.process_steps.findIndex(s => s.id === step.id)
if (idx >= 0) {
streamingMessage.value.process_steps[idx] = step
} else {
streamingMessage.value.process_steps.push(step)
}
},
onDone: () => {
//
if (streamingMessage.value) {
messages.value.push({
...streamingMessage.value,
created_at: new Date().toISOString()
})
streamingMessage.value = null
}
} catch (e) {
console.error('发送失败:', e)
alert('发送失败: ' + e.message)
} finally {
sending.value = false
streaming.value = false
scrollToBottom()
},
onError: (error) => {
console.error('Stream error:', error)
if (streamingMessage.value) {
streamingMessage.value.process_steps.push({
id: 'error-' + Date.now(),
index: streamingMessage.value.process_steps.length,
type: 'text',
content: `[错误] ${error}`
})
}
sending.value = false
}
}
)
scrollToBottom()
}
const scrollToBottom = () => {
nextTick(() => {
if (messagesContainer.value) {
messagesContainer.value.scrollTop = messagesContainer.value.scrollHeight
messagesContainer.value.scrollTo({
top: messagesContainer.value.scrollHeight,
behavior: streamingMessage.value ? 'instant' : 'smooth'
})
}
})
}
//
// Auto-scroll during streaming: whenever the number of process steps
// on the in-flight assistant message changes, pin the viewport to the
// bottom of the message list. The optional chaining makes the watch
// source safe while no message is streaming.
watch(() => streamingMessage.value?.process_steps?.length, () => {
  if (streamingMessage.value) {
    scrollToBottom()
  }
})
// Format a timestamp as HH:MM (zh-CN locale); empty input yields ''.
const formatTime = (time) => {
  if (!time) return ''
  const opts = { hour: '2-digit', minute: '2-digit' }
  return new Date(time).toLocaleTimeString('zh-CN', opts)
}
@ -195,23 +256,191 @@ onMounted(loadMessages)
</script>
<style scoped>
.chat-view { height: calc(100vh - 70px); display: flex; flex-direction: column; }
.chat-container { flex: 1; display: flex; flex-direction: column; max-width: 900px; margin: 0 auto; width: 100%; }
.messages { flex: 1; overflow-y: auto; padding: 1rem; }
.loading, .empty { text-align: center; padding: 4rem; color: var(--text); }
.message { display: flex; gap: 1rem; margin-bottom: 1.5rem; }
.message.user { flex-direction: row-reverse; }
.message-avatar { width: 40px; height: 40px; background: var(--code-bg); border-radius: 50%; display: flex; align-items: center; justify-content: center; font-size: 1.2rem; flex-shrink: 0; }
.message.user .message-avatar { background: var(--accent-bg); }
.message-content { max-width: 70%; }
.message-text { padding: 1rem; background: var(--code-bg); border-radius: 12px; line-height: 1.6; white-space: pre-wrap; }
.message.user .message-text { background: var(--accent); color: white; }
.message-time { font-size: 0.75rem; color: var(--text); margin-top: 0.25rem; }
.cursor { animation: blink 1s infinite; }
@keyframes blink { 0%, 50% { opacity: 1; } 51%, 100% { opacity: 0; } }
.input-area { display: flex; gap: 0.75rem; padding: 1rem; border-top: 1px solid var(--border); }
.input-area textarea { flex: 1; padding: 0.875rem 1rem; border: 1px solid var(--border); border-radius: 12px; resize: none; font-size: 1rem; background: var(--bg); color: var(--text); }
.input-area textarea:focus { outline: none; border-color: var(--accent); }
.send-btn { padding: 0.875rem 1.5rem; background: var(--accent); color: white; border: none; border-radius: 12px; font-size: 1rem; cursor: pointer; white-space: nowrap; }
.send-btn:disabled { opacity: 0.6; cursor: not-allowed; }
/* Chat view root: fills the main panel; children handle their own scroll */
.chat-view {
  flex: 1 1 0;
  display: flex;
  flex-direction: column;
  height: 100vh;
  overflow: hidden;
  min-width: 0;
}
/* Empty state shown when no conversation is selected */
.welcome {
  flex: 1;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  color: var(--text-tertiary);
}
.welcome-icon {
  width: 64px;
  height: 64px;
  border-radius: 16px;
  display: flex;
  align-items: center;
  justify-content: center;
  margin-bottom: 20px;
  overflow: hidden;
}
.welcome h1 {
  font-size: 24px;
  color: var(--text-primary);
  margin: 0 0 8px;
}
.welcome p {
  font-size: 14px;
}
/* Sticky-looking translucent header with backdrop blur */
.chat-header {
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: 12px 24px;
  border-bottom: 1px solid var(--border-light);
  background: color-mix(in srgb, var(--bg-primary) 70%, transparent);
  backdrop-filter: blur(40px);
  -webkit-backdrop-filter: blur(40px);
  transition: background 0.2s, border-color 0.2s;
}
.chat-title-area {
  display: flex;
  align-items: center;
  gap: 8px;
  min-width: 0;
}
/* Single-line title with ellipsis overflow */
.chat-title {
  font-size: 16px;
  font-weight: 600;
  color: var(--text-primary);
  margin: 0;
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
}
/* Scrollable message area; scrollbar hidden on all engines */
.messages-container {
  flex: 1 1 auto;
  overflow-y: auto;
  padding: 16px 0;
  width: 100%;
  display: flex;
  flex-direction: column;
  scrollbar-width: none; /* Firefox */
  -ms-overflow-style: none; /* legacy Edge/IE */
}
.messages-container::-webkit-scrollbar {
  display: none; /* WebKit/Blink */
}
/* Indicator row while older messages load */
.load-more-top {
  text-align: center;
  padding: 12px 0;
  color: var(--text-tertiary);
  font-size: 13px;
}
/* Centered column holding the message bubbles */
.messages-list {
  width: 80%;
  margin: 0 auto;
  padding: 0 16px;
}
/* Message Input */
.message-input {
  padding: 16px 24px 12px;
  background: var(--bg-primary);
  border-top: 1px solid var(--border-light);
  transition: background 0.2s, border-color 0.2s;
}
/* Bordered box around textarea + footer; highlights while focused */
.input-container {
  display: flex;
  flex-direction: column;
  background: var(--bg-input);
  border: 1px solid var(--border-input);
  border-radius: 12px;
  padding: 12px;
  transition: border-color 0.2s, background 0.2s;
}
.input-container:focus-within {
  border-color: var(--accent-primary);
}
/* Borderless textarea; height driven by autoResize() up to 200px */
textarea {
  width: 100%;
  background: none;
  border: none;
  color: var(--text-primary);
  font-size: 15px;
  line-height: 1.6;
  resize: none;
  outline: none;
  font-family: inherit;
  min-height: 36px;
  max-height: 200px;
  padding: 0;
}
textarea::placeholder {
  color: var(--text-tertiary);
}
.input-footer {
  display: flex;
  justify-content: flex-end;
  padding-top: 8px;
  margin-top: 4px;
}
.input-actions {
  display: flex;
  align-items: center;
  gap: 8px;
}
/* Send button: muted/disabled look by default ... */
.btn-send {
  width: 36px;
  height: 36px;
  border-radius: 8px;
  border: none;
  background: var(--bg-code);
  color: var(--text-tertiary);
  cursor: not-allowed;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.15s ease;
}
/* ... and accent-colored once there is content to send (.active) */
.btn-send.active {
  background: var(--accent-primary);
  color: white;
  cursor: pointer;
}
.btn-send.active:hover {
  background: var(--accent-primary-hover);
  transform: translateY(-1px);
  box-shadow: 0 2px 8px rgba(37, 99, 235, 0.3);
}
.btn-send.active:active {
  transform: translateY(0);
  box-shadow: none;
}
/* Disclaimer line under the input */
.input-hint {
  text-align: center;
  font-size: 12px;
  color: var(--text-tertiary);
  margin-top: 8px;
}
</style>

View File

@ -130,13 +130,36 @@ class Conversation(Base):
class Message(Base):
"""Message model"""
"""Message model
content 字段统一使用 JSON 格式存储
**User 消息**
{
"text": "用户输入的文本内容",
"attachments": [
{"name": "utils.py", "extension": "py", "content": "..."}
]
}
**Assistant 消息**
{
"text": "AI 回复的文本内容",
"tool_calls": [...], // 遗留的扁平结构
"steps": [ // 有序步骤用于渲染主要数据源
{"id": "step-0", "index": 0, "type": "thinking", "content": "..."},
{"id": "step-1", "index": 1, "type": "text", "content": "..."},
{"id": "step-2", "index": 2, "type": "tool_call", "id_ref": "call_xxx", "name": "...", "arguments": "..."},
{"id": "step-3", "index": 3, "type": "tool_result", "id_ref": "call_xxx", "name": "...", "content": "..."}
]
}
"""
__tablename__ = "messages"
id: Mapped[str] = mapped_column(String(64), primary_key=True)
conversation_id: Mapped[str] = mapped_column(String(64), ForeignKey("conversations.id"), nullable=False)
role: Mapped[str] = mapped_column(String(16), nullable=False)
content: Mapped[str] = mapped_column(Text, nullable=False)
role: Mapped[str] = mapped_column(String(16), nullable=False) # user, assistant, system, tool
content: Mapped[str] = mapped_column(Text, nullable=False, default="")
token_count: Mapped[int] = mapped_column(Integer, default=0)
created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow)
@ -144,11 +167,39 @@ class Message(Base):
conversation: Mapped["Conversation"] = relationship("Conversation", back_populates="messages")
def to_dict(self):
return {
"""Convert to dictionary, extracting process_steps for frontend"""
import json
result = {
"id": self.id,
"conversation_id": self.conversation_id,
"role": self.role,
"content": self.content,
"token_count": self.token_count,
"created_at": self.created_at.isoformat() if self.created_at else None
}
# Parse content JSON
try:
content_obj = json.loads(self.content) if self.content else {}
except json.JSONDecodeError:
# Legacy plain text content
result["content"] = self.content
result["text"] = self.content
result["attachments"] = []
result["tool_calls"] = []
result["process_steps"] = []
return result
# Extract common fields
result["text"] = content_obj.get("text", "")
result["attachments"] = content_obj.get("attachments", [])
result["tool_calls"] = content_obj.get("tool_calls", [])
# Extract steps as process_steps for frontend rendering
result["process_steps"] = content_obj.get("steps", [])
# For backward compatibility
if "content" not in result:
result["content"] = result["text"]
return result

View File

@ -50,7 +50,8 @@ def list_messages(
).order_by(Message.created_at).all()
return success_response(data={
"messages": [m.to_dict() for m in messages]
"messages": [m.to_dict() for m in messages],
"title": conversation.title
})
@ -136,46 +137,13 @@ async def stream_message(
db.commit()
async def event_generator():
full_response = ""
async for event in chat_service.stream_response(
async for sse_str in chat_service.stream_response(
conversation=conversation,
user_message=data.content,
tools_enabled=tools_enabled
):
event_type = event.get("type")
if event_type == "text":
content = event.get("content", "")
full_response += content
yield f"data: {json.dumps({'type': 'text', 'content': content})}\n\n"
elif event_type == "tool_call":
yield f"data: {json.dumps({'type': 'tool_call', 'data': event.get('data')})}\n\n"
elif event_type == "tool_result":
yield f"data: {json.dumps({'type': 'tool_result', 'data': event.get('data')})}\n\n"
elif event_type == "done":
try:
ai_message = Message(
id=generate_id("msg"),
conversation_id=data.conversation_id,
role="assistant",
content=full_response,
token_count=len(full_response) // 4
)
db.add(ai_message)
db.commit()
except Exception:
pass
yield f"data: {json.dumps({'type': 'done', 'message_id': ai_message.id if 'ai_message' in dir() else None})}\n\n"
elif event_type == "error":
yield f"data: {json.dumps({'type': 'error', 'error': event.get('error')})}\n\n"
yield "data: [DONE]\n\n"
# Chat service returns raw SSE strings (including done event)
yield sse_str
return StreamingResponse(
event_generator(),

View File

@ -1,5 +1,6 @@
"""Chat service module"""
import json
import uuid
from typing import List, Dict, Any, AsyncGenerator
from luxx.models import Conversation, Message
@ -13,6 +14,11 @@ from luxx.config import config
MAX_ITERATIONS = 10
def _sse_event(event: str, data: dict) -> str:
"""Format a Server-Sent Event string."""
return f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"
def get_llm_client(conversation: Conversation = None):
"""Get LLM client, optionally using conversation's provider"""
if conversation and conversation.provider_id:
@ -37,7 +43,7 @@ def get_llm_client(conversation: Conversation = None):
class ChatService:
"""Chat service"""
"""Chat service with tool support"""
def __init__(self):
self.tool_executor = ToolExecutor()
@ -66,9 +72,19 @@ class ChatService:
).order_by(Message.created_at).all()
for msg in db_messages:
# Parse JSON content if possible
try:
content_obj = json.loads(msg.content) if msg.content else {}
if isinstance(content_obj, dict):
content = content_obj.get("text", msg.content)
else:
content = msg.content
except (json.JSONDecodeError, TypeError):
content = msg.content
messages.append({
"role": msg.role,
"content": msg.content
"content": content
})
finally:
db.close()
@ -80,163 +96,273 @@ class ChatService:
conversation: Conversation,
user_message: str,
tools_enabled: bool = True
) -> AsyncGenerator[Dict[str, Any], None]:
) -> AsyncGenerator[Dict[str, str], None]:
"""
Streaming response generator
Event types:
- process_step: thinking/text/tool_call/tool_result step
- done: final response complete
- error: on error
Yields raw SSE event strings for direct forwarding.
"""
try:
messages = self.build_messages(conversation)
messages.append({
"role": "user",
"content": user_message
"content": json.dumps({"text": user_message, "attachments": []})
})
tools = registry.list_all() if tools_enabled else None
iteration = 0
llm = get_llm_client(conversation)
model = conversation.model or llm.default_model or "gpt-4"
while iteration < MAX_ITERATIONS:
iteration += 1
print(f"[CHAT DEBUG] ====== Starting iteration {iteration} ======")
print(f"[CHAT DEBUG] Messages count: {len(messages)}")
# State tracking
all_steps = []
all_tool_calls = []
all_tool_results = []
step_index = 0
tool_calls_this_round = None
# Global step IDs for thinking and text (persist across iterations)
thinking_step_id = None
thinking_step_idx = None
text_step_id = None
text_step_idx = None
async for event in llm.stream_call(
for iteration in range(MAX_ITERATIONS):
print(f"[CHAT] Starting iteration {iteration + 1}, messages: {len(messages)}")
# Stream from LLM
full_content = ""
full_thinking = ""
tool_calls_list = []
# Generate new step IDs for each iteration to track multiple thoughts/tools
iteration_thinking_step_id = f"thinking-{iteration}"
iteration_text_step_id = f"text-{iteration}"
async for sse_line in llm.stream_call(
model=model,
messages=messages,
tools=tools,
temperature=conversation.temperature,
max_tokens=conversation.max_tokens
):
event_type = event.get("type")
# Parse SSE line
# Format: "event: xxx\ndata: {...}\n\n"
event_type = None
data_str = None
if event_type == "content_delta":
content = event.get("content", "")
if content:
print(f"[CHAT DEBUG] Iteration {iteration} content: {content[:100]}...")
yield {"type": "text", "content": content}
for line in sse_line.strip().split('\n'):
if line.startswith('event: '):
event_type = line[7:].strip()
elif line.startswith('data: '):
data_str = line[6:].strip()
elif event_type == "tool_call_delta":
tool_call = event.get("tool_call", {})
yield {"type": "tool_call", "data": tool_call}
if data_str is None:
continue
elif event_type == "done":
tool_calls_this_round = event.get("tool_calls")
print(f"[CHAT DEBUG] Done event, tool_calls: {tool_calls_this_round}")
if tool_calls_this_round and tools_enabled:
print(f"[CHAT DEBUG] Executing tools: {tool_calls_this_round}")
yield {"type": "tool_call", "data": tool_calls_this_round}
tool_results = self.tool_executor.process_tool_calls_parallel(
tool_calls_this_round,
{}
)
messages.append({
"role": "assistant",
"content": "",
"tool_calls": tool_calls_this_round
})
for tr in tool_results:
messages.append({
"role": "tool",
"tool_call_id": tr.get("tool_call_id"),
"content": str(tr.get("result", ""))
})
yield {"type": "tool_result", "data": tool_results}
else:
break
if not tool_calls_this_round or not tools_enabled:
break
yield {"type": "done"}
except Exception as e:
print(f"[CHAT ERROR] Exception in stream_response: {type(e).__name__}: {str(e)}")
yield {"type": "error", "error": str(e)}
except Exception as e:
yield {"type": "error", "error": str(e)}
def non_stream_response(
self,
conversation: Conversation,
user_message: str,
tools_enabled: bool = False
) -> Dict[str, Any]:
"""Non-streaming response"""
# Handle error events from LLM
if event_type == 'error':
try:
messages = self.build_messages(conversation)
messages.append({
"role": "user",
"content": user_message
error_data = json.loads(data_str)
yield _sse_event("error", {"content": error_data.get("content", "Unknown error")})
except json.JSONDecodeError:
yield _sse_event("error", {"content": data_str})
return
# Parse the data
try:
chunk = json.loads(data_str)
except json.JSONDecodeError:
continue
# Get delta
choices = chunk.get("choices", [])
if not choices:
continue
delta = choices[0].get("delta", {})
# Handle reasoning (thinking)
reasoning = delta.get("reasoning_content", "")
if reasoning:
full_thinking += reasoning
if thinking_step_id is None:
thinking_step_id = iteration_thinking_step_id
thinking_step_idx = step_index
step_index += 1
yield _sse_event("process_step", {
"id": thinking_step_id,
"index": thinking_step_idx,
"type": "thinking",
"content": full_thinking
})
tools = registry.list_all() if tools_enabled else None
# Handle content
content = delta.get("content", "")
if content:
full_content += content
if text_step_id is None:
text_step_idx = step_index
text_step_id = iteration_text_step_id
step_index += 1
yield _sse_event("process_step", {
"id": text_step_id,
"index": text_step_idx,
"type": "text",
"content": full_content
})
iteration = 0
# Accumulate tool calls
tool_calls_delta = delta.get("tool_calls", [])
for tc in tool_calls_delta:
idx = tc.get("index", 0)
if idx >= len(tool_calls_list):
tool_calls_list.append({
"id": tc.get("id", ""),
"type": "function",
"function": {"name": "", "arguments": ""}
})
func = tc.get("function", {})
if func.get("name"):
tool_calls_list[idx]["function"]["name"] += func["name"]
if func.get("arguments"):
tool_calls_list[idx]["function"]["arguments"] += func["arguments"]
llm_client = get_llm_client(conversation)
model = conversation.model or llm_client.default_model or "gpt-4"
# Save thinking step
if thinking_step_id is not None:
all_steps.append({
"id": thinking_step_id,
"index": thinking_step_idx,
"type": "thinking",
"content": full_thinking
})
while iteration < MAX_ITERATIONS:
iteration += 1
# Save text step
if text_step_id is not None:
all_steps.append({
"id": text_step_id,
"index": text_step_idx,
"type": "text",
"content": full_content
})
response = llm_client.sync_call(
model=model,
messages=messages,
tools=tools,
temperature=conversation.temperature,
max_tokens=conversation.max_tokens
# Handle tool calls
if tool_calls_list:
all_tool_calls.extend(tool_calls_list)
# Yield tool_call steps
tool_call_step_ids = [] # Track step IDs for tool calls
for tc in tool_calls_list:
call_step_id = f"tool-{iteration}-{tc.get('function', {}).get('name', 'unknown')}"
tool_call_step_ids.append(call_step_id)
call_step = {
"id": call_step_id,
"index": step_index,
"type": "tool_call",
"id_ref": tc.get("id", ""),
"name": tc["function"]["name"],
"arguments": tc["function"]["arguments"]
}
all_steps.append(call_step)
yield _sse_event("process_step", call_step)
step_index += 1
# Execute tools
tool_results = self.tool_executor.process_tool_calls_parallel(
tool_calls_list, {}
)
tool_calls = response.tool_calls
# Yield tool_result steps
for i, tr in enumerate(tool_results):
tool_call_step_id = tool_call_step_ids[i] if i < len(tool_call_step_ids) else f"tool-{i}"
result_step = {
"id": f"result-{iteration}-{tr.get('name', 'unknown')}",
"index": step_index,
"type": "tool_result",
"id_ref": tool_call_step_id, # Reference to the tool_call step
"name": tr.get("name", ""),
"content": tr.get("content", "")
}
all_steps.append(result_step)
yield _sse_event("process_step", result_step)
step_index += 1
if tool_calls and tools_enabled:
all_tool_results.append({
"role": "tool",
"tool_call_id": tr.get("tool_call_id", ""),
"content": tr.get("content", "")
})
# Add assistant message with tool calls for next iteration
messages.append({
"role": "assistant",
"content": response.content,
"tool_calls": tool_calls
"content": full_content or "",
"tool_calls": tool_calls_list
})
messages.extend(all_tool_results[-len(tool_results):])
all_tool_results = []
continue
tool_results = self.tool_executor.process_tool_calls_parallel(tool_calls)
# No tool calls - final iteration, save message
msg_id = str(uuid.uuid4())
self._save_message(
conversation.id,
msg_id,
full_content,
all_tool_calls,
all_tool_results,
all_steps
)
for tr in tool_results:
messages.append({
"role": "tool",
"tool_call_id": tr.get("tool_call_id"),
"content": str(tr.get("result", ""))
yield _sse_event("done", {
"message_id": msg_id,
"token_count": len(full_content) // 4
})
else:
return {
"success": True,
"content": response.content
}
return
return {
"success": True,
"content": "Max iterations reached"
}
# Max iterations exceeded
yield _sse_event("error", {"content": "Exceeded maximum tool call iterations"})
except Exception as e:
return {
"success": False,
"error": str(e)
print(f"[CHAT] Exception: {type(e).__name__}: {str(e)}")
yield _sse_event("error", {"content": str(e)})
def _save_message(
    self,
    conversation_id: str,
    msg_id: str,
    full_content: str,
    all_tool_calls: list,
    all_tool_results: list,
    all_steps: list
):
    """Persist the finished assistant reply as a Message row.

    The stored ``content`` column is a JSON document holding the final
    text and the ordered ``steps`` list (the render order's single source
    of truth); ``tool_calls`` is included only when any were made.
    ``all_tool_results`` is accepted for call-site symmetry but is not
    written here. Failures are logged and rolled back rather than raised.
    """
    from luxx.database import SessionLocal
    from luxx.models import Message

    # Assemble the JSON payload for the content column.
    payload = {
        "text": full_content,
        "steps": all_steps,
    }
    if all_tool_calls:
        payload["tool_calls"] = all_tool_calls

    session = SessionLocal()
    try:
        record = Message(
            id=msg_id,
            conversation_id=conversation_id,
            role="assistant",
            content=json.dumps(payload, ensure_ascii=False),
            # Rough token estimate: ~4 characters per token.
            token_count=len(full_content) // 4,
        )
        session.add(record)
        session.commit()
    except Exception as e:
        print(f"[CHAT] Failed to save message: {e}")
        session.rollback()
    finally:
        session.close()
# Global chat service

View File

@ -136,82 +136,39 @@ class LLMClient:
messages: List[Dict],
tools: Optional[List[Dict]] = None,
**kwargs
) -> AsyncGenerator[Dict[str, Any], None]:
"""Stream call LLM API"""
) -> AsyncGenerator[str, None]:
"""Stream call LLM API - yields raw SSE event lines
Yields:
str: Raw SSE event lines for direct forwarding
"""
body = self._build_body(model, messages, tools, stream=True, **kwargs)
# Accumulators for tool calls (need to collect from delta chunks)
accumulated_tool_calls = {}
print(f"[LLM] Starting stream_call for model: {model}")
print(f"[LLM] Messages count: {len(messages)}")
try:
async with httpx.AsyncClient(timeout=120.0) as client:
print(f"[LLM] Sending request to {self.api_url}")
async with client.stream(
"POST",
self.api_url,
headers=self._build_headers(),
json=body
) as response:
print(f"[LLM] Response status: {response.status_code}")
response.raise_for_status()
async for line in response.aiter_lines():
if not line.strip():
continue
if line.startswith("data: "):
data_str = line[6:]
if data_str == "[DONE]":
yield {"type": "done"}
continue
try:
chunk = json.loads(data_str)
except json.JSONDecodeError:
continue
if "choices" not in chunk:
continue
delta = chunk.get("choices", [{}])[0].get("delta", {})
# DeepSeek reasoner: use content if available, otherwise fall back to reasoning_content
content = delta.get("content")
reasoning = delta.get("reasoning_content", "")
if content and isinstance(content, str) and content.strip():
yield {"type": "content_delta", "content": content}
elif reasoning:
yield {"type": "content_delta", "content": reasoning}
# Accumulate tool calls from delta chunks (DeepSeek sends them incrementally)
tool_calls_delta = delta.get("tool_calls", [])
for tc in tool_calls_delta:
idx = tc.get("index", 0)
if idx not in accumulated_tool_calls:
accumulated_tool_calls[idx] = {"index": idx}
if "function" in tc:
if "function" not in accumulated_tool_calls[idx]:
accumulated_tool_calls[idx]["function"] = {"name": "", "arguments": ""}
if "name" in tc["function"]:
accumulated_tool_calls[idx]["function"]["name"] += tc["function"]["name"]
if "arguments" in tc["function"]:
accumulated_tool_calls[idx]["function"]["arguments"] += tc["function"]["arguments"]
if tool_calls_delta:
yield {"type": "tool_call_delta", "tool_call": tool_calls_delta}
# Check for finish_reason to signal end of stream
choice = chunk.get("choices", [{}])[0]
finish_reason = choice.get("finish_reason")
if finish_reason:
# Build final tool_calls list from accumulated chunks
final_tool_calls = list(accumulated_tool_calls.values()) if accumulated_tool_calls else None
yield {"type": "done", "tool_calls": final_tool_calls}
if line.strip():
yield line + "\n"
except httpx.HTTPStatusError as e:
# Return error as an event instead of raising
error_text = e.response.text if e.response else str(e)
yield {"type": "error", "error": f"HTTP {e.response.status_code}: {error_text}"}
status_code = e.response.status_code if e.response else "?"
print(f"[LLM] HTTP error: {status_code}")
yield f"event: error\ndata: {json.dumps({'content': f'HTTP {status_code}: Request failed'})}\n\n"
except Exception as e:
yield {"type": "error", "error": str(e)}
print(f"[LLM] Exception: {type(e).__name__}: {str(e)}")
yield f"event: error\ndata: {json.dumps({'content': str(e)})}\n\n"
# Global LLM client