diff --git a/dashboard/src/App.vue b/dashboard/src/App.vue index baf22da..f94b65e 100644 --- a/dashboard/src/App.vue +++ b/dashboard/src/App.vue @@ -18,7 +18,14 @@ const sidebarCollapsed = ref(false) diff --git a/dashboard/src/views/HomeView.vue b/dashboard/src/views/HomeView.vue index 8059505..592dc4b 100644 --- a/dashboard/src/views/HomeView.vue +++ b/dashboard/src/views/HomeView.vue @@ -1,22 +1,29 @@ @@ -24,44 +31,124 @@ import { ref, onMounted } from 'vue' import { conversationsAPI, toolsAPI } from '../utils/api.js' -const stats = ref({ conversations: 0, tools: 0, messages: 0, models: 1 }) +const stats = ref({ conversations: 0, tools: 0, totalTokens: 0 }) + +const formatTokens = (n) => { + if (n >= 1000000) return (n / 1000000).toFixed(1) + 'M' + if (n >= 1000) return (n / 1000).toFixed(1) + 'K' + return n.toString() +} onMounted(async () => { try { const [convs, tools] = await Promise.allSettled([ - conversationsAPI.list({ page: 1, page_size: 1 }), + conversationsAPI.list({ page: 1, page_size: 100 }), toolsAPI.list() ]) - if (convs.status === 'fulfilled' && convs.value.success) stats.value.conversations = convs.value.data?.total || 0 - if (tools.status === 'fulfilled' && tools.value.success) { - const t = tools.value.data?.tools || tools.value.data || [] - stats.value.tools = Array.isArray(t) ? 
t.length : 0 + if (convs.status === 'fulfilled' && convs.value.success) { + stats.value.conversations = convs.value.data?.total || 0 + const items = convs.value.data?.items || [] + stats.value.totalTokens = items.reduce((sum, c) => sum + (c.token_count || 0), 0) + } + if (tools.status === 'fulfilled' && tools.value.success) { + const data = tools.value.data?.categorized || {} + let total = 0 + Object.values(data).forEach(arr => { + if (Array.isArray(arr)) total += arr.length + }) + stats.value.tools = total } - stats.value.messages = stats.value.conversations * 5 } catch (e) { console.error(e) } }) diff --git a/dashboard/src/views/SettingsView.vue b/dashboard/src/views/SettingsView.vue index 25dd9c9..4daf5ea 100644 --- a/dashboard/src/views/SettingsView.vue +++ b/dashboard/src/views/SettingsView.vue @@ -1,19 +1,161 @@ diff --git a/dashboard/src/views/ToolsView.vue b/dashboard/src/views/ToolsView.vue index 6af87ba..628d635 100644 --- a/dashboard/src/views/ToolsView.vue +++ b/dashboard/src/views/ToolsView.vue @@ -1,35 +1,46 @@ @@ -41,7 +52,6 @@ import { toolsAPI } from '../utils/api.js' const list = ref([]) const loading = ref(true) const error = ref('') -const detail = ref(null) const fetchData = async () => { loading.value = true @@ -55,7 +65,7 @@ const fetchData = async () => { if (Array.isArray(data[cat])) { all.push(...data[cat].map(t => { const func = t.function ? 
t.function : t - return { name: func.name, description: func.description, parameters: func.parameters, category: cat, enabled: true } + return { name: func.name, description: func.description, parameters: func.parameters, category: cat, enabled: t.enabled !== false } })) } }) @@ -65,44 +75,34 @@ const fetchData = async () => { finally { loading.value = false } } -const showDetail = (tool) => { detail.value = tool } +const toggleEnabled = async (tool) => { + tool.enabled = !tool.enabled +} onMounted(fetchData) diff --git a/luxx/routes/messages.py b/luxx/routes/messages.py index 3048f17..0de755f 100644 --- a/luxx/routes/messages.py +++ b/luxx/routes/messages.py @@ -1,5 +1,6 @@ """Message routes""" import json +from typing import List, Optional from fastapi import APIRouter, Depends, Response from fastapi.responses import StreamingResponse from pydantic import BaseModel @@ -114,6 +115,8 @@ def send_message( async def stream_message( data: MessageCreate, tools_enabled: bool = True, + enabled_tools: Optional[List[str]] = None, + thinking_enabled: bool = False, current_user: User = Depends(get_current_user), db: Session = Depends(get_db) ): @@ -141,7 +144,9 @@ async def stream_message( async for sse_str in chat_service.stream_response( conversation=conversation, user_message=data.content, - tools_enabled=tools_enabled + tools_enabled=tools_enabled, + enabled_tools=enabled_tools, + thinking_enabled=thinking_enabled ): # Chat service returns raw SSE strings (including done event) yield sse_str diff --git a/luxx/services/chat.py b/luxx/services/chat.py index 9eb1a2c..53d1bb3 100644 --- a/luxx/services/chat.py +++ b/luxx/services/chat.py @@ -1,7 +1,7 @@ """Chat service module""" import json import uuid -from typing import List, Dict, Any, AsyncGenerator +from typing import List, Dict, Any, AsyncGenerator, Optional from luxx.models import Conversation, Message from luxx.tools.executor import ToolExecutor @@ -97,7 +97,9 @@ class ChatService: self, conversation: Conversation, 
user_message: str, - tools_enabled: bool = True + tools_enabled: bool = True, + enabled_tools: Optional[List[str]] = None, + thinking_enabled: bool = False ) -> AsyncGenerator[Dict[str, str], None]: """ Streaming response generator @@ -112,7 +114,12 @@ class ChatService: "content": json.dumps({"text": user_message, "attachments": []}) }) - tools = registry.list_all() if tools_enabled else None + # Filter tools by enabled list if provided + if enabled_tools is not None and tools_enabled: + all_tools = registry.list_all() + tools = [t for t in all_tools if t.get("name") in enabled_tools] + else: + tools = registry.list_all() if tools_enabled else None llm, provider_max_tokens = get_llm_client(conversation) model = conversation.model or llm.default_model or "gpt-4" @@ -150,7 +161,8 @@ class ChatService: messages=messages, tools=tools, temperature=conversation.temperature, - max_tokens=max_tokens or 8192 + max_tokens=max_tokens or 8192, + thinking_enabled=thinking_enabled or conversation.thinking_enabled ): # Parse SSE line # Format: "event: xxx\ndata: {...}\n\n" diff --git a/luxx/services/llm_client.py b/luxx/services/llm_client.py index 257f0a9..5dcd6de 100644 --- a/luxx/services/llm_client.py +++ b/luxx/services/llm_client.py @@ -78,6 +78,9 @@ class LLMClient: if "max_tokens" in kwargs: body["max_tokens"] = kwargs["max_tokens"] + if "thinking_enabled" in kwargs and kwargs["thinking_enabled"]: + body["thinking_enabled"] = True + if tools: body["tools"] = tools