feat: 增加模型选择

This commit is contained in:
ViperEkura 2026-03-24 16:25:32 +08:00
parent 46c8f85e9b
commit 8fdb119dcf
3 changed files with 39 additions and 9 deletions

View File

@ -11,8 +11,10 @@ from . import load_config
bp = Blueprint("api", __name__) bp = Blueprint("api", __name__)
cfg = load_config() cfg = load_config()
GLM_API_URL = cfg.get("api_url") API_URL = cfg.get("api_url")
GLM_API_KEY = cfg["api_key"] API_KEY = cfg["api_key"]
MODELS = cfg.get("models", [])
DEFAULT_MODEL = cfg.get("default_model", "glm-5")
# -- Helpers ---------------------------------------------- # -- Helpers ----------------------------------------------
@ -57,6 +59,14 @@ def build_glm_messages(conv):
return msgs return msgs
# -- Models API -------------------------------------------
@bp.route("/api/models", methods=["GET"])
def list_models():
    """Return the list of available chat models.

    Response: the module-level ``MODELS`` list (loaded from config via
    ``cfg.get("models", [])``; may be empty) wrapped in the standard
    ``ok()`` success envelope.
    """
    return ok(MODELS)
# -- Conversation CRUD ------------------------------------ # -- Conversation CRUD ------------------------------------
@bp.route("/api/conversations", methods=["GET", "POST"]) @bp.route("/api/conversations", methods=["GET", "POST"])
@ -68,7 +78,7 @@ def conversation_list():
id=str(uuid.uuid4()), id=str(uuid.uuid4()),
user_id=user.id, user_id=user.id,
title=d.get("title", ""), title=d.get("title", ""),
model=d.get("model", "glm-5"), model=d.get("model", DEFAULT_MODEL),
system_prompt=d.get("system_prompt", ""), system_prompt=d.get("system_prompt", ""),
temperature=d.get("temperature", 1.0), temperature=d.get("temperature", 1.0),
max_tokens=d.get("max_tokens", 65536), max_tokens=d.get("max_tokens", 65536),
@ -183,8 +193,8 @@ def _call_glm(conv, stream=False):
if stream: if stream:
body["stream"] = True body["stream"] = True
return requests.post( return requests.post(
GLM_API_URL, API_URL,
headers={"Content-Type": "application/json", "Authorization": f"Bearer {GLM_API_KEY}"}, headers={"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"},
json=body, stream=stream, timeout=120, json=body, stream=stream, timeout=120,
) )

View File

@ -13,6 +13,12 @@ async function request(url, options = {}) {
return data return data
} }
// API client for the model catalogue endpoints.
export const modelApi = {
  // GET /models — fetch the list of selectable chat models.
  list: () => request('/models'),
}
export const conversationApi = { export const conversationApi = {
list(cursor, limit = 20) { list(cursor, limit = 20) {
const params = new URLSearchParams() const params = new URLSearchParams()

View File

@ -21,7 +21,7 @@
<div class="form-group"> <div class="form-group">
<label>模型</label> <label>模型</label>
<select v-model="form.model"> <select v-model="form.model">
<option value="glm-5">GLM-5</option> <option v-for="m in models" :key="m.id" :value="m.id">{{ m.name }}</option>
</select> </select>
</div> </div>
@ -94,7 +94,8 @@
</template> </template>
<script setup> <script setup>
import { reactive, watch } from 'vue' import { reactive, ref, watch, onMounted } from 'vue'
import { modelApi } from '../api'
const props = defineProps({ const props = defineProps({
visible: { type: Boolean, default: false }, visible: { type: Boolean, default: false },
@ -103,19 +104,30 @@ const props = defineProps({
const emit = defineEmits(['close', 'save']) const emit = defineEmits(['close', 'save'])
const models = ref([])
const form = reactive({ const form = reactive({
title: '', title: '',
model: 'glm-5', model: '',
system_prompt: '', system_prompt: '',
temperature: 1.0, temperature: 1.0,
max_tokens: 65536, max_tokens: 65536,
thinking_enabled: false, thinking_enabled: false,
}) })
// Populate the model picker from the backend. On failure the error is
// logged and the current list is left untouched.
async function loadModels() {
  let res
  try {
    res = await modelApi.list()
  } catch (e) {
    console.error('Failed to load models:', e)
    return
  }
  models.value = res.data || []
}
watch(() => props.visible, (val) => { watch(() => props.visible, (val) => {
if (val && props.conversation) { if (val && props.conversation) {
form.title = props.conversation.title || '' form.title = props.conversation.title || ''
form.model = props.conversation.model || 'default' form.model = props.conversation.model || ''
form.system_prompt = props.conversation.system_prompt || '' form.system_prompt = props.conversation.system_prompt || ''
form.temperature = props.conversation.temperature ?? 1.0 form.temperature = props.conversation.temperature ?? 1.0
form.max_tokens = props.conversation.max_tokens ?? 65536 form.max_tokens = props.conversation.max_tokens ?? 65536
@ -123,6 +135,8 @@ watch(() => props.visible, (val) => {
} }
}) })
onMounted(loadModels)
function save() { function save() {
emit('save', { ...form }) emit('save', { ...form })
emit('close') emit('close')