Fix garbled text checks and L0 LLM handling

This commit is contained in:
2026-02-06 15:08:20 +08:00
parent 44ca06f9b9
commit 56e30bfe02
9 changed files with 163 additions and 48 deletions

View File

@@ -1,10 +1,12 @@
// ═══════════════════════════════════════════════════════════════════════════
// ═══════════════════════════════════════════════════════════════════════════
// vector/llm/llm-service.js
// ═══════════════════════════════════════════════════════════════════════════
import { xbLog } from '../../../../core/debug-core.js';
import { getVectorConfig } from '../../data/config.js';
const MODULE_ID = 'vector-llm-service';
const SILICONFLOW_API_URL = 'https://api.siliconflow.cn';
const DEFAULT_L0_MODEL = 'Qwen/Qwen3-8B';
// 唯一 ID 计数器
let callCounter = 0;
@@ -36,11 +38,17 @@ export async function callLLM(messages, options = {}) {
} = options;
const mod = getStreamingModule();
if (!mod) throw new Error('生成模块未加载');
if (!mod) throw new Error('Streaming module not ready');
const cfg = getVectorConfig();
const apiKey = cfg?.online?.key || '';
if (!apiKey) {
throw new Error('L0 requires siliconflow API key');
}
const top64 = b64UrlEncode(JSON.stringify(messages));
// 每次调用用唯一 ID避免 session 冲突
// 每次调用用唯一 ID，避免 session 冲突
const uniqueId = generateUniqueId('l0');
const args = {
@@ -50,6 +58,10 @@ export async function callLLM(messages, options = {}) {
id: uniqueId,
temperature: String(temperature),
max_tokens: String(max_tokens),
api: 'openai',
apiurl: SILICONFLOW_API_URL,
apipassword: apiKey,
model: DEFAULT_L0_MODEL,
};
try {