Add L0 index and anchor UI updates

This commit is contained in:
2026-02-06 11:22:02 +08:00
parent c36efe6805
commit 44ca06f9b9
23 changed files with 1749 additions and 3898 deletions

View File

@@ -0,0 +1,251 @@
// ============================================================================
// atom-extraction.js - 10并发 + 首批错开 + 取消支持 + 进度回调
// ============================================================================
import { callLLM, parseJson } from './llm-service.js';
import { xbLog } from '../../../../core/debug-core.js';
import { filterText } from '../utils/text-filter.js';
const MODULE_ID = 'atom-extraction';
// Batch tuning: max extraction calls in flight per batch.
const CONCURRENCY = 10;
// Retries per round on failure, and the base delay between retries (ms).
const RETRY_COUNT = 2;
const RETRY_DELAY = 500;
// Default per-call LLM timeout (ms), forwarded to callLLM.
const DEFAULT_TIMEOUT = 20000;
const STAGGER_DELAY = 80; // delay between first-batch request starts (ms)
// Module-wide cancellation flag: reset by batchExtractAtoms at the start of a
// run and polled by the extraction workers between awaits.
let batchCancelled = false;
// Requests cancellation of the batch extraction currently in flight.
export function cancelBatchExtraction() {
  batchCancelled = true;
}
// Returns whether batch extraction has been cancelled.
export function isBatchCancelled() {
  return batchCancelled;
}
// System prompt: instructs the model to extract 4-8 anchor "atoms" per
// conversation round and reply with strict JSON only
// ({"atoms":[{"t","s","v","f"}]}). The Chinese text below is sent to the
// LLM at runtime — do not edit or translate it.
const SYSTEM_PROMPT = `你是叙事锚点提取器。从一轮对话(用户发言+角色回复中提取4-8个关键锚点。
只输出JSON
{"atoms":[{"t":"类型","s":"主体","v":"值","f":"来源"}]}
类型t
- emo: 情绪状态需要s主体
- loc: 地点/场景
- act: 关键动作需要s主体
- rev: 揭示/发现
- ten: 冲突/张力
- dec: 决定/承诺
规则:
- s: 主体(谁)
- v: 简洁值10字内
- f: "u"=用户发言中, "a"=角色回复中
- 只提取对未来检索有价值的锚点
- 无明显锚点返回空数组`;
/**
 * Renders one extracted atom as a short natural-language sentence, used as
 * the atom's semantic text for vector retrieval.
 * @param {{t:string,s?:string,v:string,f?:string}} atom - raw atom from the LLM
 * @param {string} userName - display name of the user speaker
 * @param {string} aiName - display name of the character speaker
 * @returns {string} one-line semantic description
 */
function buildSemantic(atom, userName, aiName) {
  // When the atom carries no subject, fall back to the speaker implied by
  // its source side ("u" = user message, anything else = character reply).
  const fallbackSubject = atom.f === 'u' ? userName : aiName;
  const subject = atom.s || fallbackSubject;
  const renderers = {
    emo: () => `${subject}感到${atom.v}`,
    loc: () => `场景:${atom.v}`,
    act: () => `${subject}${atom.v}`,
    rev: () => `揭示:${atom.v}`,
    ten: () => `冲突:${atom.v}`,
    dec: () => `${subject}决定${atom.v}`,
  };
  // Guard with hasOwn so inherited keys (e.g. "toString") hit the default.
  if (Object.hasOwn(renderers, atom.t)) {
    return renderers[atom.t]();
  }
  return `${subject} ${atom.v}`;
}
// Promise-based delay helper: resolves after `ms` milliseconds.
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
/**
 * Extracts anchor atoms for one (user, AI) message pair by calling the LLM,
 * retrying up to RETRY_COUNT times.
 *
 * Soft failures (empty response, JSON parse failure, missing `atoms` array)
 * retry after a fixed RETRY_DELAY; thrown errors (timeouts, network) retry
 * with a linearly growing delay. Always resolves to an array — never throws.
 * Returns [] immediately whenever the batch has been cancelled.
 *
 * @param {Object|null} userMessage - preceding user message, if any
 * @param {Object} aiMessage - the AI message for this floor (required)
 * @param {number} aiFloor - index of the AI message in the chat
 * @param {Object} [options]
 * @param {number} [options.timeout=DEFAULT_TIMEOUT] - per-call LLM timeout (ms)
 * @returns {Promise<Array>} normalized atom objects (possibly empty)
 */
async function extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, options = {}) {
  const { timeout = DEFAULT_TIMEOUT } = options;
  // Nothing to extract without AI reply text.
  if (!aiMessage?.mes?.trim()) return [];
  const parts = [];
  const userName = userMessage?.name || '用户';
  const aiName = aiMessage.name || '角色';
  // Include the user side only when it has non-blank text.
  if (userMessage?.mes?.trim()) {
    const userText = filterText(userMessage.mes);
    parts.push(`【用户:${userName}\n${userText}`);
  }
  const aiText = filterText(aiMessage.mes);
  parts.push(`【角色:${aiName}\n${aiText}`);
  const input = parts.join('\n\n---\n\n');
  xbLog.info(MODULE_ID, `floor ${aiFloor} 发送输入 len=${input.length}`);
  // attempt 0 is the initial try; up to RETRY_COUNT retries follow.
  for (let attempt = 0; attempt <= RETRY_COUNT; attempt++) {
    if (batchCancelled) return [];
    try {
      const response = await callLLM([
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: input },
      ], {
        temperature: 0.2,
        max_tokens: 500,
        timeout,
      });
      // Empty response: retry with fixed delay, or give up on last attempt.
      if (!response || !String(response).trim()) {
        xbLog.warn(MODULE_ID, `floor ${aiFloor} 解析失败:响应为空`);
        if (attempt < RETRY_COUNT) {
          await sleep(RETRY_DELAY);
          continue;
        }
        return [];
      }
      let parsed;
      try {
        parsed = parseJson(response);
      } catch (e) {
        // parseJson threw: treat like a malformed response and retry.
        xbLog.warn(MODULE_ID, `floor ${aiFloor} 解析失败JSON 异常`);
        if (attempt < RETRY_COUNT) {
          await sleep(RETRY_DELAY);
          continue;
        }
        return [];
      }
      // Structurally invalid payload (no atoms array): retry as well.
      if (!parsed?.atoms || !Array.isArray(parsed.atoms)) {
        xbLog.warn(MODULE_ID, `floor ${aiFloor} 解析失败atoms 缺失`);
        if (attempt < RETRY_COUNT) {
          await sleep(RETRY_DELAY);
          continue;
        }
        return [];
      }
      // Normalize: keep only atoms with both type and value; cap value at
      // 30 chars and attach a retrieval-friendly semantic sentence.
      return parsed.atoms
        .filter(a => a?.t && a?.v)
        .map((a, idx) => ({
          atomId: `atom-${aiFloor}-${idx}`,
          floor: aiFloor,
          type: a.t,
          subject: a.s || null,
          value: String(a.v).slice(0, 30),
          source: a.f === 'u' ? 'user' : 'ai',
          semantic: buildSemantic(a, userName, aiName),
        }));
    } catch (e) {
      if (batchCancelled) return [];
      if (attempt < RETRY_COUNT) {
        // Hard error: back off linearly (RETRY_DELAY * attempt number).
        xbLog.warn(MODULE_ID, `floor ${aiFloor}${attempt + 1}次失败,重试...`, e?.message);
        await sleep(RETRY_DELAY * (attempt + 1));
        continue;
      }
      xbLog.error(MODULE_ID, `floor ${aiFloor} 失败`, e);
      return [];
    }
  }
  return [];
}
/**
 * Extracts atoms for a single (user, AI) round — used for incremental
 * updates outside of a full batch run. Thin public wrapper around the
 * retrying implementation; see extractAtomsForRoundWithRetry for details.
 * @param {Object|null} userMessage - preceding user message, if any
 * @param {Object} aiMessage - the AI message for this floor
 * @param {number} aiFloor - index of the AI message in the chat
 * @param {Object} [options] - forwarded (e.g. `timeout`)
 * @returns {Promise<Array>} normalized atoms; [] on failure (never throws)
 */
export async function extractAtomsForRound(userMessage, aiMessage, aiFloor, options = {}) {
  return extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, options);
}
/**
 * Batch-extracts anchor atoms for every AI message in `chat`.
 *
 * Each AI message is paired with the user message immediately preceding it
 * (if any) and pairs are processed in batches of CONCURRENCY. The first
 * batch staggers its starts by STAGGER_DELAY ms to avoid a request burst;
 * later batches run fully parallel. Progress is reported after each pair
 * settles. Cancellation (cancelBatchExtraction) stops scheduling and
 * suppresses further counting.
 *
 * @param {Array} chat - full chat list (objects with `is_user`, `mes`, `name`)
 * @param {Function} [onProgress] - (completed, total, failed) => void
 * @returns {Promise<Array>} all extracted atoms (arrival order, not floor order)
 */
export async function batchExtractAtoms(chat, onProgress) {
  if (!chat?.length) return [];
  batchCancelled = false;
  // Build (userMsg, aiMsg) pairs: an AI floor is paired with the user
  // message directly before it, when that message exists and is a user turn.
  const pairs = [];
  for (let i = 0; i < chat.length; i++) {
    if (!chat[i].is_user) {
      const userMsg = (i > 0 && chat[i - 1]?.is_user) ? chat[i - 1] : null;
      pairs.push({ userMsg, aiMsg: chat[i], aiFloor: i });
    }
  }
  if (!pairs.length) return [];
  const allAtoms = [];
  let completed = 0;
  let failed = 0;
  // Shared per-pair worker. Fix: the original first-batch path still counted
  // a pair as completed/failed after cancellation while later batches did
  // not — cancellation handling is now identical on every path. An empty
  // result still counts as "failed", matching the original bookkeeping.
  const runPair = async (pair) => {
    try {
      const atoms = await extractAtomsForRoundWithRetry(
        pair.userMsg, pair.aiMsg, pair.aiFloor, { timeout: DEFAULT_TIMEOUT },
      );
      if (batchCancelled) return;
      if (atoms?.length) {
        allAtoms.push(...atoms);
      } else {
        failed++;
      }
    } catch {
      if (batchCancelled) return;
      failed++;
    }
    completed++;
    onProgress?.(completed, pairs.length, failed);
  };
  for (let i = 0; i < pairs.length; i += CONCURRENCY) {
    if (batchCancelled) {
      xbLog.info(MODULE_ID, `批量提取已取消 (${completed}/${pairs.length})`);
      break;
    }
    const batch = pairs.slice(i, i + CONCURRENCY);
    if (i === 0) {
      // ★ First batch: stagger request starts by STAGGER_DELAY ms each.
      await Promise.all(batch.map(async (pair, idx) => {
        await sleep(idx * STAGGER_DELAY);
        if (batchCancelled) return;
        await runPair(pair);
      }));
    } else {
      // Subsequent batches run fully parallel.
      await Promise.all(batch.map(pair => runPair(pair)));
    }
    // Short pause between batches, unless finished or cancelled.
    if (i + CONCURRENCY < pairs.length && !batchCancelled) {
      await sleep(30);
    }
  }
  const status = batchCancelled ? '已取消' : '完成';
  xbLog.info(MODULE_ID, `批量提取${status}: ${allAtoms.length} atoms, ${completed}/${pairs.length}, ${failed} 失败`);
  return allAtoms;
}

View File

@@ -0,0 +1,72 @@
// ═══════════════════════════════════════════════════════════════════════════
// vector/llm/llm-service.js
// ═══════════════════════════════════════════════════════════════════════════
import { xbLog } from '../../../../core/debug-core.js';
const MODULE_ID = 'vector-llm-service';

// Rolling counter folded into generated call ids (wraps at 100000).
let callCounter = 0;

/**
 * Returns the global streaming-generation module when it is loaded and
 * exposes the raw-generation command; null otherwise.
 */
function getStreamingModule() {
  const candidate = window.xiaobaixStreamingGeneration;
  return candidate?.xbgenrawCommand ? candidate : null;
}

/**
 * Builds a unique id such as `llm-7-kx3f9q` from the rolling counter and a
 * base-36 timestamp.
 * @param {string} [prefix='llm'] - leading tag for the id
 * @returns {string}
 */
function generateUniqueId(prefix = 'llm') {
  callCounter = (callCounter + 1) % 100000;
  const stamp = Date.now().toString(36);
  return [prefix, callCounter, stamp].join('-');
}
/**
 * UTF-8 encodes a string and returns its base64url form (RFC 4648 §5):
 * '+' becomes '-', '/' becomes '_', and trailing padding is stripped.
 * @param {*} str - coerced to string before encoding
 * @returns {string}
 */
function b64UrlEncode(str) {
  const bytes = new TextEncoder().encode(String(str));
  const binary = Array.from(bytes, (b) => String.fromCharCode(b)).join('');
  return btoa(binary)
    .replaceAll('+', '-')
    .replaceAll('/', '_')
    .replace(/=+$/, '');
}
/**
 * Unified LLM call through the tavern backend (non-streaming).
 *
 * @param {Array<{role:string,content:string}>} messages - chat payload
 * @param {Object} [options]
 * @param {number} [options.temperature=0.2]
 * @param {number} [options.max_tokens=500]
 * @param {number} [options.timeout=0] - client-side timeout in ms; 0 disables.
 *   Fix: callers (atom-extraction, query-expansion) already pass `timeout`,
 *   but it was previously accepted and silently ignored.
 * @returns {Promise<string>} the raw response text ('' when backend returns nothing)
 * @throws {Error} when the generation module is missing, the backend call
 *   fails, or the timeout elapses first
 */
export async function callLLM(messages, options = {}) {
  const {
    temperature = 0.2,
    max_tokens = 500,
    timeout = 0,
  } = options;
  const mod = getStreamingModule();
  if (!mod) throw new Error('生成模块未加载');
  const top64 = b64UrlEncode(JSON.stringify(messages));
  // ★ Unique id per call to avoid session conflicts.
  // NOTE(review): some callers also pass `sessionId`; the backend argument
  // name is unknown here, so it is intentionally not forwarded — confirm.
  const uniqueId = generateUniqueId('l0');
  const args = {
    as: 'user',
    nonstream: 'true',
    top64,
    id: uniqueId,
    temperature: String(temperature),
    max_tokens: String(max_tokens),
  };
  try {
    // Non-streaming: the command resolves directly with the result.
    const pending = mod.xbgenrawCommand(args, '');
    let result;
    if (timeout > 0) {
      // Race the backend call against a rejection timer so a hung request
      // surfaces as an error the callers' retry loops can handle.
      let timer;
      const expiry = new Promise((_, reject) => {
        timer = setTimeout(() => reject(new Error(`LLM调用超时 (${timeout}ms)`)), timeout);
      });
      try {
        result = await Promise.race([pending, expiry]);
      } finally {
        clearTimeout(timer);
      }
    } else {
      result = await pending;
    }
    return String(result ?? '');
  } catch (e) {
    xbLog.error(MODULE_ID, 'LLM调用失败', e);
    throw e;
  }
}
/**
 * Best-effort JSON extraction from an LLM response.
 * Strips surrounding markdown code fences, attempts a straight parse, then
 * falls back to parsing the outermost {...} span.
 * @param {string|null|undefined} text - raw model output
 * @returns {*} parsed value, or null when nothing parses
 */
export function parseJson(text) {
  if (!text) return null;
  let body = text.trim();
  body = body.replace(/^```(?:json)?\s*/i, '').replace(/\s*```$/i, '').trim();
  try {
    return JSON.parse(body);
  } catch {
    // Fall through to the brace scan below.
  }
  const open = body.indexOf('{');
  const close = body.lastIndexOf('}');
  if (open !== -1 && close > open) {
    try {
      return JSON.parse(body.slice(open, close + 1));
    } catch {
      // Not valid JSON either; give up.
    }
  }
  return null;
}

View File

@@ -0,0 +1,102 @@
// ═══════════════════════════════════════════════════════════════════════════
// query-expansion.js - 完整输入,不截断
// ═══════════════════════════════════════════════════════════════════════════
import { callLLM, parseJson } from './llm-service.js';
import { xbLog } from '../../../../core/debug-core.js';
import { filterText } from '../utils/text-filter.js';
const MODULE_ID = 'query-expansion';
// Session id forwarded to callLLM for query-expansion calls.
const SESSION_ID = 'xb6';
// System prompt: asks for explicit entities (e), implicit topics (i), and
// retrieval phrases (q) as strict JSON. The Chinese text is sent to the
// LLM at runtime — do not edit or translate it.
const SYSTEM_PROMPT = `你是检索词生成器。根据最近对话,输出用于检索历史剧情的关键词。
只输出JSON
{"e":["显式人物/地名"],"i":["隐含人物/情绪/话题"],"q":["检索短句"]}
规则:
- e: 对话中明确提到的人名/地名1-4个
- i: 推断出的相关人物/情绪/话题1-5个
- q: 用于向量检索的短句2-3个每个15字内
- 关注:正在讨论什么、涉及谁、情绪氛围`;
/**
 * Query expansion: turns the most recent conversation rounds into
 * retrieval keywords (explicit entities, implicit topics, search phrases)
 * via one LLM call. Never throws; any failure yields empty arrays.
 * @param {Array} messages - full message objects (last 2-3 rounds)
 * @param {Object} [options]
 * @param {number} [options.timeout=6000] - LLM call timeout (ms)
 * @returns {Promise<{entities:string[],implicit:string[],queries:string[]}>}
 */
export async function expandQuery(messages, options = {}) {
  const { timeout = 6000 } = options;
  const emptyResult = () => ({ entities: [], implicit: [], queries: [] });
  if (!messages?.length) return emptyResult();
  // Format every message in full — deliberately no truncation.
  const input = messages
    .map((m) => {
      const speaker = m.is_user ? '用户' : (m.name || '角色');
      const text = filterText(m.mes || '').trim();
      return `${speaker}\n${text}`;
    })
    .join('\n\n');
  const startedAt = performance.now();
  try {
    const response = await callLLM([
      { role: 'system', content: SYSTEM_PROMPT },
      { role: 'user', content: input },
    ], {
      temperature: 0.15,
      max_tokens: 250,
      timeout,
      sessionId: SESSION_ID,
    });
    const parsed = parseJson(response);
    if (!parsed) {
      xbLog.warn(MODULE_ID, 'JSON解析失败', response?.slice(0, 200));
      return emptyResult();
    }
    // Clamp each list to its budget; non-arrays collapse to [].
    const clamp = (value, limit) => (Array.isArray(value) ? value.slice(0, limit) : []);
    const result = {
      entities: clamp(parsed.e, 5),
      implicit: clamp(parsed.i, 6),
      queries: clamp(parsed.q, 4),
    };
    const elapsed = Math.round(performance.now() - startedAt);
    xbLog.info(MODULE_ID, `完成 (${elapsed}ms) e=${result.entities.length} i=${result.implicit.length} q=${result.queries.length}`);
    return result;
  } catch (e) {
    xbLog.error(MODULE_ID, '调用失败', e);
    return emptyResult();
  }
}
// Expansion-result cache keyed by a hash of the latest messages.
const cache = new Map();
const CACHE_TTL = 300000; // 5 minutes

/**
 * Cheap 32-bit rolling hash over the conversation tail (last 2 messages,
 * first 100 chars each), returned base-36 encoded.
 * @param {Array} messages
 * @returns {string}
 */
function hashMessages(messages) {
  const tail = messages
    .slice(-2)
    .map((m) => (m.mes || '').slice(0, 100))
    .join('|');
  let acc = 0;
  // Index-based loop on purpose: hashes UTF-16 code units, matching the
  // original behavior for surrogate pairs.
  for (let i = 0; i < tail.length; i += 1) {
    acc = ((acc << 5) - acc + tail.charCodeAt(i)) | 0;
  }
  return acc.toString(36);
}
/**
 * Cached wrapper around expandQuery. Results live for CACHE_TTL ms and are
 * only stored when the expansion produced something useful.
 * @param {Array} messages - forwarded to expandQuery
 * @param {Object} [options] - forwarded to expandQuery
 * @returns {Promise<{entities:string[],implicit:string[],queries:string[]}>}
 */
export async function expandQueryCached(messages, options = {}) {
  const key = hashMessages(messages);
  const hit = cache.get(key);
  const fresh = hit && (Date.now() - hit.time < CACHE_TTL);
  if (fresh) return hit.result;
  const result = await expandQuery(messages, options);
  const worthCaching = result.entities.length || result.queries.length;
  if (worthCaching) {
    // Crude size cap: evict the oldest insertion once over 50 entries.
    if (cache.size > 50) {
      cache.delete(cache.keys().next().value);
    }
    cache.set(key, { result, time: Date.now() });
  }
  return result;
}
/**
 * Flattens an expansion result into a single space-separated search string,
 * skipping empty/falsy entries.
 * @param {{entities?:string[],implicit?:string[],queries?:string[]}} expansion
 * @returns {string}
 */
export function buildSearchText(expansion) {
  const terms = [
    ...(expansion.entities || []),
    ...(expansion.implicit || []),
    ...(expansion.queries || []),
  ];
  return terms.filter(Boolean).join(' ');
}

View File

@@ -0,0 +1,59 @@
// ═══════════════════════════════════════════════════════════════════════════
// siliconflow.js - 仅保留 Embedding
// ═══════════════════════════════════════════════════════════════════════════
// SiliconFlow API endpoint and the embedding model used for all requests.
const BASE_URL = 'https://api.siliconflow.cn';
const EMBEDDING_MODEL = 'BAAI/bge-m3';
/**
 * Reads the SiliconFlow API key from the summary panel's persisted
 * localStorage config.
 * @returns {string|null} the key, or null when unset or unreadable
 */
export function getApiKey() {
  try {
    const raw = localStorage.getItem('summary_panel_config');
    if (!raw) return null;
    const config = JSON.parse(raw);
    return config.vector?.online?.key || null;
  } catch {
    // Missing storage or corrupt JSON — treat as "no key configured".
    return null;
  }
}
/**
 * Requests embeddings for `texts` from the SiliconFlow API.
 *
 * @param {string[]} texts - inputs to embed; empty/nullish short-circuits to []
 * @param {Object} [options]
 * @param {number} [options.timeout=30000] - request timeout (ms)
 * @param {AbortSignal} [options.signal] - optional external cancellation
 * @returns {Promise<number[][]>} embedding vectors in input order
 * @throws {Error} when no API key is configured, the HTTP request fails,
 *   or the request is aborted/times out
 */
export async function embed(texts, options = {}) {
  if (!texts?.length) return [];
  const key = getApiKey();
  if (!key) throw new Error('未配置硅基 API Key');
  const { timeout = 30000, signal } = options;
  // One controller drives the fetch; it is aborted by the timeout AND by
  // the caller's signal. (Fix: previously a caller-supplied signal was
  // passed straight to fetch, which silently disabled the timeout.)
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeout);
  if (signal) {
    if (signal.aborted) {
      controller.abort(signal.reason);
    } else {
      signal.addEventListener('abort', () => controller.abort(signal.reason), { once: true });
    }
  }
  try {
    const response = await fetch(`${BASE_URL}/v1/embeddings`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${key}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: EMBEDDING_MODEL,
        input: texts,
      }),
      signal: controller.signal,
    });
    if (!response.ok) {
      const errorText = await response.text().catch(() => '');
      throw new Error(`Embedding ${response.status}: ${errorText.slice(0, 200)}`);
    }
    const data = await response.json();
    // The API may return items out of order; sort by index and normalize
    // typed-array embeddings to plain arrays.
    return (data.data || [])
      .sort((a, b) => a.index - b.index)
      .map((item) => (Array.isArray(item.embedding) ? item.embedding : Array.from(item.embedding)));
  } finally {
    // Single cleanup point (fix: clearTimeout was previously called twice).
    clearTimeout(timeoutId);
  }
}
// Re-export the model name under the legacy `MODELS` alias used by callers.
export { EMBEDDING_MODEL as MODELS };