// ═══════════════════════════════════════════════════════════════════════════ // Story Summary - Recall Engine (v7 - Two-Stage: L0 Locate → L1 Evidence) // // 命名规范: // - 存储层用 L0/L1/L2/L3(StateAtom/Chunk/Event/Fact) // - 召回层用语义名称:anchor/evidence/event/constraint // // 架构: // 阶段 1: Query Build(确定性,无 LLM) // 阶段 2: Round 1 Dense Retrieval(L0 + L2) // 阶段 3: Query Refinement(用已命中记忆增强) // 阶段 4: Round 2 Dense Retrieval(L0 + L2) // 阶段 5: Lexical Retrieval // 阶段 6: Floor W-RRF Fusion + Rerank + L1 配对 // 阶段 7: L1 配对组装(L0 → top-1 AI L1 + top-1 USER L1) // 阶段 8: Causation Trace // ═══════════════════════════════════════════════════════════════════════════ import { getAllEventVectors, getChunksByFloors, getMeta, getChunkVectorsByIds } from '../storage/chunk-store.js'; import { getAllStateVectors, getStateAtoms } from '../storage/state-store.js'; import { getEngineFingerprint, embed } from '../utils/embedder.js'; import { xbLog } from '../../../../core/debug-core.js'; import { getContext } from '../../../../../../../extensions.js'; import { buildQueryBundle, refineQueryBundle } from './query-builder.js'; import { getLexicalIndex, searchLexicalIndex } from './lexical-index.js'; import { rerankChunks } from '../llm/reranker.js'; import { createMetrics, calcSimilarityStats } from './metrics.js'; const MODULE_ID = 'recall'; // ═══════════════════════════════════════════════════════════════════════════ // 配置 // ═══════════════════════════════════════════════════════════════════════════ const CONFIG = { // 窗口 LAST_MESSAGES_K: 2, // Anchor (L0 StateAtoms) ANCHOR_MIN_SIMILARITY: 0.58, // Event (L2 Events) EVENT_CANDIDATE_MAX: 100, EVENT_SELECT_MAX: 50, EVENT_MIN_SIMILARITY: 0.55, EVENT_MMR_LAMBDA: 0.72, // W-RRF 融合(L0-only) RRF_K: 60, RRF_W_DENSE: 1.0, RRF_W_LEX: 0.9, FUSION_CAP: 60, // Rerank(floor-level) RERANK_TOP_N: 20, RERANK_MIN_SCORE: 0.15, // 因果链 CAUSAL_CHAIN_MAX_DEPTH: 10, CAUSAL_INJECT_MAX: 30, }; // ═══════════════════════════════════════════════════════════════════════════ 
// Utilities
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Cosine similarity of two equal-length vectors.
 * Returns 0 for empty, length-mismatched, or zero-norm inputs.
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number}
 */
function cosineSimilarity(a, b) {
    if (!a?.length || !b?.length || a.length !== b.length) return 0;
    let dot = 0, nA = 0, nB = 0;
    for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];
        nA += a[i] * a[i];
        nB += b[i] * b[i];
    }
    return nA && nB ? dot / (Math.sqrt(nA) * Math.sqrt(nB)) : 0;
}

/**
 * Normalize a string for matching: NFKC fold, strip zero-width characters,
 * trim, lowercase.
 * @param {string} s
 * @returns {string}
 */
function normalize(s) {
    return String(s || '')
        .normalize('NFKC')
        .replace(/[\u200B-\u200D\uFEFF]/g, '')
        .trim()
        .toLowerCase();
}

/**
 * Get the most recent chat messages.
 * @param {object[]} chat
 * @param {number} count
 * @param {boolean} excludeLastAi - drop the trailing message when it is an AI turn
 * @returns {object[]}
 */
function getLastMessages(chat, count = 2, excludeLastAi = false) {
    if (!chat?.length) return [];
    let messages = [...chat];
    if (excludeLastAi && messages.length > 0 && !messages[messages.length - 1]?.is_user) {
        messages = messages.slice(0, -1);
    }
    return messages.slice(-count);
}

// ═══════════════════════════════════════════════════════════════════════════
// MMR selection
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Maximal Marginal Relevance selection: greedily pick up to k candidates,
 * trading relevance (getScore) against the max similarity to items already
 * selected (diversity).
 * @param {object[]} candidates - each must carry a unique `_id`
 * @param {number} k
 * @param {number} lambda - relevance weight in [0, 1]; (1 - lambda) weights diversity
 * @param {Function} getVector - candidate → vector (may be undefined)
 * @param {Function} getScore - candidate → relevance score
 * @returns {object[]}
 */
function mmrSelect(candidates, k, lambda, getVector, getScore) {
    const selected = [];
    const ids = new Set();
    while (selected.length < k && candidates.length) {
        let best = null;
        let bestScore = -Infinity;
        for (const c of candidates) {
            if (ids.has(c._id)) continue;
            const rel = getScore(c);
            let div = 0;
            if (selected.length) {
                const vC = getVector(c);
                if (vC?.length) {
                    // Diversity penalty = max similarity to any already-selected item
                    for (const s of selected) {
                        const sim = cosineSimilarity(vC, getVector(s));
                        if (sim > div) div = sim;
                    }
                }
            }
            const score = lambda * rel - (1 - lambda) * div;
            if (score > bestScore) { bestScore = score; best = c; }
        }
        // No pickable candidate left (all selected) — stop early
        if (!best) break;
        selected.push(best);
        ids.add(best._id);
    }
    return selected;
}

// ═══════════════════════════════════════════════════════════════════════════
// [Anchors] L0 StateAtoms retrieval
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Retrieve semantic anchors (L0 StateAtoms) by dense similarity.
 * @param {number[]} queryVector
 * @param {object} vectorConfig
 * @param {object|null} metrics - optional metrics accumulator
 * @returns {Promise<{hits: object[], floors: Set}>} hits sorted by similarity desc
 */
async function recallAnchors(queryVector, vectorConfig, metrics) {
    const { chatId } = getContext();
    if (!chatId || !queryVector?.length) {
        return { hits: [], floors: new Set() };
    }
    const meta = await getMeta(chatId);
    const fp = getEngineFingerprint(vectorConfig);
    // Skip retrieval when stored vectors came from a different embedding engine.
    if (meta.fingerprint && meta.fingerprint !== fp) {
        xbLog.warn(MODULE_ID, 'Anchor fingerprint 不匹配');
        return { hits: [], floors: new Set() };
    }
    const stateVectors = await getAllStateVectors(chatId);
    if (!stateVectors.length) {
        return { hits: [], floors: new Set() };
    }
    const atomsList = getStateAtoms();
    const atomMap = new Map(atomsList.map(a => [a.atomId, a]));
    const scored = stateVectors
        .map(sv => {
            const atom = atomMap.get(sv.atomId);
            // Drop vectors whose atom no longer exists
            if (!atom) return null;
            return {
                atomId: sv.atomId,
                floor: sv.floor,
                similarity: cosineSimilarity(queryVector, sv.vector),
                atom,
            };
        })
        .filter(Boolean)
        .filter(s => s.similarity >= CONFIG.ANCHOR_MIN_SIMILARITY)
        .sort((a, b) => b.similarity - a.similarity);
    const floors = new Set(scored.map(s => s.floor));
    if (metrics) {
        metrics.anchor.matched = scored.length;
        metrics.anchor.floorsHit = floors.size;
        metrics.anchor.topHits = scored.slice(0, 5).map(s => ({
            floor: s.floor,
            semantic: s.atom?.semantic?.slice(0, 50),
            similarity: Math.round(s.similarity * 1000) / 1000,
        }));
    }
    return { hits: scored, floors };
}

// ═══════════════════════════════════════════════════════════════════════════
// [Events] L2 Events retrieval
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Retrieve events (L2): dense similarity + entity filter + MMR selection.
 * @param {number[]} queryVector
 * @param {object[]} allEvents
 *
@param {object} vectorConfig
 * @param {string[]} focusEntities
 * @param {object|null} metrics
 * @returns {Promise<object[]>} selected events with `_recallType` tags
 */
async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities, metrics) {
    const { chatId } = getContext();
    if (!chatId || !queryVector?.length || !allEvents?.length) {
        return [];
    }
    const meta = await getMeta(chatId);
    const fp = getEngineFingerprint(vectorConfig);
    // Skip retrieval when stored vectors came from a different embedding engine.
    if (meta.fingerprint && meta.fingerprint !== fp) {
        xbLog.warn(MODULE_ID, 'Event fingerprint 不匹配');
        return [];
    }
    const eventVectors = await getAllEventVectors(chatId);
    const vectorMap = new Map(eventVectors.map(v => [v.eventId, v.vector]));
    if (!vectorMap.size) {
        return [];
    }
    const focusSet = new Set((focusEntities || []).map(normalize));
    const scored = allEvents.map(event => {
        const v = vectorMap.get(event.id);
        const baseSim = v ? cosineSimilarity(queryVector, v) : 0;
        const participants = (event.participants || []).map(p => normalize(p));
        const hasEntityMatch = participants.some(p => focusSet.has(p));
        return {
            _id: event.id,
            event,
            similarity: baseSim,
            _hasEntityMatch: hasEntityMatch,
            vector: v,
        };
    });
    if (metrics) {
        metrics.event.inStore = allEvents.length;
    }
    let candidates = scored
        .filter(s => s.similarity >= CONFIG.EVENT_MIN_SIMILARITY)
        .sort((a, b) => b.similarity - a.similarity)
        .slice(0, CONFIG.EVENT_CANDIDATE_MAX);
    if (metrics) {
        metrics.event.considered = candidates.length;
    }
    // Entity filter: keep entity-matching candidates; very high similarity bypasses it
    if (focusSet.size > 0) {
        const beforeFilter = candidates.length;
        candidates = candidates.filter(c => {
            if (c.similarity >= 0.85) return true;
            return c._hasEntityMatch;
        });
        if (metrics) {
            metrics.event.entityFilter = {
                focusEntities: focusEntities || [],
                before: beforeFilter,
                after: candidates.length,
                filtered: beforeFilter - candidates.length,
            };
        }
    }
    // MMR selection for diversity among the surviving candidates
    const selected = mmrSelect(
        candidates,
        CONFIG.EVENT_SELECT_MAX,
        CONFIG.EVENT_MMR_LAMBDA,
        c => c.vector,
        c => c.similarity
    );
    let directCount = 0;
    let relatedCount = 0;
    const results = selected.map(s => {
        // DIRECT = a focus entity participates; RELATED = similarity-only hit
        const recallType = s._hasEntityMatch ? 'DIRECT' : 'RELATED';
        if (recallType === 'DIRECT') directCount++; else relatedCount++;
        return {
            event: s.event,
            similarity: s.similarity,
            _recallType: recallType,
        };
    });
    if (metrics) {
        metrics.event.selected = results.length;
        metrics.event.byRecallType = { direct: directCount, related: relatedCount, causal: 0, lexical: 0 };
        metrics.event.similarityDistribution = calcSimilarityStats(results.map(r => r.similarity));
    }
    return results;
}

// ═══════════════════════════════════════════════════════════════════════════
// [Causation] Causal-chain tracing
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Build an id → event index.
 * @param {object[]} allEvents
 * @returns {Map}
 */
function buildEventIndex(allEvents) {
    const map = new Map();
    for (const e of allEvents || []) {
        if (e?.id) map.set(e.id, e);
    }
    return map;
}

/**
 * Trace causal ancestors of the recalled events via `causedBy` links.
 * @param {object[]} eventHits - recalled events (chain roots)
 * @param {Map} eventIndex - id → event
 * @param {number} maxDepth - recursion depth cap
 * @returns {{results: object[], maxDepth: number}} deduped ancestors plus the
 *   deepest level actually reached
 */
function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MAX_DEPTH) {
    const out = new Map();
    // Only ids of the form "evt-<n>" are followed
    const idRe = /^evt-\d+$/;
    let maxActualDepth = 0;
    function visit(parentId, depth, chainFrom) {
        if (depth > maxDepth) return;
        if (!idRe.test(parentId)) return;
        const ev = eventIndex.get(parentId);
        if (!ev) return;
        if (depth > maxActualDepth) maxActualDepth = depth;
        const existed = out.get(parentId);
        if (!existed) {
            out.set(parentId, { event: ev, depth, chainFrom: [chainFrom] });
        } else {
            // Keep the shallowest depth and record every chain root reaching this event
            if (depth < existed.depth) existed.depth = depth;
            if (!existed.chainFrom.includes(chainFrom)) existed.chainFrom.push(chainFrom);
        }
        for (const next of (ev.causedBy || [])) {
            visit(String(next || '').trim(), depth + 1, chainFrom);
        }
    }
    for (const r of eventHits || []) {
        const rid = r?.event?.id;
        if (!rid) continue;
        for (const cid of (r.event?.causedBy || [])) {
            visit(String(cid || '').trim(), 1, rid);
        }
    }
    // Sort: most-referenced first, then shallowest
    const results = Array.from(out.values())
        .sort((a, b) => {
            const refDiff = b.chainFrom.length -
a.chainFrom.length;
            if (refDiff !== 0) return refDiff;
            return a.depth - b.depth;
        })
        .slice(0, CONFIG.CAUSAL_INJECT_MAX);
    return { results, maxDepth: maxActualDepth };
}

// ═══════════════════════════════════════════════════════════════════════════
// [W-RRF] Weighted Reciprocal Rank Fusion (L0-only)
// ═══════════════════════════════════════════════════════════════════════════

/**
 * @typedef {object} RankedItem
 * @property {string} id - unique identifier
 * @property {number} score - raw score from that retrieval path
 */

/**
 * W-RRF weighted reciprocal-rank fusion at floor granularity.
 *
 * @param {{id: number, score: number}[]} denseRank - dense path (floor → max cosine, descending)
 * @param {{id: number, score: number}[]} lexRank - lexical path (floor → max bm25, descending)
 * @param {number} cap - output limit
 * @returns {{top: {id: number, fusionScore: number}[], totalUnique: number}}
 */
function fuseByFloor(denseRank, lexRank, cap = CONFIG.FUSION_CAP) {
    const k = CONFIG.RRF_K;
    const wD = CONFIG.RRF_W_DENSE;
    const wL = CONFIG.RRF_W_LEX;
    // id → best (first-seen) rank position within one path
    const buildRankMap = (ranked) => {
        const map = new Map();
        for (let i = 0; i < ranked.length; i++) {
            const id = ranked[i].id;
            if (!map.has(id)) map.set(id, i);
        }
        return map;
    };
    const denseMap = buildRankMap(denseRank || []);
    const lexMap = buildRankMap(lexRank || []);
    const allIds = new Set([...denseMap.keys(), ...lexMap.keys()]);
    const totalUnique = allIds.size;
    const scored = [];
    for (const id of allIds) {
        let score = 0;
        // RRF: each path contributes weight / (k + rank); ids in both paths sum both.
        if (denseMap.has(id)) score += wD / (k + denseMap.get(id));
        if (lexMap.has(id)) score += wL / (k + lexMap.get(id));
        scored.push({ id, fusionScore: score });
    }
    scored.sort((a, b) => b.fusionScore - a.fusionScore);
    return { top: scored.slice(0, cap), totalUnique };
}

// ═══════════════════════════════════════════════════════════════════════════
// [Stage 6] Floor fusion + rerank + L1 pairing
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Floor-granularity fusion + rerank + L1 pairing.
 *
 * @param {object[]} anchorHits - L0 dense hits (Round 2)
 * @param {number[]} queryVector - query vector (v1)
 * @param {string} rerankQuery - rerank query text (plain natural language)
 *
@param {object} lexicalResult - lexical retrieval result
 * @param {object} metrics
 * @returns {Promise<{l0Selected: object[], l1ByFloor: Map}>}
 */
async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexicalResult, metrics) {
    const { chatId, chat, name1, name2 } = getContext();
    if (!chatId) return { l0Selected: [], l1ByFloor: new Map() };
    const T_Start = performance.now();

    // ─────────────────────────────────────────────────────────────────
    // 6a. Dense floor rank (each floor keeps its max cosine)
    // ─────────────────────────────────────────────────────────────────
    const denseFloorMap = new Map();
    for (const a of (anchorHits || [])) {
        const cur = denseFloorMap.get(a.floor) || 0;
        if (a.similarity > cur) denseFloorMap.set(a.floor, a.similarity);
    }
    const denseFloorRank = [...denseFloorMap.entries()]
        .sort((a, b) => b[1] - a[1])
        .map(([floor, score]) => ({ id: floor, score }));

    // ─────────────────────────────────────────────────────────────────
    // 6b. Lexical floor rank (chunkScores → floor aggregation + USER→AI mapping + pre-filter)
    // ─────────────────────────────────────────────────────────────────
    const atomFloorSet = new Set(getStateAtoms().map(a => a.floor));
    const lexFloorScores = new Map();
    for (const { chunkId, score } of (lexicalResult?.chunkScores || [])) {
        // Chunk ids look like "c-<floor>-…"
        const match = chunkId?.match(/^c-(\d+)-/);
        if (!match) continue;
        let floor = parseInt(match[1], 10);
        // Map a USER floor onto the AI floor that follows it
        if (chat?.[floor]?.is_user) {
            const aiFloor = floor + 1;
            if (aiFloor < chat.length && !chat[aiFloor]?.is_user) {
                floor = aiFloor;
            } else {
                continue;
            }
        }
        // Pre-filter: the floor must have L0 atoms
        if (!atomFloorSet.has(floor)) continue;
        const cur = lexFloorScores.get(floor) || 0;
        if (score > cur) lexFloorScores.set(floor, score);
    }
    const lexFloorRank = [...lexFloorScores.entries()]
        .sort((a, b) => b[1] - a[1])
        .map(([floor, score]) => ({ id: floor, score }));

    // ─────────────────────────────────────────────────────────────────
    // 6c. Floor W-RRF fusion
    // ─────────────────────────────────────────────────────────────────
    const T_Fusion_Start = performance.now();
    const { top: fusedFloors, totalUnique } = fuseByFloor(denseFloorRank, lexFloorRank, CONFIG.FUSION_CAP);
    const fusionTime = Math.round(performance.now() - T_Fusion_Start);
    if (metrics) {
        metrics.fusion.denseFloors = denseFloorRank.length;
        metrics.fusion.lexFloors = lexFloorRank.length;
        metrics.fusion.totalUnique = totalUnique;
        metrics.fusion.afterCap = fusedFloors.length;
        metrics.fusion.time = fusionTime;
        metrics.evidence.floorCandidates = fusedFloors.length;
    }
    if (fusedFloors.length === 0) {
        if (metrics) {
            metrics.evidence.floorsSelected = 0;
            metrics.evidence.l0Collected = 0;
            metrics.evidence.l1Pulled = 0;
            metrics.evidence.l1Attached = 0;
            metrics.evidence.l1CosineTime = 0;
            metrics.evidence.rerankApplied = false;
        }
        return { l0Selected: [], l1ByFloor: new Map() };
    }

    // ─────────────────────────────────────────────────────────────────
    // 6d. Pull L1 chunks + cosine scoring
    // ─────────────────────────────────────────────────────────────────
    const floorsToFetch = new Set();
    for (const f of fusedFloors) {
        floorsToFetch.add(f.id);
        // Also fetch the preceding USER floor, if any
        const userFloor = f.id - 1;
        if (userFloor >= 0 && chat?.[userFloor]?.is_user) {
            floorsToFetch.add(userFloor);
        }
    }
    const l1ScoredByFloor = await pullAndScoreL1(chatId, [...floorsToFetch], queryVector, chat);
    if (metrics) {
        let totalPulled = 0;
        // NOTE(review): Map iteration yields only entries, so the `_cosineTime`
        // expando property set by pullAndScoreL1 never appears here — the guard
        // below is purely defensive.
        for (const [key, chunks] of l1ScoredByFloor) {
            if (key === '_cosineTime') continue;
            totalPulled += chunks.length;
        }
        metrics.evidence.l1Pulled = totalPulled;
        metrics.evidence.l1CosineTime = l1ScoredByFloor._cosineTime || 0;
    }

    // ─────────────────────────────────────────────────────────────────
    // 6e. Build rerank documents (per floor: USER chunks + AI chunks)
    // ─────────────────────────────────────────────────────────────────
    const rerankCandidates = [];
    for (const f of fusedFloors) {
        const aiFloor = f.id;
        const userFloor = aiFloor - 1;
        const aiChunks = l1ScoredByFloor.get(aiFloor) || [];
        const userChunks = (userFloor >= 0 && chat?.[userFloor]?.is_user)
            ? (l1ScoredByFloor.get(userFloor) || [])
            : [];
        const parts = [];
        const userName = chat?.[userFloor]?.name || name1 || '用户';
        const aiName = chat?.[aiFloor]?.name || name2 || '角色';
        if (userChunks.length > 0) {
            parts.push(`${userName}:${userChunks.map(c => c.text).join(' ')}`);
        }
        if (aiChunks.length > 0) {
            parts.push(`${aiName}:${aiChunks.map(c => c.text).join(' ')}`);
        }
        const text = parts.join('\n');
        if (!text.trim()) continue;
        rerankCandidates.push({
            floor: aiFloor,
            text,
            fusionScore: f.fusionScore,
        });
    }

    // ─────────────────────────────────────────────────────────────────
    // 6f. Concurrent rerank
    // ─────────────────────────────────────────────────────────────────
    const T_Rerank_Start = performance.now();
    const reranked = await rerankChunks(rerankQuery, rerankCandidates, {
        topN: CONFIG.RERANK_TOP_N,
        minScore: CONFIG.RERANK_MIN_SCORE,
    });
    const rerankTime = Math.round(performance.now() - T_Rerank_Start);
    if (metrics) {
        metrics.evidence.rerankApplied = true;
        metrics.evidence.beforeRerank = rerankCandidates.length;
        metrics.evidence.afterRerank = reranked.length;
        metrics.evidence.rerankFailed = reranked.some(c => c._rerankFailed);
        metrics.evidence.rerankTime = rerankTime;
        metrics.timing.evidenceRerank = rerankTime;
        const scores = reranked.map(c => c._rerankScore || 0).filter(s => s > 0);
        if (scores.length > 0) {
            scores.sort((a, b) => a - b);
            metrics.evidence.rerankScores = {
                min: Number(scores[0].toFixed(3)),
                max: Number(scores[scores.length - 1].toFixed(3)),
                mean: Number((scores.reduce((a, b) => a + b, 0) / scores.length).toFixed(3)),
            };
        }
        // Average rerank document length
        if (rerankCandidates.length > 0) {
            const totalLen = rerankCandidates.reduce((s, c) => s + (c.text?.length || 0), 0);
            metrics.evidence.rerankDocAvgLength = Math.round(totalLen / rerankCandidates.length);
        }
    }

    // ─────────────────────────────────────────────────────────────────
    // 6g. Collect L0 atoms + L1 top-1 pairing
    // ─────────────────────────────────────────────────────────────────
    const atomsList = getStateAtoms();
    const atomsByFloor = new Map();
    for (const atom of atomsList) {
        if (typeof atom.floor !== 'number' || atom.floor < 0) continue;
        if (!atomsByFloor.has(atom.floor)) atomsByFloor.set(atom.floor, []);
        atomsByFloor.get(atom.floor).push(atom);
    }
    const l0Selected = [];
    const l1ByFloor = new Map();
    let contextPairsAdded = 0;
    for (const item of reranked) {
        const floor = item.floor;
        const rerankScore = item._rerankScore || 0;
        const denseSim = denseFloorMap.get(floor) || 0;
        // Collect every L0 atom on this floor; all share the floor's rerankScore
        const floorAtoms = atomsByFloor.get(floor) || [];
        for (const atom of floorAtoms) {
            l0Selected.push({
                id: `anchor-${atom.atomId}`,
                atomId: atom.atomId,
                floor: atom.floor,
                similarity: denseSim,
                rerankScore,
                atom,
                text: atom.semantic || '',
            });
        }
        // L1 top-1 pairing (highest cosine per side)
        const aiChunks = l1ScoredByFloor.get(floor) || [];
        const userFloor = floor - 1;
        const userChunks = (userFloor >= 0 && chat?.[userFloor]?.is_user)
            ? (l1ScoredByFloor.get(userFloor) || [])
            : [];
        const aiTop1 = aiChunks.length > 0
            ? aiChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
            : null;
        const userTop1 = userChunks.length > 0
            ? userChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
            : null;
        if (userTop1) contextPairsAdded++;
        l1ByFloor.set(floor, { aiTop1, userTop1 });
    }

    // ─────────────────────────────────────────────────────────────────
    // 6h. Metrics
    // ─────────────────────────────────────────────────────────────────
    if (metrics) {
        metrics.evidence.floorsSelected = reranked.length;
        metrics.evidence.l0Collected = l0Selected.length;
        let totalAttached = 0;
        for (const [, pair] of l1ByFloor) {
            if (pair.aiTop1) totalAttached++;
            if (pair.userTop1) totalAttached++;
        }
        metrics.evidence.l1Attached = totalAttached;
        metrics.evidence.contextPairsAdded = contextPairsAdded;
    }
    const totalTime = Math.round(performance.now() - T_Start);
    if (metrics) {
        metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
    }
    xbLog.info(MODULE_ID,
        `Evidence: ${denseFloorRank.length} dense floors + ${lexFloorRank.length} lex floors → fusion=${fusedFloors.length} → rerank=${reranked.length} floors → L0=${l0Selected.length} L1 attached=${metrics?.evidence?.l1Attached || 0} (${totalTime}ms)`
    );
    return { l0Selected, l1ByFloor };
}

// [L1] Pull + cosine scoring (concurrent subtask)
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Pull the L1 chunks + vectors for the given floors from IndexedDB and score
 * each chunk against queryVector by cosine similarity.
 *
 * @param {string} chatId
 * @param {number[]} floors - floors to fetch
 * @param {number[]} queryVector - query vector (v1)
 * @param {object[]} chat - chat message array
 * @returns {Promise<Map<number, object[]>>} floor → scored chunks (with `_cosineScore`,
 *   sorted descending); the Map also carries a `_cosineTime` expando (elapsed ms)
 */
async function pullAndScoreL1(chatId, floors, queryVector, chat) {
    const T0 = performance.now();
    /** @type {Map<number, object[]>} */
    const result = new Map();
    if (!chatId || !floors?.length || !queryVector?.length) {
        result._cosineTime = 0;
        return result;
    }
    // Pull chunks
    let dbChunks = [];
    try {
        dbChunks = await getChunksByFloors(chatId, floors);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'L1 chunks 拉取失败', e);
        result._cosineTime = Math.round(performance.now() - T0);
        return result;
    }
    if (!dbChunks.length) {
        result._cosineTime = Math.round(performance.now() - T0);
        return result;
    }
    // Pull vectors
    const chunkIds = dbChunks.map(c => c.chunkId);
    let chunkVectors = [];
    try {
        chunkVectors = await getChunkVectorsByIds(chatId, chunkIds);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'L1 向量拉取失败', e);
        result._cosineTime = Math.round(performance.now() - T0);
        return result;
    }
    const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
    // Cosine scoring + group by floor
    for (const chunk of dbChunks) {
        const vec = vectorMap.get(chunk.chunkId);
        // Chunks without a stored vector score 0 rather than being dropped
        const cosineScore = vec?.length ? cosineSimilarity(queryVector, vec) : 0;
        const scored = {
            chunkId: chunk.chunkId,
            floor: chunk.floor,
            chunkIdx: chunk.chunkIdx,
            speaker: chunk.speaker,
            isUser: chunk.isUser,
            text: chunk.text,
            _cosineScore: cosineScore,
        };
        if (!result.has(chunk.floor)) {
            result.set(chunk.floor, []);
        }
        result.get(chunk.floor).push(scored);
    }
    // Sort each floor's chunks by cosine, descending
    for (const [, chunks] of result) {
        chunks.sort((a, b) => b._cosineScore - a._cosineScore);
    }
    result._cosineTime = Math.round(performance.now() - T0);
    xbLog.info(MODULE_ID,
        `L1 pull: ${floors.length} floors → ${dbChunks.length} chunks → scored (${result._cosineTime}ms)`
    );
    return result;
}

// ═══════════════════════════════════════════════════════════════════════════
// Main entry
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Run memory recall.
 *
 * @param {object[]} allEvents - all events (L2)
 * @param {object} vectorConfig - vector configuration
 * @param {object} options
 * @param {boolean} options.excludeLastAi
 * @param {string|null} options.pendingUserMessage
 * @returns {Promise<object>} recall result bundle (events, l0Selected, l1ByFloor,
 *   causalChain, focusEntities, elapsed, metrics)
 */
export async function recallMemory(allEvents, vectorConfig, options = {}) {
    const T0 = performance.now();
    const { chat } = getContext();
    const { pendingUserMessage = null, excludeLastAi = false } = options;
    const metrics = createMetrics();
    // Nothing stored yet — short-circuit with an empty result
    if (!allEvents?.length) {
        metrics.anchor.needRecall = false;
        metrics.timing.total = Math.round(performance.now() - T0);
        return {
            events: [],
            l0Selected: [],
            l1ByFloor: new Map(),
            causalChain: [],
            focusEntities: [],
            elapsed: metrics.timing.total,
            logText: 'No events.',
            metrics,
        };
    }
    metrics.anchor.needRecall = true;

    // ═══════════════════════════════════════════════════════════════════
    // Stage 1: Query Build
// ═══════════════════════════════════════════════════════════════════ const T_Build_Start = performance.now(); const lastMessages = getLastMessages(chat, CONFIG.LAST_MESSAGES_K, excludeLastAi); const bundle = buildQueryBundle(lastMessages, pendingUserMessage); metrics.query.buildTime = Math.round(performance.now() - T_Build_Start); metrics.anchor.focusEntities = bundle.focusEntities; if (metrics.query?.lengths) { metrics.query.lengths.v0Chars = String(bundle.queryText_v0 || '').length; metrics.query.lengths.v1Chars = null; metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v0 || '').length; } xbLog.info(MODULE_ID, `Query Build: focus=[${bundle.focusEntities.join(',')}] lexTerms=[${bundle.lexicalTerms.slice(0, 5).join(',')}]` ); // ═══════════════════════════════════════════════════════════════════ // 阶段 2: Round 1 Dense Retrieval // ═══════════════════════════════════════════════════════════════════ let queryVector_v0; try { const [vec] = await embed([bundle.queryText_v0], vectorConfig, { timeout: 10000 }); queryVector_v0 = vec; } catch (e1) { xbLog.warn(MODULE_ID, 'Round 1 向量化失败,500ms 后重试', e1); await new Promise(r => setTimeout(r, 500)); try { const [vec] = await embed([bundle.queryText_v0], vectorConfig, { timeout: 15000 }); queryVector_v0 = vec; } catch (e2) { xbLog.error(MODULE_ID, 'Round 1 向量化重试仍失败', e2); metrics.timing.total = Math.round(performance.now() - T0); return { events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [], focusEntities: bundle.focusEntities, elapsed: metrics.timing.total, logText: 'Embedding failed (round 1, after retry).', metrics, }; } } if (!queryVector_v0?.length) { metrics.timing.total = Math.round(performance.now() - T0); return { events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [], focusEntities: bundle.focusEntities, elapsed: metrics.timing.total, logText: 'Empty query vector (round 1).', metrics, }; } const T_R1_Anchor_Start = performance.now(); const { hits: anchorHits_v0 
} = await recallAnchors(queryVector_v0, vectorConfig, null); const r1AnchorTime = Math.round(performance.now() - T_R1_Anchor_Start); const T_R1_Event_Start = performance.now(); const eventHits_v0 = await recallEvents(queryVector_v0, allEvents, vectorConfig, bundle.focusEntities, null); const r1EventTime = Math.round(performance.now() - T_R1_Event_Start); xbLog.info(MODULE_ID, `Round 1: anchors=${anchorHits_v0.length} events=${eventHits_v0.length} (anchor=${r1AnchorTime}ms event=${r1EventTime}ms)` ); // ═══════════════════════════════════════════════════════════════════ // 阶段 3: Query Refinement // ═══════════════════════════════════════════════════════════════════ const T_Refine_Start = performance.now(); refineQueryBundle(bundle, anchorHits_v0, eventHits_v0); metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start); metrics.anchor.focusEntities = bundle.focusEntities; if (metrics.query?.lengths) { metrics.query.lengths.v1Chars = bundle.queryText_v1 == null ? null : String(bundle.queryText_v1).length; metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v1 || bundle.queryText_v0 || '').length; } xbLog.info(MODULE_ID, `Refinement: focus=[${bundle.focusEntities.join(',')}] hasV1=${!!bundle.queryText_v1} (${metrics.query.refineTime}ms)` ); // ═══════════════════════════════════════════════════════════════════ // 阶段 4: Round 2 Dense Retrieval // ═══════════════════════════════════════════════════════════════════ const queryTextFinal = bundle.queryText_v1 || bundle.queryText_v0; let queryVector_v1; try { const [vec] = await embed([queryTextFinal], vectorConfig, { timeout: 10000 }); queryVector_v1 = vec; } catch (e) { xbLog.warn(MODULE_ID, 'Round 2 向量化失败,降级使用 Round 1 向量', e); queryVector_v1 = queryVector_v0; } const T_R2_Anchor_Start = performance.now(); const { hits: anchorHits, floors: anchorFloors_dense } = await recallAnchors(queryVector_v1, vectorConfig, metrics); metrics.timing.anchorSearch = 
Math.round(performance.now() - T_R2_Anchor_Start); const T_R2_Event_Start = performance.now(); let eventHits = await recallEvents(queryVector_v1, allEvents, vectorConfig, bundle.focusEntities, metrics); metrics.timing.eventRetrieval = Math.round(performance.now() - T_R2_Event_Start); xbLog.info(MODULE_ID, `Round 2: anchors=${anchorHits.length} floors=${anchorFloors_dense.size} events=${eventHits.length}` ); // ═══════════════════════════════════════════════════════════════════ // 阶段 5: Lexical Retrieval // ═══════════════════════════════════════════════════════════════════ const T_Lex_Start = performance.now(); let lexicalResult = { atomIds: [], atomFloors: new Set(), chunkIds: [], chunkFloors: new Set(), eventIds: [], chunkScores: [], searchTime: 0, }; try { const index = await getLexicalIndex(); if (index) { lexicalResult = searchLexicalIndex(index, bundle.lexicalTerms); } } catch (e) { xbLog.warn(MODULE_ID, 'Lexical 检索失败', e); } const lexTime = Math.round(performance.now() - T_Lex_Start); if (metrics) { metrics.lexical.atomHits = lexicalResult.atomIds.length; metrics.lexical.chunkHits = lexicalResult.chunkIds.length; metrics.lexical.eventHits = lexicalResult.eventIds.length; metrics.lexical.searchTime = lexTime; metrics.lexical.terms = bundle.lexicalTerms.slice(0, 10); } // 合并 L2 events(lexical 命中但 dense 未命中的 events) const existingEventIds = new Set(eventHits.map(e => e.event?.id).filter(Boolean)); const eventIndex = buildEventIndex(allEvents); let lexicalEventCount = 0; for (const eid of lexicalResult.eventIds) { if (!existingEventIds.has(eid)) { const ev = eventIndex.get(eid); if (ev) { eventHits.push({ event: ev, similarity: 0, _recallType: 'LEXICAL', }); existingEventIds.add(eid); lexicalEventCount++; } } } if (metrics && lexicalEventCount > 0) { metrics.event.byRecallType.lexical = lexicalEventCount; metrics.event.selected += lexicalEventCount; } xbLog.info(MODULE_ID, `Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length} 
mergedEvents=+${lexicalEventCount} (${lexTime}ms)` ); // ═══════════════════════════════════════════════════════════════════ // 阶段 6: Floor 粒度融合 + Rerank + L1 配对 // ═══════════════════════════════════════════════════════════════════ const { l0Selected, l1ByFloor } = await locateAndPullEvidence( anchorHits, queryVector_v1, bundle.rerankQuery, lexicalResult, metrics ); // ═══════════════════════════════════════════════════════════════════ // 阶段 7: Causation Trace // ═══════════════════════════════════════════════════════════════════ const { results: causalMap, maxDepth: causalMaxDepth } = traceCausation(eventHits, eventIndex); const recalledIdSet = new Set(eventHits.map(x => x?.event?.id).filter(Boolean)); const causalChain = causalMap .filter(x => x?.event?.id && !recalledIdSet.has(x.event.id)) .map(x => ({ event: x.event, similarity: 0, _recallType: 'CAUSAL', _causalDepth: x.depth, chainFrom: x.chainFrom, })); if (metrics.event.byRecallType) { metrics.event.byRecallType.causal = causalChain.length; } metrics.event.causalChainDepth = causalMaxDepth; metrics.event.causalCount = causalChain.length; // ═══════════════════════════════════════════════════════════════════ // 完成 // ═══════════════════════════════════════════════════════════════════ metrics.timing.total = Math.round(performance.now() - T0); metrics.event.entityNames = bundle.focusEntities; metrics.event.entitiesUsed = bundle.focusEntities.length; console.group('%c[Recall v7]', 'color: #7c3aed; font-weight: bold'); console.log(`Total: ${metrics.timing.total}ms`); console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`); console.log(`Focus: [${bundle.focusEntities.join(', ')}]`); console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors_dense.size} floors`); console.log(`Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`); console.log(`Fusion (floor): dense=${metrics.fusion.denseFloors} 
lex=${metrics.fusion.lexFloors} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`); console.log(`Floor Rerank: ${metrics.evidence.beforeRerank || 0} → ${metrics.evidence.floorsSelected || 0} floors → L0=${metrics.evidence.l0Collected || 0} (${metrics.evidence.rerankTime || 0}ms)`); console.log(`L1: ${metrics.evidence.l1Pulled || 0} pulled → ${metrics.evidence.l1Attached || 0} attached (${metrics.evidence.l1CosineTime || 0}ms)`); console.log(`Events: ${eventHits.length} hits, ${causalChain.length} causal`); console.groupEnd(); return { events: eventHits, causalChain, l0Selected, l1ByFloor, focusEntities: bundle.focusEntities, elapsed: metrics.timing.total, metrics, }; }