// ═══════════════════════════════════════════════════════════════════════════ // Story Summary - Recall Engine (v6 - Deterministic Query + Hybrid + W-RRF) // // 命名规范: // - 存储层用 L0/L1/L2/L3(StateAtom/Chunk/Event/Fact) // - 召回层用语义名称:anchor/evidence/event/constraint // // 架构: // 阶段 1: Query Build(确定性,无 LLM) // 阶段 2: Round 1 Dense Retrieval // 阶段 3: Query Refinement(用已命中记忆增强) // 阶段 4: Round 2 Dense Retrieval // 阶段 5: Lexical Retrieval + Merge // 阶段 6: Evidence Pull + W-RRF Fusion + Cap100 + Rerank // 阶段 7: Causation Trace // ═══════════════════════════════════════════════════════════════════════════ import { getAllEventVectors, getChunksByFloors, getMeta, getChunkVectorsByIds } from '../storage/chunk-store.js'; import { getAllStateVectors, getStateAtoms } from '../storage/state-store.js'; import { getEngineFingerprint, embed } from '../utils/embedder.js'; import { xbLog } from '../../../../core/debug-core.js'; import { getContext } from '../../../../../../../extensions.js'; import { buildQueryBundle, refineQueryBundle } from './query-builder.js'; import { getLexicalIndex, searchLexicalIndex } from './lexical-index.js'; import { rerankChunks } from '../llm/reranker.js'; import { createMetrics, calcSimilarityStats } from './metrics.js'; const MODULE_ID = 'recall'; // ═══════════════════════════════════════════════════════════════════════════ // 配置 // ═══════════════════════════════════════════════════════════════════════════ const CONFIG = { // 窗口 LAST_MESSAGES_K: 2, // Anchor (L0 StateAtoms) ANCHOR_MIN_SIMILARITY: 0.58, // Evidence (L1 Chunks) Dense 粗筛 EVIDENCE_DENSE_COARSE_MAX: 200, // Event (L2 Events) EVENT_CANDIDATE_MAX: 100, EVENT_SELECT_MAX: 50, EVENT_MIN_SIMILARITY: 0.55, EVENT_MMR_LAMBDA: 0.72, // W-RRF 融合 RRF_K: 60, RRF_W_DENSE: 1.0, RRF_W_LEX: 0.9, RRF_W_ANCHOR: 0.7, FUSION_CAP: 100, // Rerank RERANK_TOP_N: 50, RERANK_MIN_SCORE: 0.15, // 因果链 CAUSAL_CHAIN_MAX_DEPTH: 10, CAUSAL_INJECT_MAX: 30, }; // 
// ═══════════════════════════════════════════════════════════════════════════
// Utility functions
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Cosine similarity between two equal-length vectors.
 * Returns 0 for missing, empty, length-mismatched, or zero-norm inputs.
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number}
 */
function cosineSimilarity(a, b) {
  if (!a?.length || !b?.length || a.length !== b.length) return 0;
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    const x = a[i];
    const y = b[i];
    dot += x * y;
    normA += x * x;
    normB += y * y;
  }
  if (!normA || !normB) return 0;
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

/**
 * Normalize a string for matching: NFKC fold, strip zero-width characters,
 * trim, and lowercase.
 * @param {string} s
 * @returns {string}
 */
function normalize(s) {
  const folded = String(s || '').normalize('NFKC');
  const stripped = folded.replace(/[\u200B-\u200D\uFEFF]/g, '');
  return stripped.trim().toLowerCase();
}

/**
 * Take the most recent messages from the chat log.
 * @param {object[]} chat
 * @param {number} count - how many trailing messages to keep
 * @param {boolean} excludeLastAi - drop a trailing AI message before slicing
 * @returns {object[]}
 */
function getLastMessages(chat, count = 2, excludeLastAi = false) {
  if (!chat?.length) return [];
  const tail = [...chat];
  if (excludeLastAi && tail.length > 0 && !tail[tail.length - 1]?.is_user) {
    tail.pop();
  }
  return tail.slice(-count);
}

// ═══════════════════════════════════════════════════════════════════════════
// MMR selection
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Maximal Marginal Relevance selection: greedily picks up to k candidates,
 * trading relevance against redundancy with the items already picked.
 * @param {object[]} candidates - each must carry a unique `_id`
 * @param {number} k - maximum number of items to pick
 * @param {number} lambda - relevance weight in [0, 1]; (1 - lambda) weighs diversity
 * @param {Function} getVector - candidate → embedding vector
 * @param {Function} getScore - candidate → relevance score
 * @returns {object[]} selected candidates, in pick order
 */
function mmrSelect(candidates, k, lambda, getVector, getScore) {
  const picked = [];
  const pickedIds = new Set();
  while (picked.length < k && candidates.length) {
    let winner = null;
    let winnerScore = -Infinity;
    for (const cand of candidates) {
      if (pickedIds.has(cand._id)) continue;
      const relevance = getScore(cand);
      // Redundancy = max similarity to anything already picked.
      let redundancy = 0;
      if (picked.length) {
        const vec = getVector(cand);
        if (vec?.length) {
          for (const prev of picked) {
            const sim = cosineSimilarity(vec, getVector(prev));
            if (sim > redundancy) redundancy = sim;
          }
        }
      }
      const mmr = lambda * relevance - (1 - lambda) * redundancy;
      if (mmr > winnerScore) {
        winnerScore = mmr;
        winner = cand;
      }
    }
    if (!winner) break;
    picked.push(winner);
    pickedIds.add(winner._id);
  }
  return picked;
}

// ═══════════════════════════════════════════════════════════════════════════
// [Anchors] L0 StateAtoms retrieval
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Retrieve semantic anchors (L0 state atoms) by dense similarity.
 * Skips retrieval entirely when the stored vectors were produced by a
 * different embedding engine (fingerprint mismatch).
 * @param {number[]} queryVector
 * @param {object} vectorConfig
 * @param {object|null} metrics - recall metrics accumulator, or null for round 1
 * @returns {Promise<{hits: object[], floors: Set}>} hits sorted by similarity desc
 */
async function recallAnchors(queryVector, vectorConfig, metrics) {
  const { chatId } = getContext();
  if (!chatId || !queryVector?.length) {
    return { hits: [], floors: new Set() };
  }

  // Refuse to compare vectors built by a different embedding engine.
  const meta = await getMeta(chatId);
  const fp = getEngineFingerprint(vectorConfig);
  if (meta.fingerprint && meta.fingerprint !== fp) {
    xbLog.warn(MODULE_ID, 'Anchor fingerprint 不匹配');
    return { hits: [], floors: new Set() };
  }

  const stateVectors = await getAllStateVectors(chatId);
  if (!stateVectors.length) {
    return { hits: [], floors: new Set() };
  }

  const atomById = new Map(getStateAtoms().map(a => [a.atomId, a]));

  const hits = [];
  for (const sv of stateVectors) {
    const atom = atomById.get(sv.atomId);
    if (!atom) continue;
    const similarity = cosineSimilarity(queryVector, sv.vector);
    if (similarity < CONFIG.ANCHOR_MIN_SIMILARITY) continue;
    hits.push({ atomId: sv.atomId, floor: sv.floor, similarity, atom });
  }
  hits.sort((a, b) => b.similarity - a.similarity);

  const floors = new Set(hits.map(h => h.floor));

  if (metrics) {
    metrics.anchor.matched = hits.length;
    metrics.anchor.floorsHit = floors.size;
    metrics.anchor.topHits = hits.slice(0, 5).map(h => ({
      floor: h.floor,
      semantic: h.atom?.semantic?.slice(0, 50),
      similarity: Math.round(h.similarity * 1000) / 1000,
    }));
  }

  return { hits, floors };
}

// ═══════════════════════════════════════════════════════════════════════════
// [Events] L2 Events retrieval (no entity bonus)
//
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Retrieve events (L2) by dense similarity, filtered by focus entities and
 * diversified with MMR. No entity score bonus is applied — entities only gate
 * (filter) candidates.
 * @param {number[]} queryVector
 * @param {object[]} allEvents
 * @param {object} vectorConfig
 * @param {string[]} focusEntities - normalized against participants
 * @param {object|null} metrics
 * @returns {Promise<object[]>} [{ event, similarity, _recallType }]
 */
async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities, metrics) {
  const { chatId } = getContext();
  if (!chatId || !queryVector?.length || !allEvents?.length) {
    return [];
  }

  // Refuse to compare vectors built by a different embedding engine.
  const meta = await getMeta(chatId);
  const fp = getEngineFingerprint(vectorConfig);
  if (meta.fingerprint && meta.fingerprint !== fp) {
    xbLog.warn(MODULE_ID, 'Event fingerprint 不匹配');
    return [];
  }

  const eventVectors = await getAllEventVectors(chatId);
  const vectorMap = new Map(eventVectors.map(v => [v.eventId, v.vector]));
  if (!vectorMap.size) {
    return [];
  }

  const focusSet = new Set((focusEntities || []).map(normalize));

  const scored = allEvents.map(event => {
    const v = vectorMap.get(event.id);
    const baseSim = v ? cosineSimilarity(queryVector, v) : 0;
    const participants = (event.participants || []).map(p => normalize(p));
    const hasEntityMatch = participants.some(p => focusSet.has(p));
    return {
      _id: event.id,
      event,
      similarity: baseSim,
      _hasEntityMatch: hasEntityMatch,
      vector: v,
    };
  });

  if (metrics) {
    metrics.event.inStore = allEvents.length;
  }

  let candidates = scored
    .filter(s => s.similarity >= CONFIG.EVENT_MIN_SIMILARITY)
    .sort((a, b) => b.similarity - a.similarity)
    .slice(0, CONFIG.EVENT_CANDIDATE_MAX);

  if (metrics) {
    metrics.event.considered = candidates.length;
  }

  // Entity filter: keep entity matches, plus very strong dense hits regardless.
  if (focusSet.size > 0) {
    const beforeFilter = candidates.length;
    candidates = candidates.filter(c => {
      if (c.similarity >= 0.85) return true;
      return c._hasEntityMatch;
    });
    if (metrics) {
      metrics.event.entityFilter = {
        focusEntities: focusEntities || [],
        before: beforeFilter,
        after: candidates.length,
        filtered: beforeFilter - candidates.length,
      };
    }
  }

  // MMR selection for diversity.
  const selected = mmrSelect(
    candidates,
    CONFIG.EVENT_SELECT_MAX,
    CONFIG.EVENT_MMR_LAMBDA,
    c => c.vector,
    c => c.similarity
  );

  let directCount = 0;
  let relatedCount = 0;
  const results = selected.map(s => {
    const recallType = s._hasEntityMatch ? 'DIRECT' : 'RELATED';
    if (recallType === 'DIRECT') directCount++;
    else relatedCount++;
    return {
      event: s.event,
      similarity: s.similarity,
      _recallType: recallType,
    };
  });

  if (metrics) {
    metrics.event.selected = results.length;
    metrics.event.byRecallType = { direct: directCount, related: relatedCount, causal: 0, lexical: 0 };
    metrics.event.similarityDistribution = calcSimilarityStats(results.map(r => r.similarity));
  }

  return results;
}

// ═══════════════════════════════════════════════════════════════════════════
// [Causation] Causal-chain tracing
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Build an id → event index.
 * @param {object[]} allEvents
 * @returns {Map}
 */
function buildEventIndex(allEvents) {
  const map = new Map();
  for (const e of allEvents || []) {
    if (e?.id) map.set(e.id, e);
  }
  return map;
}

/**
 * Trace `causedBy` ancestors of the recalled events.
 * Only ids of the form `evt-<digits>` are followed; recursion is bounded by
 * maxDepth (this also terminates cyclic causedBy graphs). Ancestors reached
 * from multiple hits are ranked first, then shallower ones.
 * @param {object[]} eventHits - [{ event }] recalled events to trace from
 * @param {Map} eventIndex - id → event
 * @param {number} maxDepth
 * @returns {{results: object[], maxDepth: number}} results: [{ event, depth, chainFrom }]
 */
function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MAX_DEPTH) {
  const out = new Map();
  const idRe = /^evt-\d+$/;
  let maxActualDepth = 0;

  function visit(parentId, depth, chainFrom) {
    if (depth > maxDepth) return;
    if (!idRe.test(parentId)) return;
    const ev = eventIndex.get(parentId);
    if (!ev) return;
    if (depth > maxActualDepth) maxActualDepth = depth;
    const existed = out.get(parentId);
    if (!existed) {
      out.set(parentId, { event: ev, depth, chainFrom: [chainFrom] });
    } else {
      // Keep the shallowest depth and record every distinct origin.
      if (depth < existed.depth) existed.depth = depth;
      if (!existed.chainFrom.includes(chainFrom)) existed.chainFrom.push(chainFrom);
    }
    for (const next of (ev.causedBy || [])) {
      visit(String(next || '').trim(), depth + 1, chainFrom);
    }
  }

  for (const r of eventHits || []) {
    const rid = r?.event?.id;
    if (!rid) continue;
    for (const cid of (r.event?.causedBy || [])) {
      visit(String(cid || '').trim(), 1, rid);
    }
  }

  const results = Array.from(out.values())
    .sort((a, b) => {
      const refDiff = b.chainFrom.length - a.chainFrom.length;
      if (refDiff !== 0) return refDiff;
      return a.depth - b.depth;
    })
    .slice(0, CONFIG.CAUSAL_INJECT_MAX);

  return { results, maxDepth: maxActualDepth };
}

// ═══════════════════════════════════════════════════════════════════════════
// [W-RRF] Weighted reciprocal-rank fusion
// ═══════════════════════════════════════════════════════════════════════════

/**
 * @typedef {object} RankedItem
 * @property {string} chunkId - unique chunk identifier
 * @property {number} score - raw per-lane score (logging only; not used by RRF)
 */

/**
 * W-RRF fusion of the three chunk-candidate lanes.
 *
 * @param {RankedItem[]} denseRank - dense lane (cosine desc)
 * @param {RankedItem[]} lexRank - lexical lane (MiniSearch score desc)
 * @param {RankedItem[]} anchorRank - anchor lane (anchor similarity desc)
 * @param {number} cap - output cap
 * @returns {{top: {chunkId: string, fusionScore: number}[], totalUnique: number}}
 */
function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION_CAP) {
  const k = CONFIG.RRF_K;
  const wD = CONFIG.RRF_W_DENSE;
  const wL = CONFIG.RRF_W_LEX;
  const wA = CONFIG.RRF_W_ANCHOR;

  // Build rank map: chunkId → 0-based rank (first occurrence wins).
  const buildRankMap = (ranked) => {
    const map = new Map();
    for (let i = 0; i < ranked.length; i++) {
      const id = ranked[i].chunkId;
      if (!map.has(id)) map.set(id, i);
    }
    return map;
  };

  const denseMap = buildRankMap(denseRank || []);
  const lexMap = buildRankMap(lexRank || []);
  const anchorMap = buildRankMap(anchorRank || []);

  // Collect all chunkIds (deduplicated).
  const allIds = new Set([
    ...denseMap.keys(),
    ...lexMap.keys(),
    ...anchorMap.keys(),
  ]);
  // Deduplicated candidate total (consumed by metrics.fusion.totalUnique).
  const totalUnique = allIds.size;

  // Weighted RRF score: sum of w / (k + rank) over the lanes that contain the id.
  const scored = [];
  for (const id of allIds) {
    let score = 0;
    if (denseMap.has(id)) {
      score += wD / (k + denseMap.get(id));
    }
    if (lexMap.has(id)) {
      score += wL / (k + lexMap.get(id));
    }
    if (anchorMap.has(id)) {
      score += wA / (k + anchorMap.get(id));
    }
    scored.push({ chunkId: id, fusionScore: score });
  }

  // Sort by fusion score desc and keep the top `cap`.
  scored.sort((a, b) => b.fusionScore - a.fusionScore);
  return {
    top: scored.slice(0, cap),
    totalUnique,
  };
}

// ═══════════════════════════════════════════════════════════════════════════
// [Evidence] L1 chunk pull + fusion + rerank
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Count evidence composition by type.
 * @param {object[]} chunks
 * @returns {{anchorVirtual: number, chunkReal: number}}
 */
function countEvidenceByType(chunks) {
  let anchorVirtual = 0;
  let chunkReal = 0;
  for (const c of chunks || []) {
    if (c.isAnchorVirtual) anchorVirtual++;
    else chunkReal++;
  }
  return { anchorVirtual, chunkReal };
}

/**
 * Pull evidence + W-RRF fusion + Cap100 + rerank.
 *
 * @param {object[]} anchorHits - L0 hits
 * @param {Set} anchorFloors - floors hit by anchors (incl. lexical expansion)
 * @param {number[]} queryVector - query embedding
 * @param {string} rerankQuery - rerank query text
 * @param {object} lexicalResult - lexical retrieval result
 * @param {object|null} metrics
 * @returns {Promise<object[]>} reranked evidence chunks
 */
async function pullAndFuseEvidence(anchorHits, anchorFloors, queryVector, rerankQuery, lexicalResult, metrics) {
  const { chatId } = getContext();
  if (!chatId) return [];

  const T_Start = performance.now();

  // ─────────────────────────────────────────────────────────────────
  // 6a. Build anchor virtual chunks (from L0)
  // ─────────────────────────────────────────────────────────────────
  const anchorVirtualChunks = (anchorHits || []).map(a => ({
    chunkId: `anchor-${a.atomId}`,
    floor: a.floor,
    chunkIdx: -1,
    speaker: '📌',
    isUser: false,
    text: a.atom?.semantic || '',
    similarity: a.similarity,
    isAnchorVirtual: true,
    _atom: a.atom,
  }));

  // ─────────────────────────────────────────────────────────────────
  // 6b. Pull real L1 chunks (from anchorFloors)
  // ─────────────────────────────────────────────────────────────────
  const floorArray = Array.from(anchorFloors);
  let dbChunks = [];
  try {
    if (floorArray.length > 0) {
      dbChunks = await getChunksByFloors(chatId, floorArray);
    }
  } catch (e) {
    xbLog.warn(MODULE_ID, '从 DB 拉取 chunks 失败', e);
  }

  // ─────────────────────────────────────────────────────────────────
  // 6c. Dense coarse filter (sort real chunks by queryVector similarity)
  // ─────────────────────────────────────────────────────────────────
  let denseCoarseChunks = [];
  if (dbChunks.length > 0 && queryVector?.length) {
    const chunkIds = dbChunks.map(c => c.chunkId);
    let chunkVectors = [];
    try {
      chunkVectors = await getChunkVectorsByIds(chatId, chunkIds);
    } catch (e) {
      xbLog.warn(MODULE_ID, 'L1 向量获取失败', e);
    }
    const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
    denseCoarseChunks = dbChunks
      .map(c => {
        const vec = vectorMap.get(c.chunkId);
        if (!vec?.length) return null;
        return {
          ...c,
          isAnchorVirtual: false,
          similarity: cosineSimilarity(queryVector, vec),
        };
      })
      .filter(Boolean)
      .sort((a, b) => b.similarity - a.similarity)
      .slice(0, CONFIG.EVIDENCE_DENSE_COARSE_MAX);
  }

  // ─────────────────────────────────────────────────────────────────
  // 6d. Build the three ranking lanes
  // ─────────────────────────────────────────────────────────────────
  // Dense lane: anchorVirtual + denseCoarse, sorted by similarity.
  const denseRank = [
    ...anchorVirtualChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
    ...denseCoarseChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
  ].sort((a, b) => b.score - a.score);

  // Lexical lane: from lexicalResult.chunkScores. Copy before sorting —
  // Array#sort mutates in place and chunkScores belongs to the caller.
  const lexRank = [...(lexicalResult?.chunkScores || [])]
    .sort((a, b) => b.score - a.score)
    .map(cs => ({ chunkId: cs.chunkId, score: cs.score }));

  // Anchor lane: anchorVirtual sorted by similarity.
  const anchorRank = anchorVirtualChunks
    .map(c => ({ chunkId: c.chunkId, score: c.similarity }))
    .sort((a, b) => b.score - a.score);

  // ─────────────────────────────────────────────────────────────────
  // 6e. W-RRF fusion + Cap100
  // ─────────────────────────────────────────────────────────────────
  const T_Fusion_Start = performance.now();
  // Keep totalUnique: it is the deduplicated candidate count for metrics.
  const { top: fusionResult, totalUnique: fusionTotalUnique } =
    fuseChunkCandidates(denseRank, lexRank, anchorRank, CONFIG.FUSION_CAP);
  const fusionChunkIds = new Set(fusionResult.map(f => f.chunkId));
  const fusionTime = Math.round(performance.now() - T_Fusion_Start);

  // ─────────────────────────────────────────────────────────────────
  // 6f. Build the final candidate chunk objects (for rerank)
  // ─────────────────────────────────────────────────────────────────
  // chunkId → chunk object map; anchor virtual chunks take precedence.
  const chunkObjectMap = new Map();
  for (const c of anchorVirtualChunks) {
    chunkObjectMap.set(c.chunkId, c);
  }
  for (const c of denseCoarseChunks) {
    if (!chunkObjectMap.has(c.chunkId)) {
      chunkObjectMap.set(c.chunkId, c);
    }
  }
  // Lexical hits may not be in denseCoarse; backfill them from dbChunks.
  const dbChunkMap = new Map(dbChunks.map(c => [c.chunkId, c]));
  for (const cs of (lexicalResult?.chunkScores || [])) {
    if (fusionChunkIds.has(cs.chunkId) && !chunkObjectMap.has(cs.chunkId)) {
      const dbChunk = dbChunkMap.get(cs.chunkId);
      if (dbChunk) {
        chunkObjectMap.set(cs.chunkId, {
          ...dbChunk,
          isAnchorVirtual: false,
          similarity: 0,
        });
      }
    }
  }

  // Candidate list in fusionScore order.
  const rerankCandidates = fusionResult
    .map(f => {
      const chunk = chunkObjectMap.get(f.chunkId);
      if (!chunk) return null;
      return {
        ...chunk,
        _fusionScore: f.fusionScore,
      };
    })
    .filter(Boolean);

  // ─────────────────────────────────────────────────────────────────
  // Update metrics
  // ─────────────────────────────────────────────────────────────────
  if (metrics) {
    metrics.evidence.floorsFromAnchors = floorArray.length;
    metrics.evidence.chunkTotal = dbChunks.length;
    metrics.evidence.denseCoarse = denseCoarseChunks.length;
    metrics.fusion.denseCount = denseRank.length;
    metrics.fusion.lexCount = lexRank.length;
    metrics.fusion.anchorCount = anchorRank.length;
    // Use the deduplicated count reported by the fusion step (the previous
    // formula re-counted ids that appeared in more than one lane).
    metrics.fusion.totalUnique = fusionTotalUnique;
    metrics.fusion.afterCap = rerankCandidates.length;
    metrics.fusion.time = fusionTime;
    metrics.evidence.merged = rerankCandidates.length;
    metrics.evidence.mergedByType = countEvidenceByType(rerankCandidates);
  }

  // ─────────────────────────────────────────────────────────────────
  // 6g. Rerank
  // ─────────────────────────────────────────────────────────────────
  if (rerankCandidates.length === 0) {
    if (metrics) {
      metrics.evidence.rerankApplied = false;
      metrics.evidence.selected = 0;
      metrics.evidence.selectedByType = { anchorVirtual: 0, chunkReal: 0 };
    }
    return [];
  }

  const T_Rerank_Start = performance.now();
  const reranked = await rerankChunks(rerankQuery, rerankCandidates, {
    topN: CONFIG.RERANK_TOP_N,
    minScore: CONFIG.RERANK_MIN_SCORE,
  });
  const rerankTime = Math.round(performance.now() - T_Rerank_Start);

  if (metrics) {
    metrics.evidence.rerankApplied = true;
    metrics.evidence.beforeRerank = rerankCandidates.length;
    metrics.evidence.afterRerank = reranked.length;
    metrics.evidence.selected = reranked.length;
    metrics.evidence.selectedByType = countEvidenceByType(reranked);
    metrics.evidence.rerankTime = rerankTime;
    metrics.timing.evidenceRerank = rerankTime;
    const scores = reranked.map(c => c._rerankScore || 0).filter(s => s > 0);
    if (scores.length > 0) {
      scores.sort((a, b) => a - b);
      metrics.evidence.rerankScores = {
        min: Number(scores[0].toFixed(3)),
        max: Number(scores[scores.length - 1].toFixed(3)),
        mean: Number((scores.reduce((a, b) => a + b, 0) / scores.length).toFixed(3)),
      };
    }
  }

  const totalTime = Math.round(performance.now() - T_Start);
  // Guard like every other metrics write in this function — `metrics` is nullable.
  if (metrics) {
    metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
  }

  xbLog.info(MODULE_ID,
    `Evidence: ${dbChunks.length} L1 → dense=${denseCoarseChunks.length} lex=${lexRank.length} → fusion=${rerankCandidates.length} → rerank=${reranked.length} (${totalTime}ms)`
  );

  return reranked;
}

// ═══════════════════════════════════════════════════════════════════════════
// Main entry point
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Run memory recall.
 *
 * @param {object[]} allEvents - all events (L2)
 * @param {object} vectorConfig - embedding configuration
 * @param {object} options
 * @param {boolean} options.excludeLastAi
 * @param {string|null} options.pendingUserMessage
 * @returns {Promise<object>} { events, causalChain, evidenceChunks, focusEntities, elapsed, metrics, [logText] }
 */
export async function recallMemory(allEvents, vectorConfig, options = {}) {
  const T0 = performance.now();
  const { chat } = getContext();
  const { pendingUserMessage = null, excludeLastAi = false } = options;

  const metrics = createMetrics();

  if (!allEvents?.length) {
    metrics.anchor.needRecall = false;
    metrics.timing.total = Math.round(performance.now() - T0);
    return {
      events: [],
      evidenceChunks: [],
      causalChain: [],
      focusEntities: [],
      elapsed: metrics.timing.total,
      logText: 'No events.',
      metrics,
    };
  }
  metrics.anchor.needRecall = true;

  // ═══════════════════════════════════════════════════════════════════
  // Stage 1: Query Build
  // ═══════════════════════════════════════════════════════════════════
  const T_Build_Start = performance.now();
  const lastMessages = getLastMessages(chat, CONFIG.LAST_MESSAGES_K, excludeLastAi);
  const bundle = buildQueryBundle(lastMessages, pendingUserMessage);
  metrics.query.buildTime = Math.round(performance.now() - T_Build_Start);
  metrics.anchor.focusEntities = bundle.focusEntities;

  // Query lengths (v0 available here)
  if (metrics.query?.lengths) {
    metrics.query.lengths.v0Chars = String(bundle.queryText_v0 || '').length;
    // v1 not built yet
    metrics.query.lengths.v1Chars = null;
    metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v0 || '').length;
  }

  xbLog.info(MODULE_ID,
    `Query Build: focus=[${bundle.focusEntities.join(',')}] lexTerms=[${bundle.lexicalTerms.slice(0, 5).join(',')}]`
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 2: Round 1 Dense Retrieval
  // ═══════════════════════════════════════════════════════════════════
  let queryVector_v0;
  try {
    const [vec] = await embed([bundle.queryText_v0], vectorConfig, { timeout: 10000 });
    queryVector_v0 = vec;
  } catch (e) {
    xbLog.error(MODULE_ID, 'Round 1 向量化失败', e);
    metrics.timing.total = Math.round(performance.now() - T0);
    return {
      events: [],
      evidenceChunks: [],
      causalChain: [],
      focusEntities: bundle.focusEntities,
      elapsed: metrics.timing.total,
      logText: 'Embedding failed (round 1).',
      metrics,
    };
  }
  if (!queryVector_v0?.length) {
    metrics.timing.total = Math.round(performance.now() - T0);
    return {
      events: [],
      evidenceChunks: [],
      causalChain: [],
      focusEntities: bundle.focusEntities,
      elapsed: metrics.timing.total,
      logText: 'Empty query vector (round 1).',
      metrics,
    };
  }

  const T_R1_Anchor_Start = performance.now();
  const { hits: anchorHits_v0 } = await recallAnchors(queryVector_v0, vectorConfig, null);
  const r1AnchorTime = Math.round(performance.now() - T_R1_Anchor_Start);
  const T_R1_Event_Start = performance.now();
  const eventHits_v0 = await recallEvents(queryVector_v0, allEvents, vectorConfig, bundle.focusEntities, null);
  const r1EventTime = Math.round(performance.now() - T_R1_Event_Start);

  xbLog.info(MODULE_ID,
    `Round 1: anchors=${anchorHits_v0.length} events=${eventHits_v0.length} (anchor=${r1AnchorTime}ms event=${r1EventTime}ms)`
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 3: Query Refinement
  // ═══════════════════════════════════════════════════════════════════
  const T_Refine_Start = performance.now();
  refineQueryBundle(bundle, anchorHits_v0, eventHits_v0);
  metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start);

  // Update focusEntities (refinement may have expanded them).
  metrics.anchor.focusEntities = bundle.focusEntities;

  // Query lengths (v1/rerank updated here)
  if (metrics.query?.lengths) {
    metrics.query.lengths.v1Chars = bundle.queryText_v1 == null ? null : String(bundle.queryText_v1).length;
    metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v1 || bundle.queryText_v0 || '').length;
  }

  xbLog.info(MODULE_ID,
    `Refinement: focus=[${bundle.focusEntities.join(',')}] hasV1=${!!bundle.queryText_v1} (${metrics.query.refineTime}ms)`
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 4: Round 2 Dense Retrieval
  // ═══════════════════════════════════════════════════════════════════
  const queryTextFinal = bundle.queryText_v1 || bundle.queryText_v0;
  let queryVector_v1;
  try {
    const [vec] = await embed([queryTextFinal], vectorConfig, { timeout: 10000 });
    queryVector_v1 = vec;
  } catch (e) {
    // Degrade gracefully: reuse the round-1 vector.
    xbLog.warn(MODULE_ID, 'Round 2 向量化失败,降级使用 Round 1 向量', e);
    queryVector_v1 = queryVector_v0;
  }

  const T_R2_Anchor_Start = performance.now();
  const { hits: anchorHits, floors: anchorFloors_dense } = await recallAnchors(queryVector_v1, vectorConfig, metrics);
  metrics.timing.anchorSearch = Math.round(performance.now() - T_R2_Anchor_Start);
  const T_R2_Event_Start = performance.now();
  let eventHits = await recallEvents(queryVector_v1, allEvents, vectorConfig, bundle.focusEntities, metrics);
  metrics.timing.eventRetrieval = Math.round(performance.now() - T_R2_Event_Start);

  xbLog.info(MODULE_ID,
    `Round 2: anchors=${anchorHits.length} floors=${anchorFloors_dense.size} events=${eventHits.length}`
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 5: Lexical Retrieval + Merge
  // ═══════════════════════════════════════════════════════════════════
  const T_Lex_Start = performance.now();
  let lexicalResult = { atomIds: [], atomFloors: new Set(), chunkIds: [], chunkFloors: new Set(), eventIds: [], chunkScores: [], searchTime: 0 };
  try {
    const index = await getLexicalIndex();
    if (index) {
      lexicalResult = searchLexicalIndex(index, bundle.lexicalTerms);
    }
  } catch (e) {
    xbLog.warn(MODULE_ID, 'Lexical 检索失败', e);
  }
  const lexTime = Math.round(performance.now() - T_Lex_Start);

  if (metrics) {
    metrics.lexical.atomHits = lexicalResult.atomIds.length;
    metrics.lexical.chunkHits = lexicalResult.chunkIds.length;
    metrics.lexical.eventHits = lexicalResult.eventIds.length;
    metrics.lexical.searchTime = lexTime;
    metrics.lexical.terms = bundle.lexicalTerms.slice(0, 10);
  }

  // Merge L0 floors.
  const anchorFloors = new Set(anchorFloors_dense);
  for (const f of lexicalResult.atomFloors) {
    anchorFloors.add(f);
  }
  // Also add lexical chunk floors (ensures those floors' chunks get pulled).
  for (const f of lexicalResult.chunkFloors) {
    anchorFloors.add(f);
  }

  // Merge L2 events (lexical hits that dense retrieval missed).
  const existingEventIds = new Set(eventHits.map(e => e.event?.id).filter(Boolean));
  const eventIndex = buildEventIndex(allEvents);
  let lexicalEventCount = 0;
  for (const eid of lexicalResult.eventIds) {
    if (!existingEventIds.has(eid)) {
      const ev = eventIndex.get(eid);
      if (ev) {
        eventHits.push({
          event: ev,
          similarity: 0,
          _recallType: 'LEXICAL',
        });
        existingEventIds.add(eid);
        lexicalEventCount++;
      }
    }
  }
  if (metrics && lexicalEventCount > 0) {
    // byRecallType may be absent when round-2 event recall bailed out early;
    // guard like the causal-count write below does.
    if (metrics.event.byRecallType) {
      metrics.event.byRecallType.lexical = lexicalEventCount;
    }
    metrics.event.selected += lexicalEventCount;
  }

  xbLog.info(MODULE_ID,
    `Lexical: atoms=${lexicalResult.atomIds.length} chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length} mergedFloors=${anchorFloors.size} mergedEvents=+${lexicalEventCount} (${lexTime}ms)`
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 6: Evidence Pull + W-RRF Fusion + Cap100 + Rerank
  // ═══════════════════════════════════════════════════════════════════
  const evidenceChunks = await pullAndFuseEvidence(
    anchorHits,
    anchorFloors,
    queryVector_v1,
    bundle.rerankQuery,
    lexicalResult,
    metrics
  );

  // ═══════════════════════════════════════════════════════════════════
  // Stage 7: Causation Trace
  // ═══════════════════════════════════════════════════════════════════
  const { results: causalMap, maxDepth: causalMaxDepth } = traceCausation(eventHits, eventIndex);
  const recalledIdSet = new Set(eventHits.map(x => x?.event?.id).filter(Boolean));
  const causalChain = causalMap
    .filter(x => x?.event?.id && !recalledIdSet.has(x.event.id))
    .map(x => ({
      event: x.event,
      similarity: 0,
      _recallType: 'CAUSAL',
      _causalDepth: x.depth,
      chainFrom: x.chainFrom,
    }));

  if (metrics.event.byRecallType) {
    metrics.event.byRecallType.causal = causalChain.length;
  }
  metrics.event.causalChainDepth = causalMaxDepth;
  metrics.event.causalCount = causalChain.length;

  // ═══════════════════════════════════════════════════════════════════
  // Done
  // ═══════════════════════════════════════════════════════════════════
  metrics.timing.total = Math.round(performance.now() - T0);
  metrics.event.entityNames = bundle.focusEntities;
  metrics.event.entitiesUsed = bundle.focusEntities.length;

  console.group('%c[Recall v6]', 'color: #7c3aed; font-weight: bold');
  console.log(`Total: ${metrics.timing.total}ms`);
  console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`);
  console.log(`Focus: [${bundle.focusEntities.join(', ')}]`);
  console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors.size} floors`);
  console.log(`Lexical: atoms=${lexicalResult.atomIds.length} chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`);
  console.log(`Fusion: dense=${metrics.fusion.denseCount} lex=${metrics.fusion.lexCount} anchor=${metrics.fusion.anchorCount} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
  console.log(`Evidence: ${metrics.evidence.merged} → rerank → ${evidenceChunks.length} (rerank ${metrics.evidence.rerankTime || 0}ms)`);
  if (metrics.evidence.selectedByType) {
    console.log(`Evidence types: anchor_virtual=${metrics.evidence.selectedByType.anchorVirtual} chunk_real=${metrics.evidence.selectedByType.chunkReal}`);
  }
  console.log(`Events: ${eventHits.length} hits, ${causalChain.length} causal`);
  console.groupEnd();

  return {
    events: eventHits,
    causalChain,
    evidenceChunks,
    focusEntities: bundle.focusEntities,
    elapsed: metrics.timing.total,
    metrics,
  };
}