Update recall metrics and context pairing

2026-02-10 00:18:51 +08:00
parent da1e3088eb
commit 3af76a9651
3 changed files with 657 additions and 750 deletions


@@ -1,5 +1,5 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Recall Engine (v6 - Deterministic Query + Hybrid + W-RRF)
// Story Summary - Recall Engine (v7 - Two-Stage: L0 Locate → L1 Evidence)
//
// Naming conventions:
// - Storage layers use L0/L1/L2/L3 (StateAtom/Chunk/Event/Fact)
@@ -7,12 +7,13 @@
//
// Architecture:
// Stage 1: Query Build (deterministic, no LLM)
// Stage 2: Round 1 Dense Retrieval
// Stage 2: Round 1 Dense Retrieval (L0 + L2)
// Stage 3: Query Refinement (augmented with already-hit memories)
// Stage 4: Round 2 Dense Retrieval
// Stage 5: Lexical Retrieval + Merge
// Stage 6: Evidence Pull + W-RRF Fusion + Cap(100) + Rerank
// Stage 7: Causation Trace
// Stage 4: Round 2 Dense Retrieval (L0 + L2)
// Stage 5: Lexical Retrieval + L0 Merge
// Stage 6: L0-only W-RRF Fusion + Rerank ‖ concurrent L1 cosine pre-filter
// Stage 7: L1 pair assembly (L0 → top-1 AI L1 + top-1 USER L1)
// Stage 8: Causation Trace
// ═══════════════════════════════════════════════════════════════════════════
import { getAllEventVectors, getChunksByFloors, getMeta, getChunkVectorsByIds } from '../storage/chunk-store.js';
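A minimal sketch of the v7 two-stage shape described in the header comment above: fuse and rerank the located L0 candidates while L1 evidence is pulled and cosine-scored concurrently, then pair each selected L0 floor with its L1. All names here are illustrative stand-ins, not this module's API.
// Illustrative only: the rerank/pull functions are passed in as stubs.
async function twoStageRecallSketch(l0Candidates, rerank, pullAndScoreL1) {
  // Stage 6: rerank the located L0 candidates ‖ pull + cosine-score L1 in parallel
  const floors = [...new Set(l0Candidates.map(c => c.floor))];
  const [l0Selected, l1ByFloor] = await Promise.all([
    rerank(l0Candidates),
    pullAndScoreL1(floors),
  ]);
  // Stage 7: pair each selected L0 floor with its L1 evidence
  return l0Selected.map(l0 => ({ l0, l1: l1ByFloor.get(l0.floor) ?? null }));
}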
@@ -38,23 +39,19 @@ const CONFIG = {
// Anchor (L0 StateAtoms)
ANCHOR_MIN_SIMILARITY: 0.58,
// Evidence (L1 Chunks) dense coarse filter
EVIDENCE_DENSE_COARSE_MAX: 200,
// Event (L2 Events)
EVENT_CANDIDATE_MAX: 100,
EVENT_SELECT_MAX: 50,
EVENT_MIN_SIMILARITY: 0.55,
EVENT_MMR_LAMBDA: 0.72,
// W-RRF fusion
// W-RRF fusion (L0-only)
RRF_K: 60,
RRF_W_DENSE: 1.0,
RRF_W_LEX: 0.9,
RRF_W_ANCHOR: 0.7,
FUSION_CAP: 100,
// Rerank
// Rerank (L0-only)
RERANK_TOP_N: 50,
RERANK_MIN_SCORE: 0.15,
@@ -228,7 +225,7 @@ async function recallAnchors(queryVector, vectorConfig, metrics) {
}
// ═══════════════════════════════════════════════════════════════════════════
// [Events] L2 Events retrieval (no entity bonus)
// [Events] L2 Events retrieval
// ═══════════════════════════════════════════════════════════════════════════
/**
@@ -414,35 +411,32 @@ function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MA
}
// ═══════════════════════════════════════════════════════════════════════════
// [W-RRF] Weighted Reciprocal Rank Fusion
// [W-RRF] Weighted Reciprocal Rank Fusion (L0-only)
// ═══════════════════════════════════════════════════════════════════════════
/**
* @typedef {object} RankedItem
* @property {string} chunkId - Unique identifier of the chunk
* @property {number} score - Raw score from that route (for logging only; not used in the RRF computation)
* @property {string} id - Unique identifier
* @property {number} score - Raw score from that route
*/
/**
* W-RRF fusion (chunk candidates)
* W-RRF fusion (L0 candidates: dense + lexical)
*
* @param {RankedItem[]} denseRank - Dense route (cosine, descending)
* @param {RankedItem[]} lexRank - Lexical route (MiniSearch score, descending)
* @param {RankedItem[]} anchorRank - Anchor route (anchor similarity, descending)
* @param {number} cap - Output cap
* @returns {{top: {chunkId: string, fusionScore: number}[], totalUnique: number}}
* @returns {{top: {id: string, fusionScore: number}[], totalUnique: number}}
*/
function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION_CAP) {
function fuseL0Candidates(denseRank, lexRank, cap = CONFIG.FUSION_CAP) {
const k = CONFIG.RRF_K;
const wD = CONFIG.RRF_W_DENSE;
const wL = CONFIG.RRF_W_LEX;
const wA = CONFIG.RRF_W_ANCHOR;
// Build rank map: chunkId → 0-based rank
const buildRankMap = (ranked) => {
const map = new Map();
for (let i = 0; i < ranked.length; i++) {
const id = ranked[i].chunkId;
const id = ranked[i].id;
if (!map.has(id)) map.set(id, i);
}
return map;
@@ -450,37 +444,26 @@ function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION
const denseMap = buildRankMap(denseRank || []);
const lexMap = buildRankMap(lexRank || []);
const anchorMap = buildRankMap(anchorRank || []);
// Collect all chunkIds (deduplicated)
const allIds = new Set([
...denseMap.keys(),
...lexMap.keys(),
...anchorMap.keys(),
]);
// ★ Fix E: record the total after deduplication
const totalUnique = allIds.size;
// Compute fusion scores
const scored = [];
for (const id of allIds) {
let score = 0;
if (denseMap.has(id)) {
score += wD / (k + denseMap.get(id));
}
if (lexMap.has(id)) {
score += wL / (k + lexMap.get(id));
}
if (anchorMap.has(id)) {
score += wA / (k + anchorMap.get(id));
}
scored.push({ chunkId: id, fusionScore: score });
scored.push({ id, fusionScore: score });
}
// Sort by fusion score descending, take the top `cap`
scored.sort((a, b) => b.fusionScore - a.fusionScore);
return {
@@ -490,228 +473,169 @@ function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION
}
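To make the fusion arithmetic concrete, here is a small worked example of the weighted reciprocal rank fusion computed by fuseL0Candidates, using the CONFIG values shown above (k = 60, dense weight 1.0, lexical weight 0.9); the ids are made up for illustration.
// Worked W-RRF example (illustrative ids): score(id) = Σ_route w_route / (k + rank_route(id)), rank 0-based.
const k = 60, wDense = 1.0, wLex = 0.9;
const denseOrder = ['anchor-a', 'anchor-b', 'anchor-c']; // dense route, cosine descending
const lexOrder = ['anchor-b', 'anchor-d'];               // lexical route, MiniSearch order
const wrrf = (id) => {
  let s = 0;
  const d = denseOrder.indexOf(id);
  const l = lexOrder.indexOf(id);
  if (d >= 0) s += wDense / (k + d);
  if (l >= 0) s += wLex / (k + l);
  return s;
};
// 'anchor-b' appears in both routes, so it overtakes the dense-only top hit:
// wrrf('anchor-a') = 1.0/60          ≈ 0.0167
// wrrf('anchor-b') = 1.0/61 + 0.9/60 ≈ 0.0314
console.log(wrrf('anchor-b') > wrrf('anchor-a')); // true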
// ═══════════════════════════════════════════════════════════════════════════
// [Evidence] L1 Chunks pull + fusion + Rerank
// [Stage 6] L0-only fusion + Rerank ‖ concurrent L1 cosine pre-filter
// ═══════════════════════════════════════════════════════════════════════════
/**
* Count evidence composition by type
* @param {object[]} chunks
* @returns {{anchorVirtual: number, chunkReal: number}}
*/
function countEvidenceByType(chunks) {
let anchorVirtual = 0;
let chunkReal = 0;
for (const c of chunks || []) {
if (c.isAnchorVirtual) anchorVirtual++;
else chunkReal++;
}
return { anchorVirtual, chunkReal };
}
/**
* Pull evidence + W-RRF fusion + Cap(100) + Rerank
* L0 fusion + rerank (concurrently pull L1 and cosine-score it)
*
* @param {object[]} anchorHits - L0 hits
* @param {Set<number>} anchorFloors - Anchor hit floors (including lexical expansion)
* @param {number[]} queryVector - Query vector
* @param {object[]} anchorHits - L0 dense hits (Round 2)
* @param {Set<number>} anchorFloors - L0 hit floors (including lexical expansion)
* @param {number[]} queryVector - Query vector (v1)
* @param {string} rerankQuery - Rerank query text
* @param {object} lexicalResult - Lexical retrieval result
* @param {object} metrics
* @returns {Promise<object[]>}
* @returns {Promise<{l0Selected: object[], l1ByFloor: Map<number, {aiTop1: object|null, userTop1: object|null}>}>}
*/
async function pullAndFuseEvidence(anchorHits, anchorFloors, queryVector, rerankQuery, lexicalResult, metrics) {
const { chatId } = getContext();
if (!chatId) return [];
async function locateAndPullEvidence(anchorHits, anchorFloors, queryVector, rerankQuery, lexicalResult, metrics) {
const { chatId, chat } = getContext();
if (!chatId) return { l0Selected: [], l1ByFloor: new Map() };
const T_Start = performance.now();
// ─────────────────────────────────────────────────────────────────
// 6a. Build Anchor Virtual Chunks (from L0)
// 6a. Build L0 candidate objects (for rerank)
//
// Important: allow lexical-only L0s (atoms) into the candidate pool.
// Otherwise hybrid degrades to dense-only: atoms hit by lexical but missed by dense would be dropped outright.
// ─────────────────────────────────────────────────────────────────
const anchorVirtualChunks = (anchorHits || []).map(a => ({
chunkId: `anchor-${a.atomId}`,
floor: a.floor,
chunkIdx: -1,
speaker: '📌',
isUser: false,
text: a.atom?.semantic || '',
similarity: a.similarity,
isAnchorVirtual: true,
_atom: a.atom,
}));
const l0ObjectMap = new Map();
for (const a of (anchorHits || [])) {
const id = `anchor-${a.atomId}`;
l0ObjectMap.set(id, {
id,
atomId: a.atomId,
floor: a.floor,
similarity: a.similarity,
atom: a.atom,
text: a.atom?.semantic || '',
});
}
// ─────────────────────────────────────────────────────────────────
// 6b. Pull real L1 Chunks (from anchorFloors)
// ─────────────────────────────────────────────────────────────────
// lexical-only atoms: backfill from the full StateAtoms list (similarity recorded as 0; contributes to W-RRF via the lex rank)
const lexAtomIds = lexicalResult?.atomIds || [];
if (lexAtomIds.length > 0) {
const atomsList = getStateAtoms();
const atomMap = new Map(atomsList.map(a => [a.atomId, a]));
const floorArray = Array.from(anchorFloors);
let dbChunks = [];
try {
if (floorArray.length > 0) {
dbChunks = await getChunksByFloors(chatId, floorArray);
for (const atomId of lexAtomIds) {
const id = `anchor-${atomId}`;
if (l0ObjectMap.has(id)) continue;
const atom = atomMap.get(atomId);
if (!atom) continue;
if (typeof atom.floor !== 'number' || atom.floor < 0) continue;
l0ObjectMap.set(id, {
id,
atomId,
floor: atom.floor,
similarity: 0,
atom,
text: atom.semantic || '',
});
}
} catch (e) {
xbLog.warn(MODULE_ID, 'Failed to pull chunks from DB', e);
}
// ─────────────────────────────────────────────────────────────────
// 6c. Dense coarse filter (sort real chunks by queryVector similarity)
// 6b. Build the two route rankings (L0-only)
// ─────────────────────────────────────────────────────────────────
let denseCoarseChunks = [];
if (dbChunks.length > 0 && queryVector?.length) {
const chunkIds = dbChunks.map(c => c.chunkId);
let chunkVectors = [];
try {
chunkVectors = await getChunkVectorsByIds(chatId, chunkIds);
} catch (e) {
xbLog.warn(MODULE_ID, 'L1 vector fetch failed', e);
}
const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
denseCoarseChunks = dbChunks
.map(c => {
const vec = vectorMap.get(c.chunkId);
if (!vec?.length) return null;
return {
...c,
isAnchorVirtual: false,
similarity: cosineSimilarity(queryVector, vec),
};
})
.filter(Boolean)
.sort((a, b) => b.similarity - a.similarity)
.slice(0, CONFIG.EVIDENCE_DENSE_COARSE_MAX);
}
// ─────────────────────────────────────────────────────────────────
// 6d. Build the three route rankings
// ─────────────────────────────────────────────────────────────────
// Dense route: anchorVirtual + denseCoarse, sorted by similarity
const denseRank = [
...anchorVirtualChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
...denseCoarseChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
].sort((a, b) => b.score - a.score);
// Lexical route: from lexicalResult.chunkScores
const lexRank = (lexicalResult?.chunkScores || [])
.sort((a, b) => b.score - a.score)
.map(cs => ({ chunkId: cs.chunkId, score: cs.score }));
// Anchor route: anchorVirtual sorted by similarity
const anchorRank = anchorVirtualChunks
.map(c => ({ chunkId: c.chunkId, score: c.similarity }))
// Dense route: anchorHits sorted by similarity
const denseRank = (anchorHits || [])
.map(a => ({ id: `anchor-${a.atomId}`, score: a.similarity }))
.sort((a, b) => b.score - a.score);
// Lexical route: rank built from lexicalResult.atomIds (lexical-only hits allowed)
// atomIds are already sorted by MiniSearch score (searchLexicalIndex return order); W-RRF relies on rank, so score is a placeholder
const lexRank = (lexAtomIds || [])
.map(atomId => ({ id: `anchor-${atomId}`, score: 1 }))
.filter(item => l0ObjectMap.has(item.id));
// ─────────────────────────────────────────────────────────────────
// 6e. W-RRF fusion + Cap(100)
// 6c. W-RRF fusion (L0-only)
// ─────────────────────────────────────────────────────────────────
const T_Fusion_Start = performance.now();
const { top: fusionResult } = fuseChunkCandidates(denseRank, lexRank, anchorRank, CONFIG.FUSION_CAP);
const fusionChunkIds = new Set(fusionResult.map(f => f.chunkId));
const { top: fusionResult, totalUnique } = fuseL0Candidates(denseRank, lexRank, CONFIG.FUSION_CAP);
const fusionTime = Math.round(performance.now() - T_Fusion_Start);
// ─────────────────────────────────────────────────────────────────
// 6f. Build the final candidate chunk object list (for rerank)
// ─────────────────────────────────────────────────────────────────
// Build chunkId → chunk object map
const chunkObjectMap = new Map();
for (const c of anchorVirtualChunks) {
chunkObjectMap.set(c.chunkId, c);
}
for (const c of denseCoarseChunks) {
if (!chunkObjectMap.has(c.chunkId)) {
chunkObjectMap.set(c.chunkId, c);
}
}
// Chunks hit by lexical may not be in denseCoarse; backfill from dbChunks
const dbChunkMap = new Map(dbChunks.map(c => [c.chunkId, c]));
for (const cs of (lexicalResult?.chunkScores || [])) {
if (fusionChunkIds.has(cs.chunkId) && !chunkObjectMap.has(cs.chunkId)) {
const dbChunk = dbChunkMap.get(cs.chunkId);
if (dbChunk) {
chunkObjectMap.set(cs.chunkId, {
...dbChunk,
isAnchorVirtual: false,
similarity: 0,
});
}
}
}
// Candidate list sorted by fusionScore
// Build the rerank candidate list
const rerankCandidates = fusionResult
.map(f => {
const chunk = chunkObjectMap.get(f.chunkId);
if (!chunk) return null;
return {
...chunk,
_fusionScore: f.fusionScore,
};
})
.map(f => l0ObjectMap.get(f.id))
.filter(Boolean);
// ─────────────────────────────────────────────────────────────────
// Update metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
metrics.evidence.floorsFromAnchors = floorArray.length;
metrics.evidence.chunkTotal = dbChunks.length;
metrics.evidence.denseCoarse = denseCoarseChunks.length;
metrics.fusion.denseCount = denseRank.length;
metrics.fusion.lexCount = lexRank.length;
metrics.fusion.anchorCount = anchorRank.length;
metrics.fusion.totalUnique = fusionResult.length + (denseRank.length + lexRank.length + anchorRank.length - fusionResult.length);
metrics.fusion.totalUnique = totalUnique;
metrics.fusion.afterCap = rerankCandidates.length;
metrics.fusion.time = fusionTime;
metrics.evidence.merged = rerankCandidates.length;
metrics.evidence.mergedByType = countEvidenceByType(rerankCandidates);
metrics.evidence.l0Candidates = rerankCandidates.length;
}
// ─────────────────────────────────────────────────────────────────
// 6g. Rerank
// ─────────────────────────────────────────────────────────────────
if (rerankCandidates.length === 0) {
if (metrics) {
metrics.evidence.l0Selected = 0;
metrics.evidence.l1Pulled = 0;
metrics.evidence.l1Attached = 0;
metrics.evidence.l1CosineTime = 0;
metrics.evidence.rerankApplied = false;
metrics.evidence.selected = 0;
metrics.evidence.selectedByType = { anchorVirtual: 0, chunkReal: 0 };
}
return [];
return { l0Selected: [], l1ByFloor: new Map() };
}
// ─────────────────────────────────────────────────────────────────
// 6d. Collect the floors of all candidate L0s (for the concurrent L1 pull)
// Includes the AI floor itself + the USER floor above it
// ─────────────────────────────────────────────────────────────────
const candidateFloors = new Set();
for (const c of rerankCandidates) {
candidateFloors.add(c.floor);
// USER floor above
const userFloor = c.floor - 1;
if (userFloor >= 0 && chat?.[userFloor]?.is_user) {
candidateFloors.add(userFloor);
}
}
// ─────────────────────────────────────────────────────────────────
// 6e. Concurrent: rerank L0 ‖ pull L1 chunks + vectors + cosine scoring
// ─────────────────────────────────────────────────────────────────
const T_Rerank_Start = performance.now();
const reranked = await rerankChunks(rerankQuery, rerankCandidates, {
// Concurrent task 1: rerank L0
const rerankPromise = rerankChunks(rerankQuery, rerankCandidates, {
topN: CONFIG.RERANK_TOP_N,
minScore: CONFIG.RERANK_MIN_SCORE,
});
// Concurrent task 2: pull L1 chunks + vectors → cosine scoring
const l1Promise = pullAndScoreL1(chatId, Array.from(candidateFloors), queryVector, chat);
// Wait for both tasks to complete
const [rerankedL0, l1ScoredByFloor] = await Promise.all([rerankPromise, l1Promise]);
const rerankTime = Math.round(performance.now() - T_Rerank_Start);
// ─────────────────────────────────────────────────────────────────
// 6f. Record rerank metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
metrics.evidence.rerankApplied = true;
metrics.evidence.beforeRerank = rerankCandidates.length;
metrics.evidence.afterRerank = reranked.length;
metrics.evidence.selected = reranked.length;
metrics.evidence.selectedByType = countEvidenceByType(reranked);
metrics.evidence.afterRerank = rerankedL0.length;
metrics.evidence.l0Selected = rerankedL0.length;
metrics.evidence.rerankTime = rerankTime;
metrics.timing.evidenceRerank = rerankTime;
const scores = reranked.map(c => c._rerankScore || 0).filter(s => s > 0);
const scores = rerankedL0.map(c => c._rerankScore || 0).filter(s => s > 0);
if (scores.length > 0) {
scores.sort((a, b) => a - b);
metrics.evidence.rerankScores = {
@@ -722,14 +646,167 @@ async function pullAndFuseEvidence(anchorHits, anchorFloors, queryVector, rerank
}
}
// ─────────────────────────────────────────────────────────────────
// 6g. Build the final l0Selected + l1ByFloor
// ─────────────────────────────────────────────────────────────────
const l0Selected = rerankedL0.map(item => ({
id: item.id,
atomId: item.atomId,
floor: item.floor,
similarity: item.similarity,
rerankScore: item._rerankScore || 0,
atom: item.atom,
text: item.text,
}));
// Assemble the top-1 L1 pair for each selected L0 floor
const selectedFloors = new Set(l0Selected.map(l => l.floor));
const l1ByFloor = new Map();
let contextPairsAdded = 0;
for (const floor of selectedFloors) {
const aiChunks = l1ScoredByFloor.get(floor) || [];
const userFloor = floor - 1;
const userChunks = (userFloor >= 0 && chat?.[userFloor]?.is_user)
? (l1ScoredByFloor.get(userFloor) || [])
: [];
// top-1: take the highest cosine score
const aiTop1 = aiChunks.length > 0
? aiChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
: null;
const userTop1 = userChunks.length > 0
? userChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
: null;
// context pair = the USER floor above was successfully attached (for metrics)
if (userTop1) contextPairsAdded++;
l1ByFloor.set(floor, { aiTop1, userTop1 });
}
// ─────────────────────────────────────────────────────────────────
// 6h. L1 metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
let totalPulled = 0;
let totalAttached = 0;
for (const [, scored] of l1ScoredByFloor) {
totalPulled += scored.length;
}
for (const [, pair] of l1ByFloor) {
if (pair.aiTop1) totalAttached++;
if (pair.userTop1) totalAttached++;
}
metrics.evidence.l1Pulled = totalPulled;
metrics.evidence.l1Attached = totalAttached;
metrics.evidence.contextPairsAdded = contextPairsAdded;
metrics.evidence.l1CosineTime = l1ScoredByFloor._cosineTime || 0;
}
const totalTime = Math.round(performance.now() - T_Start);
metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
if (metrics) {
metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
}
xbLog.info(MODULE_ID,
`Evidence: ${dbChunks.length} L1 → dense=${denseCoarseChunks.length} lex=${lexRank.length} → fusion=${rerankCandidates.length} → rerank=${reranked.length} (${totalTime}ms)`
`Evidence: ${anchorHits?.length || 0} L0 dense → fusion=${rerankCandidates.length} → rerank=${rerankedL0.length} → L1 attached=${metrics?.evidence?.l1Attached || 0} (${totalTime}ms)`
);
return reranked;
return { l0Selected, l1ByFloor };
}
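A hypothetical consumer of the { l0Selected, l1ByFloor } result assembled above, showing how the top-1 USER/AI L1 pair attaches under each selected L0 floor; buildContextBlock is not part of this module.
// Hypothetical helper (not in this module): render selected L0s with their paired L1 evidence.
function buildContextBlock(l0Selected, l1ByFloor) {
  const lines = [];
  for (const l0 of l0Selected) {
    const pair = l1ByFloor.get(l0.floor) || { aiTop1: null, userTop1: null };
    lines.push(`[#${l0.floor}] ${l0.text}`);
    if (pair.userTop1) lines.push(`  USER: ${pair.userTop1.text}`); // the USER turn above the AI floor
    if (pair.aiTop1) lines.push(`  AI: ${pair.aiTop1.text}`);       // top-1 cosine chunk from the AI floor itself
  }
  return lines.join('\n');
}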
// ═══════════════════════════════════════════════════════════════════════════
// [L1] Pull + cosine scoring (concurrent subtask)
// ═══════════════════════════════════════════════════════════════════════════
/**
* Pull L1 chunks + vectors for the given floors from IndexedDB and cosine-score them against queryVector
*
* @param {string} chatId
* @param {number[]} floors - List of floors to pull
* @param {number[]} queryVector - Query vector (v1)
* @param {object[]} chat - Chat message array
* @returns {Promise<Map<number, object[]>>} floor → scored chunks (with _cosineScore)
*/
async function pullAndScoreL1(chatId, floors, queryVector, chat) {
const T0 = performance.now();
/** @type {Map<number, object[]>} */
const result = new Map();
if (!chatId || !floors?.length || !queryVector?.length) {
result._cosineTime = 0;
return result;
}
// Pull chunks
let dbChunks = [];
try {
dbChunks = await getChunksByFloors(chatId, floors);
} catch (e) {
xbLog.warn(MODULE_ID, 'L1 chunks pull failed', e);
result._cosineTime = Math.round(performance.now() - T0);
return result;
}
if (!dbChunks.length) {
result._cosineTime = Math.round(performance.now() - T0);
return result;
}
// Pull vectors
const chunkIds = dbChunks.map(c => c.chunkId);
let chunkVectors = [];
try {
chunkVectors = await getChunkVectorsByIds(chatId, chunkIds);
} catch (e) {
xbLog.warn(MODULE_ID, 'L1 vector pull failed', e);
result._cosineTime = Math.round(performance.now() - T0);
return result;
}
const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
// Cosine scoring + grouping by floor
for (const chunk of dbChunks) {
const vec = vectorMap.get(chunk.chunkId);
const cosineScore = vec?.length ? cosineSimilarity(queryVector, vec) : 0;
const scored = {
chunkId: chunk.chunkId,
floor: chunk.floor,
chunkIdx: chunk.chunkIdx,
speaker: chunk.speaker,
isUser: chunk.isUser,
text: chunk.text,
_cosineScore: cosineScore,
};
if (!result.has(chunk.floor)) {
result.set(chunk.floor, []);
}
result.get(chunk.floor).push(scored);
}
// Sort each floor's chunks by cosine descending
for (const [, chunks] of result) {
chunks.sort((a, b) => b._cosineScore - a._cosineScore);
}
result._cosineTime = Math.round(performance.now() - T0);
xbLog.info(MODULE_ID,
`L1 pull: ${floors.length} floors → ${dbChunks.length} chunks → scored (${result._cosineTime}ms)`
);
return result;
}
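The cosineSimilarity helper used above is imported from elsewhere in the codebase; for reference, a minimal implementation under the usual definition (assuming equal-length numeric vectors) would look like the sketch below.
// Reference sketch only; the module's actual cosineSimilarity import may differ.
function cosineSimilarityRef(a, b) {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  return denom > 0 ? dot / denom : 0; // 0 for zero-length or all-zero vectors
}
// cosineSimilarityRef([1, 0], [1, 0]) → 1;  cosineSimilarityRef([1, 0], [0, 1]) → 0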
// ═══════════════════════════════════════════════════════════════════════════
@@ -758,7 +835,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [],
evidenceChunks: [],
l0Selected: [],
l1ByFloor: new Map(),
causalChain: [],
focusEntities: [],
elapsed: metrics.timing.total,
@@ -782,10 +860,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.query.buildTime = Math.round(performance.now() - T_Build_Start);
metrics.anchor.focusEntities = bundle.focusEntities;
// Query lengths (v0 available here)
if (metrics.query?.lengths) {
metrics.query.lengths.v0Chars = String(bundle.queryText_v0 || '').length;
// v1 not built yet
metrics.query.lengths.v1Chars = null;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v0 || '').length;
}
@@ -806,7 +882,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
xbLog.error(MODULE_ID, 'Round 1 vectorization failed', e);
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], evidenceChunks: [], causalChain: [],
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Embedding failed (round 1).',
@@ -817,7 +893,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
if (!queryVector_v0?.length) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], evidenceChunks: [], causalChain: [],
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Empty query vector (round 1).',
@@ -846,10 +922,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
refineQueryBundle(bundle, anchorHits_v0, eventHits_v0);
metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start);
// Update focusEntities (refinement may have expanded them)
metrics.anchor.focusEntities = bundle.focusEntities;
// Query lengths (v1/rerank updated here)
if (metrics.query?.lengths) {
metrics.query.lengths.v1Chars = bundle.queryText_v1 == null ? null : String(bundle.queryText_v1).length;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v1 || bundle.queryText_v0 || '').length;
@@ -887,12 +961,16 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
);
// ═══════════════════════════════════════════════════════════════════
// Stage 5: Lexical Retrieval + Merge
// Stage 5: Lexical Retrieval + L0 Merge
// ═══════════════════════════════════════════════════════════════════
const T_Lex_Start = performance.now();
let lexicalResult = { atomIds: [], atomFloors: new Set(), chunkIds: [], chunkFloors: new Set(), eventIds: [], chunkScores: [], searchTime: 0 };
let lexicalResult = {
atomIds: [], atomFloors: new Set(),
chunkIds: [], chunkFloors: new Set(),
eventIds: [], chunkScores: [], searchTime: 0,
};
try {
const index = await getLexicalIndex();
@@ -913,15 +991,11 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.lexical.terms = bundle.lexicalTerms.slice(0, 10);
}
// Merge L0 floors
// Merge L0 floors (dense + lexical)
const anchorFloors = new Set(anchorFloors_dense);
for (const f of lexicalResult.atomFloors) {
anchorFloors.add(f);
}
// Also add lexical chunk floors (ensures chunks on those floors get pulled)
for (const f of lexicalResult.chunkFloors) {
anchorFloors.add(f);
}
// Merge L2 events (events hit by lexical but not by dense)
const existingEventIds = new Set(eventHits.map(e => e.event?.id).filter(Boolean));
@@ -953,10 +1027,10 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
);
// ═══════════════════════════════════════════════════════════════════
// Stage 6: Evidence Pull + W-RRF Fusion + Cap(100) + Rerank
// Stage 6: L0-only W-RRF Fusion + Rerank ‖ concurrent L1 Cosine
// ═══════════════════════════════════════════════════════════════════
const evidenceChunks = await pullAndFuseEvidence(
const { l0Selected, l1ByFloor } = await locateAndPullEvidence(
anchorHits,
anchorFloors,
queryVector_v1,
@@ -996,24 +1070,23 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.event.entityNames = bundle.focusEntities;
metrics.event.entitiesUsed = bundle.focusEntities.length;
console.group('%c[Recall v6]', 'color: #7c3aed; font-weight: bold');
console.group('%c[Recall v7]', 'color: #7c3aed; font-weight: bold');
console.log(`Total: ${metrics.timing.total}ms`);
console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`);
console.log(`Focus: [${bundle.focusEntities.join(', ')}]`);
console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors.size} floors`);
console.log(`Lexical: atoms=${lexicalResult.atomIds.length} chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`);
console.log(`Fusion: dense=${metrics.fusion.denseCount} lex=${metrics.fusion.lexCount} anchor=${metrics.fusion.anchorCount} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`Evidence: ${metrics.evidence.merged} → rerank → ${evidenceChunks.length} (rerank ${metrics.evidence.rerankTime || 0}ms)`);
if (metrics.evidence.selectedByType) {
console.log(`Evidence types: anchor_virtual=${metrics.evidence.selectedByType.anchorVirtual} chunk_real=${metrics.evidence.selectedByType.chunkReal}`);
}
console.log(`Fusion (L0-only): dense=${metrics.fusion.denseCount} lex=${metrics.fusion.lexCount} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`L0 Rerank: ${metrics.evidence.beforeRerank || 0}${metrics.evidence.l0Selected || 0} (${metrics.evidence.rerankTime || 0}ms)`);
console.log(`L1 Pull: ${metrics.evidence.l1Pulled || 0} chunks → ${metrics.evidence.l1Attached || 0} attached (${metrics.evidence.l1CosineTime || 0}ms)`);
console.log(`Events: ${eventHits.length} hits, ${causalChain.length} causal`);
console.groupEnd();
return {
events: eventHits,
causalChain,
evidenceChunks,
l0Selected,
l1ByFloor,
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
metrics,