Update recall metrics and context pairing

This commit is contained in:
2026-02-10 00:18:51 +08:00
parent da1e3088eb
commit 3af76a9651
3 changed files with 657 additions and 750 deletions

View File

@@ -1,10 +1,18 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Prompt Injection (v4 - 统一命名)
// Story Summary - Prompt Injection (v5 - Two-Stage: L0 Locate → L1 Evidence)
//
// 命名规范:
// - 存储层用 L0/L1/L2/L3StateAtom/Chunk/Event/Fact
// - 装配层用语义名称constraint/event/evidence/arc
//
// 架构变更v4 → v5
// - L0 和 L1 不再在同一个池子竞争
// - recall.js 返回 {l0Selected[], l1ByFloor: Map} 而非 evidenceChunks[]
// - 装配层按 L2→L0→L1 层级组织
// - 预算以"L0 + USER top-1 + AI top-1"为原子单元
// - 孤立 L1无对应 L0丢弃
// - 孤立 L0无对应 L1保留
//
// 职责:
// - 仅负责"构建注入文本",不负责写入 extension_prompts
// - 注入发生在 story-summary.jsGENERATION_STARTED 时写入 extension_prompts
@@ -15,7 +23,7 @@ import { xbLog } from "../../../core/debug-core.js";
import { getSummaryStore, getFacts, isRelationFact } from "../data/store.js";
import { getVectorConfig, getSummaryPanelConfig, getSettings } from "../data/config.js";
import { recallMemory } from "../vector/retrieval/recall.js";
import { getChunksByFloors, getAllChunkVectors, getAllEventVectors, getMeta } from "../vector/storage/chunk-store.js";
import { getMeta } from "../vector/storage/chunk-store.js";
// Metrics
import { formatMetricsLog, detectIssues } from "../vector/retrieval/metrics.js";
@@ -79,23 +87,6 @@ function pushWithBudget(lines, text, state) {
return true;
}
/**
* 计算余弦相似度
* @param {number[]} a - 向量A
* @param {number[]} b - 向量B
* @returns {number} 相似度
*/
/**
 * Cosine similarity between two equal-length numeric vectors.
 * @param {number[]} a - Vector A
 * @param {number[]} b - Vector B
 * @returns {number} Similarity, or 0 for empty/mismatched/zero-norm input
 */
function cosineSimilarity(a, b) {
  // Guard: both vectors must be non-empty and of the same length.
  if (!a?.length || !b?.length || a.length !== b.length) return 0;
  let dotProduct = 0;
  let normASq = 0;
  let normBSq = 0;
  a.forEach((av, i) => {
    const bv = b[i];
    dotProduct += av * bv;
    normASq += av * av;
    normBSq += bv * bv;
  });
  // A zero-norm vector has no direction; report zero similarity.
  if (!normASq || !normBSq) return 0;
  return dotProduct / (Math.sqrt(normASq) * Math.sqrt(normBSq));
}
/**
* 解析事件摘要中的楼层范围
* @param {string} summary - 事件摘要
@@ -134,46 +125,27 @@ function normalize(s) {
.toLowerCase();
}
// ─────────────────────────────────────────────────────────────────────────────
// 上下文配对工具函数
// ─────────────────────────────────────────────────────────────────────────────
/**
* 获取 chunk 的上下文楼层
* @param {object} chunk - chunk 对象
* @returns {number} 上下文楼层(-1 表示无)
* 获取事件排序键
* @param {object} event - 事件对象
* @returns {number} 排序键
*/
function getContextFloor(chunk) {
if (chunk.isAnchorVirtual) return -1;
return chunk.isUser ? chunk.floor + 1 : chunk.floor - 1;
/**
 * Sort key for an event: the start floor parsed from its summary,
 * falling back to the numeric suffix of an "evt-N" id, then
 * Number.MAX_SAFE_INTEGER so unparseable events sort last.
 * @param {object} event - Event object
 * @returns {number} Sort key
 */
function getEventSortKey(event) {
  const range = parseFloorRange(event?.summary);
  if (range) return range.start;
  const idMatch = /evt-(\d+)/.exec(String(event?.id || ""));
  if (idMatch) return parseInt(idMatch[1], 10);
  return Number.MAX_SAFE_INTEGER;
}
/**
* 选择上下文 chunk
* @param {object[]} candidates - 候选 chunks
* @param {object} mainChunk - 主 chunk
* @returns {object|null} 选中的上下文 chunk
* 重新编号事件文本
* @param {string} text - 原始文本
* @param {number} newIndex - 新编号
* @returns {string} 重新编号后的文本
*/
/**
 * Pick the best context chunk for a main chunk: prefer the first
 * candidate on the opposite speaker side (USER vs AI); otherwise fall
 * back to the first candidate overall.
 * @param {object[]} candidates - Candidate chunks
 * @param {object} mainChunk - Main chunk
 * @returns {object|null} Selected context chunk, or null when no candidates
 */
function pickContextChunk(candidates, mainChunk) {
  if (!candidates?.length) return null;
  const wantUser = !mainChunk.isUser;
  for (const candidate of candidates) {
    if (candidate.isUser === wantUser) return candidate;
  }
  return candidates[0];
}
/**
* 格式化上下文 chunk 行
* @param {object} chunk - chunk 对象
* @param {boolean} isAbove - 是否在上方
* @returns {string} 格式化后的行
*/
function formatContextChunkLine(chunk, isAbove) {
const { name1, name2 } = getContext();
const speaker = chunk.isUser ? (name1 || "用户") : (chunk.speaker || name2 || "角色");
const text = String(chunk.text || "").trim();
const symbol = isAbove ? "┌" : "└";
return ` ${symbol} #${chunk.floor + 1} [${speaker}] ${text}`;
/**
 * Replace the leading ordinal of an event line with a new index.
 * Matches optional leading whitespace, digits, a dot, optional spaces,
 * and an optional opening 【 bracket; the rest of the text is untouched.
 * @param {string} text - Original text
 * @param {number} newIndex - New ordinal
 * @returns {string} Renumbered text ("" for null/undefined input)
 */
function renumberEventText(text, newIndex) {
  const source = String(text || "");
  const leadingOrdinal = /^(\s*)\d+(\.\s*(?:【)?)/;
  return source.replace(leadingOrdinal, `$1${newIndex}$2`);
}
// ─────────────────────────────────────────────────────────────────────────────
@@ -259,10 +231,8 @@ function filterConstraintsByRelevance(facts, focusEntities, knownCharacters) {
const focusSet = new Set((focusEntities || []).map(normalize));
return facts.filter(f => {
// isState 的 facts 始终保留
if (f._isState === true) return true;
// 关系类 facts检查 from/to 是否在焦点中
if (isRelationFact(f)) {
const from = normalize(f.s);
const target = parseRelationTarget(f.p);
@@ -272,7 +242,6 @@ function filterConstraintsByRelevance(facts, focusEntities, knownCharacters) {
return false;
}
// 其他 facts检查主体是否在焦点中
const subjectNorm = normalize(f.s);
if (knownCharacters.has(subjectNorm)) {
return focusSet.has(subjectNorm);
@@ -326,28 +295,34 @@ function formatArcLine(arc) {
}
/**
* 格式化 evidence chunk 完整
* @param {object} chunk - chunk 对象
* 格式化 L0 锚点
* @param {object} l0 - L0 对象
* @returns {string} 格式化后的行
*/
function formatEvidenceFullLine(chunk) {
/**
 * Format an L0 anchor as a single prompt line: "#<floor+1> [📌] <text>".
 * Falls back to the atom's semantic text when the chunk text is empty.
 * @param {object} l0 - L0 anchor object
 * @returns {string} Formatted line
 */
function formatL0Line(l0) {
  const raw = l0.text || l0.atom?.semantic || "";
  const body = String(raw).trim();
  return ` #${l0.floor + 1} [📌] ${body}`;
}
/**
* 格式化 L1 chunk 行(挂在 L0 下方)
* @param {object} chunk - L1 chunk 对象
* @param {boolean} isContext - 是否为上下文USER 侧)
* @returns {string} 格式化后的行
*/
function formatL1Line(chunk, isContext) {
const { name1, name2 } = getContext();
if (chunk.isAnchorVirtual) {
return ` #${chunk.floor + 1} [📌] ${String(chunk.text || "").trim()}`;
}
const speaker = chunk.isUser ? (name1 || "用户") : (chunk.speaker || name2 || "角色");
return ` #${chunk.floor + 1} [${speaker}] ${String(chunk.text || "").trim()}`;
const text = String(chunk.text || "").trim();
const symbol = isContext ? "┌" : "";
return ` ${symbol} #${chunk.floor + 1} [${speaker}] ${text}`;
}
/**
* 格式化因果事件行
* @param {object} causalItem - 因果事件项
* @param {Map} causalById - 因果事件索引
* @returns {string} 格式化后的行
*/
function formatCausalEventLine(causalItem, causalById) {
function formatCausalEventLine(causalItem) {
const ev = causalItem?.event || {};
const depth = Math.max(1, Math.min(9, causalItem?._causalDepth || 1));
const indent = " │" + " ".repeat(depth - 1);
@@ -365,128 +340,128 @@ function formatCausalEventLine(causalItem, causalById) {
const body = `${summary}${floorHint ? ` ${floorHint}` : ""}`.trim();
lines.push(`${indent} ${body}`);
const evidence = causalItem._evidenceChunk;
if (evidence) {
const speaker = evidence.speaker || "角色";
const text = String(evidence.text || "").trim();
lines.push(`${indent} #${evidence.floor + 1} [${speaker}] ${text}`);
}
return lines.join("\n");
}
/**
* 重新编号事件文本
* @param {string} text - 原始文本
* @param {number} newIndex - 新编号
* @returns {string} 重新编号后的文本
*/
/**
 * Swap the leading "<n>." ordinal (optionally followed by 【) of an
 * event line for a new index; non-matching text is returned unchanged.
 * @param {string} text - Original text
 * @param {number} newIndex - New ordinal
 * @returns {string} Renumbered text ("" for null/undefined input)
 */
function renumberEventText(text, newIndex) {
  const input = String(text || "");
  const replacement = `$1${newIndex}$2`;
  return input.replace(/^(\s*)\d+(\.\s*(?:【)?)/, replacement);
}
/**
* 获取事件排序键
* @param {object} event - 事件对象
* @returns {number} 排序键
*/
/**
 * Sort key for an event. Primary: the start floor parsed from the
 * summary. Fallback: the numeric suffix of an "evt-N" id. Events with
 * neither sort to the end via Number.MAX_SAFE_INTEGER.
 * @param {object} event - Event object
 * @returns {number} Sort key
 */
function getEventSortKey(event) {
  const floorRange = parseFloorRange(event?.summary);
  if (floorRange) {
    return floorRange.start;
  }
  const match = String(event?.id || "").match(/evt-(\d+)/);
  return match ? Number.parseInt(match[1], 10) : Number.MAX_SAFE_INTEGER;
}
// ─────────────────────────────────────────────────────────────────────────────
// 按楼层分组装配 evidence修复上下文重复
// L0→L1 证据单元构建
// ─────────────────────────────────────────────────────────────────────────────
/**
* 按楼层装配 evidence
* @param {object[]} evidenceCandidates - 候选 evidence
* @param {Map} contextChunksByFloor - 上下文 chunks 索引
* @param {object} budget - 预算状态
* @returns {{lines: string[], anchorCount: number, contextPairsCount: number}}
* @typedef {object} EvidenceUnit
* @property {object} l0 - L0 锚点对象
* @property {object|null} userL1 - USER 侧 top-1 L1 chunk
* @property {object|null} aiL1 - AI 侧 top-1 L1 chunk
* @property {number} totalTokens - 整个单元的 token 估算
*/
function assembleEvidenceByFloor(evidenceCandidates, contextChunksByFloor, budget) {
if (!evidenceCandidates?.length) {
return { lines: [], anchorCount: 0, contextPairsCount: 0 };
}
// 1. 按楼层分组
const byFloor = new Map();
for (const c of evidenceCandidates) {
const arr = byFloor.get(c.floor) || [];
arr.push(c);
byFloor.set(c.floor, arr);
}
/**
* 为一个 L0 构建证据单元
* @param {object} l0 - L0 对象
* @param {Map<number, object>} l1ByFloor - 楼层→L1配对映射
* @returns {EvidenceUnit}
*/
function buildEvidenceUnit(l0, l1ByFloor) {
const pair = l1ByFloor.get(l0.floor);
const userL1 = pair?.userTop1 || null;
const aiL1 = pair?.aiTop1 || null;
// 2. 楼层内按 chunkIdx 排序
for (const [, chunks] of byFloor) {
chunks.sort((a, b) => (a.chunkIdx ?? 0) - (b.chunkIdx ?? 0));
}
// 预计算整个单元的 token 开销
let totalTokens = estimateTokens(formatL0Line(l0));
if (userL1) totalTokens += estimateTokens(formatL1Line(userL1, true));
if (aiL1) totalTokens += estimateTokens(formatL1Line(aiL1, false));
// 3. 按楼层顺序装配
const floorsSorted = Array.from(byFloor.keys()).sort((a, b) => a - b);
return { l0, userL1, aiL1, totalTokens };
}
/**
* 格式化一个证据单元为文本行
* @param {EvidenceUnit} unit - 证据单元
* @returns {string[]} 文本行数组
*/
/**
 * Render one L0→L1 evidence unit as prompt lines: the L0 anchor line
 * first, then the USER-side (context) and AI-side top-1 L1 lines when
 * present.
 * @param {EvidenceUnit} unit - Evidence unit
 * @returns {string[]} Formatted lines
 */
function formatEvidenceUnit(unit) {
  // Removed two unused counters (anchorCount / contextPairsCount) left
  // over from the old assembleEvidenceByFloor implementation.
  const lines = [formatL0Line(unit.l0)];
  // USER-side context line precedes the AI-side line.
  if (unit.userL1) lines.push(formatL1Line(unit.userL1, true));
  if (unit.aiL1) lines.push(formatL1Line(unit.aiL1, false));
  return lines;
}
for (const floor of floorsSorted) {
const chunks = byFloor.get(floor);
if (!chunks?.length) continue;
// ─────────────────────────────────────────────────────────────────────────────
// 事件证据收集
// ─────────────────────────────────────────────────────────────────────────────
// 分离锚点虚拟 chunks 和真实 chunks
const anchorChunks = chunks.filter(c => c.isAnchorVirtual);
const realChunks = chunks.filter(c => !c.isAnchorVirtual);
/**
* 为事件收集范围内的 L0 证据单元
* @param {object} eventObj - 事件对象
* @param {object[]} l0Selected - 所有选中的 L0
* @param {Map<number, object>} l1ByFloor - 楼层→L1配对映射
* @param {Set<string>} usedL0Ids - 已消费的 L0 ID 集合(会被修改)
* @returns {EvidenceUnit[]} 该事件的证据单元列表
*/
function collectEvidenceForEvent(eventObj, l0Selected, l1ByFloor, usedL0Ids) {
const range = parseFloorRange(eventObj?.summary);
if (!range) return [];
// 锚点直接输出(不需要上下文)
for (const c of anchorChunks) {
const line = formatEvidenceFullLine(c);
if (!pushWithBudget(lines, line, budget)) {
return { lines, anchorCount, contextPairsCount };
}
anchorCount++;
}
const units = [];
// 真实 chunks 按楼层统一处理
if (realChunks.length > 0) {
const firstChunk = realChunks[0];
const pairFloor = getContextFloor(firstChunk);
const pairCandidates = contextChunksByFloor.get(pairFloor) || [];
const contextChunk = pickContextChunk(pairCandidates, firstChunk);
for (const l0 of l0Selected) {
if (usedL0Ids.has(l0.id)) continue;
if (l0.floor < range.start || l0.floor > range.end) continue;
// 上下文在前
if (contextChunk && contextChunk.floor < floor) {
const contextLine = formatContextChunkLine(contextChunk, true);
if (!pushWithBudget(lines, contextLine, budget)) {
return { lines, anchorCount, contextPairsCount };
}
contextPairsCount++;
}
// 输出该楼层所有真实 chunks
for (const c of realChunks) {
const line = formatEvidenceFullLine(c);
if (!pushWithBudget(lines, line, budget)) {
return { lines, anchorCount, contextPairsCount };
}
}
// 上下文在后
if (contextChunk && contextChunk.floor > floor) {
const contextLine = formatContextChunkLine(contextChunk, false);
if (!pushWithBudget(lines, contextLine, budget)) {
return { lines, anchorCount, contextPairsCount };
}
contextPairsCount++;
}
}
const unit = buildEvidenceUnit(l0, l1ByFloor);
units.push(unit);
usedL0Ids.add(l0.id);
}
return { lines, anchorCount, contextPairsCount };
// 按楼层排序
units.sort((a, b) => a.l0.floor - b.l0.floor);
return units;
}
// ─────────────────────────────────────────────────────────────────────────────
// 事件格式化L2→L0→L1 层级)
// ─────────────────────────────────────────────────────────────────────────────
/**
* 格式化事件(含 L0→L1 证据)
* @param {object} eventItem - 事件召回项
* @param {number} idx - 编号
* @param {EvidenceUnit[]} evidenceUnits - 该事件的证据单元
* @param {Map<string, object>} causalById - 因果事件索引
* @returns {string} 格式化后的文本
*/
/**
 * Format a recalled L2 event, including its causal chain and any
 * attached L0→L1 evidence units.
 * @param {object} eventItem - Recalled event item
 * @param {number} idx - Display ordinal
 * @param {EvidenceUnit[]} evidenceUnits - Evidence units for this event
 * @param {Map<string, object>} causalById - Causal-event index
 * @returns {string} Formatted multi-line text
 */
function formatEventWithEvidence(eventItem, idx, evidenceUnits, causalById) {
  const ev = eventItem.event || {};
  const time = ev.timeLabel || "";
  const title = String(ev.title || "").trim();
  const people = (ev.participants || []).join(" / ").trim();
  const summary = cleanSummary(ev.summary);
  const displayTitle = title || people || ev.id || "事件";
  // Header: ordinal, optional 【time】 prefix, then the display title.
  const header = time
    ? `${idx}.【${time}】${displayTitle}`
    : `${idx}. ${displayTitle}`;
  const lines = [header];
  // Participants line only when it adds information beyond the title.
  if (people && displayTitle !== people) {
    lines.push(` ${people}`);
  }
  lines.push(` ${summary}`);
  // Causal chain: one line per resolvable causedBy reference.
  const causedBy = ev.causedBy || [];
  for (const cid of causedBy) {
    const causalItem = causalById?.get(cid);
    if (causalItem) {
      lines.push(formatCausalEventLine(causalItem));
    }
  }
  // L0→L1 evidence units, each expanding to one or more lines.
  for (const unit of evidenceUnits) {
    for (const unitLine of formatEvidenceUnit(unit)) {
      lines.push(unitLine);
    }
  }
  return lines.join("\n");
}
// ─────────────────────────────────────────────────────────────────────────────
@@ -581,19 +556,22 @@ export function buildNonVectorPromptText() {
* 构建向量模式注入文本
* @param {object} store - 存储对象
* @param {object} recallResult - 召回结果
* @param {Map} causalById - 因果事件索引
* @param {Map<string, object>} causalById - 因果事件索引
* @param {string[]} focusEntities - 焦点实体
* @param {object} meta - 元数据
* @param {object} metrics - 指标对象
* @returns {Promise<{promptText: string, injectionLogText: string, injectionStats: object, metrics: object}>}
* @returns {Promise<{promptText: string, injectionStats: object, metrics: object}>}
*/
async function buildVectorPrompt(store, recallResult, causalById, focusEntities = [], meta = null, metrics = null) {
async function buildVectorPrompt(store, recallResult, causalById, focusEntities, meta, metrics) {
const T_Start = performance.now();
const { chatId } = getContext();
const data = store.json || {};
const total = { used: 0, max: MAIN_BUDGET_MAX };
// 从 recallResult 解构
const l0Selected = recallResult?.l0Selected || [];
const l1ByFloor = recallResult?.l1ByFloor || new Map();
// 装配结果
const assembled = {
constraints: { lines: [], tokens: 0 },
@@ -610,15 +588,9 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
constraint: { count: 0, tokens: 0, filtered: 0 },
arc: { count: 0, tokens: 0 },
event: { selected: 0, tokens: 0 },
evidence: { attached: 0, tokens: 0 },
distantEvidence: { injected: 0, tokens: 0, anchorCount: 0, contextPairs: 0 },
};
const recentEvidenceStats = {
injected: 0,
tokens: 0,
floorRange: "N/A",
contextPairs: 0,
evidence: { l0InEvents: 0, l1InEvents: 0, tokens: 0 },
distantEvidence: { units: 0, tokens: 0 },
recentEvidence: { units: 0, tokens: 0 },
};
const eventDetails = {
@@ -627,6 +599,9 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
relatedCount: 0,
};
// 已消费的 L0 ID 集合事件区域消费后evidence 区域不再重复)
const usedL0Ids = new Set();
// ═══════════════════════════════════════════════════════════════════════
// [Constraints] L3 Facts → 世界约束
// ═══════════════════════════════════════════════════════════════════════
@@ -698,70 +673,9 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
}
// ═══════════════════════════════════════════════════════════════════════
// [Events] L2 Events → 直接命中 + 相似命中 + 因果链
// [Events] L2 Events → 直接命中 + 相似命中 + 因果链 + L0→L1 证据
// ═══════════════════════════════════════════════════════════════════════
const eventHits = (recallResult?.events || []).filter(e => e?.event?.summary);
const evidenceChunks = recallResult?.evidenceChunks || [];
const usedChunkIds = new Set();
/**
* 为事件选择最佳证据 chunk
* @param {object} eventObj - 事件对象
* @returns {object|null} 最佳 chunk
*/
/**
 * Pick the best evidence chunk inside an event's floor range.
 * Preference order: anchor-virtual chunks beat real chunks; within the
 * same kind, the lower chunkIdx wins. Already-consumed chunks (tracked
 * in the enclosing usedChunkIds set) are skipped.
 * @param {object} eventObj - Event object
 * @returns {object|null} Best chunk, or null when none qualifies
 */
function pickBestEvidenceForEvent(eventObj) {
  const range = parseFloorRange(eventObj?.summary);
  if (!range) return null;
  let best = null;
  for (const candidate of evidenceChunks) {
    if (usedChunkIds.has(candidate.chunkId)) continue;
    const inRange = candidate.floor >= range.start && candidate.floor <= range.end;
    if (!inRange) continue;
    if (best === null) {
      best = candidate;
      continue;
    }
    const promoteAnchor = candidate.isAnchorVirtual && !best.isAnchorVirtual;
    const sameKind = candidate.isAnchorVirtual === best.isAnchorVirtual;
    const lowerIdx = (candidate.chunkIdx ?? 0) < (best.chunkIdx ?? 0);
    if (promoteAnchor || (sameKind && lowerIdx)) {
      best = candidate;
    }
  }
  return best;
}
/**
* 格式化事件带证据
* @param {object} eventItem - 事件项
* @param {number} idx - 编号
* @param {object} chunk - 证据 chunk
* @returns {string} 格式化后的文本
*/
/**
 * Format an event with an optional single evidence chunk (legacy shape:
 * one chunk per event rather than L0→L1 units). Reads causalById from
 * the enclosing scope to resolve causedBy references.
 * @param {object} eventItem - Event item
 * @param {number} idx - Display ordinal
 * @param {object} chunk - Evidence chunk (nullable)
 * @returns {string} Formatted multi-line text
 */
function formatEventWithEvidence(eventItem, idx, chunk) {
  const ev = eventItem.event || {};
  const time = ev.timeLabel || "";
  const title = String(ev.title || "").trim();
  const people = (ev.participants || []).join(" / ").trim();
  const summary = cleanSummary(ev.summary);
  const displayTitle = title || people || ev.id || "事件";
  const lines = [];
  // Header with optional 【time】 prefix.
  if (time) {
    lines.push(`${idx}.【${time}】${displayTitle}`);
  } else {
    lines.push(`${idx}. ${displayTitle}`);
  }
  // Participants only when distinct from the title line.
  if (people && displayTitle !== people) lines.push(` ${people}`);
  lines.push(` ${summary}`);
  // Causal chain lines for every resolvable causedBy id.
  (ev.causedBy || []).forEach((cid) => {
    const causalItem = causalById?.get(cid);
    if (causalItem) lines.push(formatCausalEventLine(causalItem, causalById));
  });
  if (chunk) lines.push(` ${formatEvidenceFullLine(chunk)}`);
  return lines.join("\n");
}
const candidates = [...eventHits].sort((a, b) => (b.similarity || 0) - (a.similarity || 0));
@@ -775,52 +689,91 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
const isDirect = e._recallType === "DIRECT";
const bestChunk = pickBestEvidenceForEvent(e.event);
// 收集该事件范围内的 L0→L1 证据单元
const evidenceUnits = collectEvidenceForEvent(e.event, l0Selected, l1ByFloor, usedL0Ids);
let text = formatEventWithEvidence(e, 0, bestChunk);
let cost = estimateTokens(text);
let hasEvidence = !!bestChunk;
let chosenChunk = bestChunk || null;
// 格式化事件(含证据)
const text = formatEventWithEvidence(e, 0, evidenceUnits, causalById);
const cost = estimateTokens(text);
// 预算检查:整个事件(含证据)作为原子单元
if (total.used + cost > total.max) {
text = formatEventWithEvidence(e, 0, null);
cost = estimateTokens(text);
hasEvidence = false;
chosenChunk = null;
// 尝试不带证据的版本
const textNoEvidence = formatEventWithEvidence(e, 0, [], causalById);
const costNoEvidence = estimateTokens(textNoEvidence);
if (total.used + cost > total.max) {
if (total.used + costNoEvidence > total.max) {
continue;
}
// 放入不带证据的版本,归还已消费的 L0 ID
for (const unit of evidenceUnits) {
usedL0Ids.delete(unit.l0.id);
}
if (isDirect) {
selectedDirect.push({
event: e.event, text: textNoEvidence, tokens: costNoEvidence,
evidenceUnits: [], candidateRank,
});
} else {
selectedRelated.push({
event: e.event, text: textNoEvidence, tokens: costNoEvidence,
evidenceUnits: [], candidateRank,
});
}
injectionStats.event.selected++;
injectionStats.event.tokens += costNoEvidence;
total.used += costNoEvidence;
eventDetails.list.push({
title: e.event?.title || e.event?.id,
isDirect,
hasEvidence: false,
tokens: costNoEvidence,
similarity: e.similarity || 0,
l0Count: 0,
l1Count: 0,
});
continue;
}
// 预算充足,放入完整版本
const l0Count = evidenceUnits.length;
let l1Count = 0;
for (const unit of evidenceUnits) {
if (unit.userL1) l1Count++;
if (unit.aiL1) l1Count++;
}
if (isDirect) {
selectedDirect.push({ event: e.event, text, tokens: cost, chunk: chosenChunk, hasEvidence, candidateRank });
selectedDirect.push({
event: e.event, text, tokens: cost,
evidenceUnits, candidateRank,
});
} else {
selectedRelated.push({ event: e.event, text, tokens: cost, chunk: chosenChunk, hasEvidence, candidateRank });
selectedRelated.push({
event: e.event, text, tokens: cost,
evidenceUnits, candidateRank,
});
}
injectionStats.event.selected++;
injectionStats.event.tokens += cost;
injectionStats.evidence.l0InEvents += l0Count;
injectionStats.evidence.l1InEvents += l1Count;
total.used += cost;
if (hasEvidence && bestChunk) {
const chunkLine = formatEvidenceFullLine(bestChunk);
const ct = estimateTokens(chunkLine);
injectionStats.evidence.attached++;
injectionStats.evidence.tokens += ct;
usedChunkIds.add(bestChunk.chunkId);
injectionStats.event.tokens += Math.max(0, cost - ct);
} else {
injectionStats.event.tokens += cost;
}
eventDetails.list.push({
title: e.event?.title || e.event?.id,
isDirect,
hasEvidence,
hasEvidence: l0Count > 0,
tokens: cost,
similarity: e.similarity || 0,
hasAnchorEvidence: bestChunk?.isAnchorVirtual || false,
l0Count,
l1Count,
});
}
@@ -845,110 +798,81 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
assembled.relatedEvents.lines = relatedEventTexts;
// ═══════════════════════════════════════════════════════════════════════
// [Evidence - Distant] L1 Chunks → 远期证据(已总结范围)
// [Evidence - Distant] 远期证据(已总结范围,未被事件消费的 L0→L1
// ═══════════════════════════════════════════════════════════════════════
const lastSummarized = store.lastSummarizedMesId ?? -1;
const lastChunkFloor = meta?.lastChunkFloor ?? -1;
const keepVisible = store.keepVisibleCount ?? 3;
const distantContextFloors = new Set();
const distantCandidates = evidenceChunks
.filter(c => !usedChunkIds.has(c.chunkId))
.filter(c => c.floor <= lastSummarized);
// 收集未被事件消费的 L0按 rerankScore 降序
const remainingL0 = l0Selected
.filter(l0 => !usedL0Ids.has(l0.id))
.sort((a, b) => (b.rerankScore || 0) - (a.rerankScore || 0));
for (const c of distantCandidates) {
if (c.isAnchorVirtual) continue;
const pairFloor = getContextFloor(c);
if (pairFloor >= 0) distantContextFloors.add(pairFloor);
}
// 远期floor <= lastSummarized
const distantL0 = remainingL0.filter(l0 => l0.floor <= lastSummarized);
let contextChunksByFloor = new Map();
if (chatId && distantContextFloors.size > 0) {
try {
const contextChunks = await getChunksByFloors(chatId, Array.from(distantContextFloors));
for (const pc of contextChunks) {
if (!contextChunksByFloor.has(pc.floor)) {
contextChunksByFloor.set(pc.floor, []);
}
contextChunksByFloor.get(pc.floor).push(pc);
}
} catch (e) {
xbLog.warn(MODULE_ID, "获取配对chunks失败", e);
}
}
if (distantCandidates.length && total.used < total.max) {
if (distantL0.length && total.used < total.max) {
const distantBudget = { used: 0, max: Math.min(DISTANT_EVIDENCE_MAX, total.max - total.used) };
const result = assembleEvidenceByFloor(
distantCandidates.sort((a, b) => (a.floor - b.floor) || ((a.chunkIdx ?? 0) - (b.chunkIdx ?? 0))),
contextChunksByFloor,
distantBudget
);
// 按楼层排序(时间顺序)
distantL0.sort((a, b) => a.floor - b.floor);
for (const l0 of distantL0) {
const unit = buildEvidenceUnit(l0, l1ByFloor);
// 原子单元预算检查
if (distantBudget.used + unit.totalTokens > distantBudget.max) continue;
const unitLines = formatEvidenceUnit(unit);
for (const line of unitLines) {
assembled.distantEvidence.lines.push(line);
}
distantBudget.used += unit.totalTokens;
usedL0Ids.add(l0.id);
injectionStats.distantEvidence.units++;
}
assembled.distantEvidence.lines = result.lines;
assembled.distantEvidence.tokens = distantBudget.used;
total.used += distantBudget.used;
injectionStats.distantEvidence.injected = result.lines.length;
injectionStats.distantEvidence.tokens = distantBudget.used;
injectionStats.distantEvidence.anchorCount = result.anchorCount;
injectionStats.distantEvidence.contextPairs = result.contextPairsCount;
}
// ═══════════════════════════════════════════════════════════════════════
// [Evidence - Recent] L1 Chunks → 近期证据(未总结范围,独立预算)
// [Evidence - Recent] 近期证据(未总结范围,独立预算)
// ═══════════════════════════════════════════════════════════════════════
const recentStart = lastSummarized + 1;
const recentEnd = lastChunkFloor - keepVisible;
if (evidenceChunks.length && recentEnd >= recentStart) {
const recentCandidates = evidenceChunks
.filter(c => !usedChunkIds.has(c.chunkId))
.filter(c => c.floor >= recentStart && c.floor <= recentEnd);
if (recentEnd >= recentStart) {
const recentL0 = remainingL0
.filter(l0 => !usedL0Ids.has(l0.id))
.filter(l0 => l0.floor >= recentStart && l0.floor <= recentEnd);
const recentContextFloors = new Set();
for (const c of recentCandidates) {
if (c.isAnchorVirtual) continue;
const pairFloor = getContextFloor(c);
if (pairFloor >= 0) recentContextFloors.add(pairFloor);
}
if (chatId && recentContextFloors.size > 0) {
const newFloors = Array.from(recentContextFloors).filter(f => !contextChunksByFloor.has(f));
if (newFloors.length > 0) {
try {
const newContextChunks = await getChunksByFloors(chatId, newFloors);
for (const pc of newContextChunks) {
if (!contextChunksByFloor.has(pc.floor)) {
contextChunksByFloor.set(pc.floor, []);
}
contextChunksByFloor.get(pc.floor).push(pc);
}
} catch (e) {
xbLog.warn(MODULE_ID, "获取近期配对chunks失败", e);
}
}
}
if (recentCandidates.length) {
if (recentL0.length) {
const recentBudget = { used: 0, max: RECENT_EVIDENCE_MAX };
const result = assembleEvidenceByFloor(
recentCandidates.sort((a, b) => (a.floor - b.floor) || ((a.chunkIdx ?? 0) - (b.chunkIdx ?? 0))),
contextChunksByFloor,
recentBudget
);
// 按楼层排序(时间顺序)
recentL0.sort((a, b) => a.floor - b.floor);
for (const l0 of recentL0) {
const unit = buildEvidenceUnit(l0, l1ByFloor);
if (recentBudget.used + unit.totalTokens > recentBudget.max) continue;
const unitLines = formatEvidenceUnit(unit);
for (const line of unitLines) {
assembled.recentEvidence.lines.push(line);
}
recentBudget.used += unit.totalTokens;
usedL0Ids.add(l0.id);
injectionStats.recentEvidence.units++;
}
assembled.recentEvidence.lines = result.lines;
assembled.recentEvidence.tokens = recentBudget.used;
recentEvidenceStats.injected = result.lines.length;
recentEvidenceStats.tokens = recentBudget.used;
recentEvidenceStats.floorRange = `${recentStart + 1}~${recentEnd + 1}`;
recentEvidenceStats.contextPairs = result.contextPairsCount;
injectionStats.recentEvidence.tokens = recentBudget.used;
}
}
@@ -984,7 +908,7 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
metrics.timing.evidenceAssembly = Math.round(performance.now() - T_Start - (metrics.timing.constraintFilter || 0));
metrics.timing.formatting = 0;
}
return { promptText: "", injectionLogText: "", injectionStats, metrics };
return { promptText: "", injectionStats, metrics };
}
const promptText =
@@ -1009,15 +933,16 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
metrics.budget.utilization = Math.round(metrics.budget.total / TOTAL_BUDGET_MAX * 100);
metrics.budget.breakdown = {
constraints: assembled.constraints.tokens,
events: injectionStats.event.tokens + injectionStats.evidence.tokens,
events: injectionStats.event.tokens,
distantEvidence: injectionStats.distantEvidence.tokens,
recentEvidence: recentEvidenceStats.tokens || 0,
recentEvidence: injectionStats.recentEvidence.tokens,
arcs: assembled.arcs.tokens,
};
metrics.evidence.tokens = injectionStats.distantEvidence.tokens + (recentEvidenceStats.tokens || 0);
metrics.evidence.contextPairsAdded = injectionStats.distantEvidence.contextPairs + recentEvidenceStats.contextPairs;
metrics.evidence.assemblyTime = Math.round(performance.now() - T_Start - (metrics.timing.constraintFilter || 0) - metrics.formatting.time);
metrics.evidence.tokens = injectionStats.distantEvidence.tokens + injectionStats.recentEvidence.tokens;
metrics.evidence.assemblyTime = Math.round(
performance.now() - T_Start - (metrics.timing.constraintFilter || 0) - metrics.formatting.time
);
metrics.timing.evidenceAssembly = metrics.evidence.assemblyTime;
const totalFacts = allFacts.length;
@@ -1026,76 +951,19 @@ async function buildVectorPrompt(store, recallResult, causalById, focusEntities
: 100;
metrics.quality.eventPrecisionProxy = metrics.event?.similarityDistribution?.mean || 0;
const totalSelected = metrics.evidence.selected || 0;
const attached = injectionStats.evidence.attached;
metrics.quality.evidenceDensity = totalSelected > 0
? Math.round(attached / totalSelected * 100)
: 0;
const selectedReal = metrics.evidence.selectedByType?.chunkReal || 0;
const selectedTotal = metrics.evidence.selected || 0;
metrics.quality.chunkRealRatio = selectedTotal > 0
? Math.round(selectedReal / selectedTotal * 100)
const totalL0Selected = l0Selected.length;
const l0WithL1 = l0Selected.filter(l0 => {
const pair = l1ByFloor.get(l0.floor);
return pair?.aiTop1 || pair?.userTop1;
}).length;
metrics.quality.l1AttachRate = totalL0Selected > 0
? Math.round(l0WithL1 / totalL0Selected * 100)
: 0;
metrics.quality.potentialIssues = detectIssues(metrics);
}
return { promptText, injectionLogText: "", injectionStats, metrics };
}
// ─────────────────────────────────────────────────────────────────────────────
// 因果证据补充
// ─────────────────────────────────────────────────────────────────────────────
/**
* 为因果事件附加证据
* @param {object[]} causalChain - 因果链
* @param {Map} eventVectorMap - 事件向量索引
* @param {Map} chunkVectorMap - chunk 向量索引
* @param {Map} chunksMap - chunks 索引
*/
async function attachEvidenceToCausalEvents(causalChain, eventVectorMap, chunkVectorMap, chunksMap) {
for (const c of causalChain) {
c._evidenceChunk = null;
const ev = c.event;
if (!ev?.id) continue;
const evVec = eventVectorMap.get(ev.id);
if (!evVec?.length) continue;
const range = parseFloorRange(ev.summary);
if (!range) continue;
const candidateChunks = [];
for (const [chunkId, chunk] of chunksMap) {
if (chunk.floor >= range.start && chunk.floor <= range.end) {
const vec = chunkVectorMap.get(chunkId);
if (vec?.length) candidateChunks.push({ chunk, vec });
}
}
if (!candidateChunks.length) continue;
let best = null;
let bestSim = -1;
for (const { chunk, vec } of candidateChunks) {
const sim = cosineSimilarity(evVec, vec);
if (sim > bestSim) {
bestSim = sim;
best = chunk;
}
}
if (best && bestSim > 0.3) {
c._evidenceChunk = {
floor: best.floor,
speaker: best.speaker,
text: best.text,
similarity: bestSim,
};
}
}
return { promptText, injectionStats, metrics };
}
// ─────────────────────────────────────────────────────────────────────────────
@@ -1150,43 +1018,16 @@ export async function buildVectorPromptText(excludeLastAi = false, hooks = {}) {
recallResult = {
...recallResult,
events: recallResult?.events || [],
evidenceChunks: recallResult?.evidenceChunks || [],
l0Selected: recallResult?.l0Selected || [],
l1ByFloor: recallResult?.l1ByFloor || new Map(),
causalChain: recallResult?.causalChain || [],
focusEntities: recallResult?.focusEntities || [],
logText: recallResult?.logText || "",
metrics: recallResult?.metrics || null,
};
const causalChain = recallResult.causalChain || [];
if (causalChain.length > 0) {
if (chatId) {
try {
const floors = new Set();
for (const c of causalChain) {
const r = parseFloorRange(c.event?.summary);
if (!r) continue;
for (let f = r.start; f <= r.end; f++) floors.add(f);
}
const [chunksList, chunkVecs, eventVecs] = await Promise.all([
getChunksByFloors(chatId, Array.from(floors)),
getAllChunkVectors(chatId),
getAllEventVectors(chatId),
]);
const chunksMap = new Map(chunksList.map(c => [c.chunkId, c]));
const chunkVectorMap = new Map(chunkVecs.map(v => [v.chunkId, v.vector]));
const eventVectorMap = new Map(eventVecs.map(v => [v.eventId, v.vector]));
await attachEvidenceToCausalEvents(causalChain, eventVectorMap, chunkVectorMap, chunksMap);
} catch (e) {
xbLog.warn(MODULE_ID, "Causal evidence attachment failed", e);
}
}
}
// 构建因果事件索引
causalById = new Map(
recallResult.causalChain
(recallResult.causalChain || [])
.map(c => [c?.event?.id, c])
.filter(x => x[0])
);
@@ -1210,7 +1051,7 @@ export async function buildVectorPromptText(excludeLastAi = false, hooks = {}) {
const hasUseful =
(recallResult?.events?.length || 0) > 0 ||
(recallResult?.evidenceChunks?.length || 0) > 0 ||
(recallResult?.l0Selected?.length || 0) > 0 ||
(recallResult?.causalChain?.length || 0) > 0;
if (!hasUseful) {

View File

@@ -1,9 +1,16 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Metrics Collector (v3 - Deterministic Query + Hybrid + W-RRF)
// Story Summary - Metrics Collector (v4 - Two-Stage: L0 Locate → L1 Evidence)
//
// 命名规范:
// - 存储层用 L0/L1/L2/L3StateAtom/Chunk/Event/Fact
// - 指标层用语义名称anchor/evidence/event/constraint/arc
//
// 架构变更v3 → v4
// - evidence 区块反映 L0-only 融合 + L1 按楼层拉取的两阶段架构
// - 删除 mergedByType / selectedByType不再有混合池
// - 新增 l0Candidates / l0Selected / l1Pulled / l1Attached / l1CosineTime
// - fusion 区块明确标注 L0-only删除 anchorCount
// - quality.chunkRealRatio → quality.l1AttachRate
// ═══════════════════════════════════════════════════════════════════════════
/**
@@ -41,11 +48,10 @@ export function createMetrics() {
searchTime: 0,
},
// Fusion (W-RRF) - 多路融合
// Fusion (W-RRF, L0-only) - 多路融合
fusion: {
denseCount: 0,
lexCount: 0,
anchorCount: 0,
totalUnique: 0,
afterCap: 0,
time: 0,
@@ -74,23 +80,26 @@ export function createMetrics() {
entityNames: [],
},
// Evidence (L1 Chunks) - 原文证据
// Evidence (Two-Stage: L0 rerank → L1 pull) - 原文证据
evidence: {
floorsFromAnchors: 0,
chunkTotal: 0,
denseCoarse: 0,
merged: 0,
mergedByType: { anchorVirtual: 0, chunkReal: 0 },
selected: 0,
selectedByType: { anchorVirtual: 0, chunkReal: 0 },
contextPairsAdded: 0,
tokens: 0,
assemblyTime: 0,
// Stage 1: L0
l0Candidates: 0, // W-RRF 融合后的 L0 候选数
l0Selected: 0, // rerank 后选中的 L0 数
rerankApplied: false,
beforeRerank: 0,
afterRerank: 0,
rerankTime: 0,
rerankScores: null,
// Stage 2: L1
l1Pulled: 0, // 从 DB 拉取的 L1 chunk 总数
l1Attached: 0, // 实际挂载的 L1 数top-1 × 楼层 × 2侧
l1CosineTime: 0, // L1 cosine 打分耗时
// 装配
contextPairsAdded: 0, // 保留兼容(= l1Attached 中 USER 侧数量)
tokens: 0,
assemblyTime: 0,
},
// Arc - 人物弧光
@@ -139,8 +148,7 @@ export function createMetrics() {
quality: {
constraintCoverage: 100,
eventPrecisionProxy: 0,
evidenceDensity: 0,
chunkRealRatio: 0,
l1AttachRate: 0, // 有 L1 挂载的 L0 占比
potentialIssues: [],
},
};
@@ -178,7 +186,7 @@ export function formatMetricsLog(metrics) {
lines.push('');
lines.push('════════════════════════════════════════');
lines.push(' Recall Metrics Report ');
lines.push(' Recall Metrics Report (v4) ');
lines.push('════════════════════════════════════════');
lines.push('');
@@ -214,11 +222,10 @@ export function formatMetricsLog(metrics) {
lines.push(`└─ search_time: ${m.lexical.searchTime}ms`);
lines.push('');
// Fusion (W-RRF)
lines.push('[Fusion] W-RRF - 多路融合');
// Fusion (W-RRF, L0-only)
lines.push('[Fusion] W-RRF (L0-only) - 多路融合');
lines.push(`├─ dense_count: ${m.fusion.denseCount}`);
lines.push(`├─ lex_count: ${m.fusion.lexCount}`);
lines.push(`├─ anchor_count: ${m.fusion.anchorCount}`);
lines.push(`├─ total_unique: ${m.fusion.totalUnique}`);
lines.push(`├─ after_cap: ${m.fusion.afterCap}`);
lines.push(`└─ time: ${m.fusion.time}ms`);
@@ -269,43 +276,29 @@ export function formatMetricsLog(metrics) {
lines.push(`└─ entities_used: ${m.event.entitiesUsed} [${(m.event.entityNames || []).join(', ')}]`);
lines.push('');
// Evidence (L1 Chunks)
lines.push('[Evidence] L1 Chunks - 原文证据');
lines.push(`├─ floors_from_anchors: ${m.evidence.floorsFromAnchors}`);
if (m.evidence.chunkTotal > 0) {
lines.push(`├─ chunk_total: ${m.evidence.chunkTotal}`);
lines.push(`├─ dense_coarse: ${m.evidence.denseCoarse}`);
}
lines.push(`├─ merged: ${m.evidence.merged}`);
if (m.evidence.mergedByType) {
const mt = m.evidence.mergedByType;
lines.push(`│ ├─ anchor_virtual: ${mt.anchorVirtual || 0}`);
lines.push(`│ └─ chunk_real: ${mt.chunkReal || 0}`);
}
// Evidence (Two-Stage)
lines.push('[Evidence] Two-Stage: L0 Locate → L1 Pull');
lines.push(`├─ Stage 1 (L0):`);
lines.push(`│ ├─ candidates (post-fusion): ${m.evidence.l0Candidates}`);
if (m.evidence.rerankApplied) {
lines.push(`├─ rerank_applied: true`);
lines.push(`│ ├─ before: ${m.evidence.beforeRerank}`);
lines.push(`│ ├─ after: ${m.evidence.afterRerank}`);
lines.push(`│ └─ time: ${m.evidence.rerankTime}ms`);
lines.push(`├─ rerank_applied: true`);
lines.push(` ├─ before: ${m.evidence.beforeRerank}`);
lines.push(` ├─ after: ${m.evidence.afterRerank}`);
lines.push(` └─ time: ${m.evidence.rerankTime}ms`);
if (m.evidence.rerankScores) {
const rs = m.evidence.rerankScores;
lines.push(`├─ rerank_scores: min=${rs.min}, max=${rs.max}, mean=${rs.mean}`);
lines.push(`├─ rerank_scores: min=${rs.min}, max=${rs.max}, mean=${rs.mean}`);
}
} else {
lines.push(`├─ rerank_applied: false`);
lines.push(`├─ rerank_applied: false`);
}
    lines.push(`├─ selected: ${m.evidence.selected}`);
if (m.evidence.selectedByType) {
const st = m.evidence.selectedByType;
lines.push(`│ ├─ anchor_virtual: ${st.anchorVirtual || 0}`);
lines.push(`│ └─ chunk_real: ${st.chunkReal || 0}`);
}
lines.push(`├─ context_pairs_added: ${m.evidence.contextPairsAdded}`);
lines.push(`│ └─ selected: ${m.evidence.l0Selected}`);
lines.push(`├─ Stage 2 (L1):`);
lines.push(`│ ├─ pulled: ${m.evidence.l1Pulled}`);
lines.push(`│ ├─ attached: ${m.evidence.l1Attached}`);
lines.push(`│ └─ cosine_time: ${m.evidence.l1CosineTime}ms`);
lines.push(`├─ tokens: ${m.evidence.tokens}`);
lines.push(`└─ assembly_time: ${m.evidence.assemblyTime}ms`);
lines.push('');
@@ -351,6 +344,7 @@ export function formatMetricsLog(metrics) {
if (m.timing.evidenceRerank > 0) {
lines.push(`├─ evidence_rerank: ${m.timing.evidenceRerank}ms`);
}
lines.push(`├─ l1_cosine: ${m.evidence.l1CosineTime}ms`);
lines.push(`├─ evidence_assembly: ${m.timing.evidenceAssembly}ms`);
lines.push(`├─ formatting: ${m.timing.formatting}ms`);
lines.push(`└─ total: ${m.timing.total}ms`);
@@ -360,8 +354,7 @@ export function formatMetricsLog(metrics) {
lines.push('[Quality] 质量指标');
lines.push(`├─ constraint_coverage: ${m.quality.constraintCoverage}%`);
lines.push(`├─ event_precision_proxy: ${m.quality.eventPrecisionProxy}`);
lines.push(`├─ evidence_density: ${m.quality.evidenceDensity}%`);
lines.push(`├─ chunk_real_ratio: ${m.quality.chunkRealRatio}%`);
lines.push(`├─ l1_attach_rate: ${m.quality.l1AttachRate}%`);
if (m.quality.potentialIssues && m.quality.potentialIssues.length > 0) {
lines.push(`└─ potential_issues:`);
@@ -414,15 +407,15 @@ export function detectIssues(metrics) {
}
// ─────────────────────────────────────────────────────────────────
// 融合问题
// 融合问题L0-only
// ─────────────────────────────────────────────────────────────────
if (m.fusion.lexCount === 0 && m.fusion.denseCount > 0) {
issues.push('No lexical candidates in fusion - hybrid retrieval not contributing');
issues.push('No lexical L0 candidates in fusion - hybrid retrieval not contributing');
}
if (m.fusion.afterCap === 0) {
issues.push('Fusion produced zero candidates - all retrieval paths may have failed');
issues.push('Fusion produced zero L0 candidates - all retrieval paths may have failed');
}
// ─────────────────────────────────────────────────────────────────
@@ -430,7 +423,6 @@ export function detectIssues(metrics) {
// ─────────────────────────────────────────────────────────────────
if (m.event.considered > 0) {
// 只统计 Dense 路选中direct + relatedLexical 是额外补充不计入
const denseSelected =
(m.event.byRecallType?.direct || 0) +
(m.event.byRecallType?.related || 0);
@@ -467,50 +459,47 @@ export function detectIssues(metrics) {
}
// ─────────────────────────────────────────────────────────────────
// 证据问题
// L0 Rerank 问题
// ─────────────────────────────────────────────────────────────────
// Dense 粗筛比例
if (m.evidence.chunkTotal > 0 && m.evidence.denseCoarse > 0) {
const coarseFilterRatio = 1 - (m.evidence.denseCoarse / m.evidence.chunkTotal);
if (coarseFilterRatio > 0.95) {
issues.push(`Very high dense coarse filter ratio (${(coarseFilterRatio * 100).toFixed(0)}%) - query vector may be poorly aligned`);
}
}
// Rerank 相关问题
if (m.evidence.rerankApplied) {
if (m.evidence.beforeRerank > 0 && m.evidence.afterRerank > 0) {
const filterRatio = 1 - (m.evidence.afterRerank / m.evidence.beforeRerank);
if (filterRatio > 0.7) {
issues.push(`High rerank filter ratio (${(filterRatio * 100).toFixed(0)}%) - many irrelevant chunks in fusion output`);
issues.push(`High L0 rerank filter ratio (${(filterRatio * 100).toFixed(0)}%) - many irrelevant L0 in fusion output`);
}
}
if (m.evidence.rerankScores) {
const rs = m.evidence.rerankScores;
if (rs.max < 0.5) {
issues.push(`Low rerank scores (max=${rs.max}) - query may be poorly matched`);
issues.push(`Low L0 rerank scores (max=${rs.max}) - query may be poorly matched`);
}
if (rs.mean < 0.3) {
issues.push(`Very low average rerank score (mean=${rs.mean}) - context may be weak`);
issues.push(`Very low average L0 rerank score (mean=${rs.mean}) - context may be weak`);
}
}
if (m.evidence.rerankTime > 2000) {
issues.push(`Slow rerank (${m.evidence.rerankTime}ms) - may affect response time`);
issues.push(`Slow L0 rerank (${m.evidence.rerankTime}ms) - may affect response time`);
}
}
// chunk_real 比例(核心质量指标)
if (m.evidence.selected > 0 && m.evidence.selectedByType) {
const chunkReal = m.evidence.selectedByType.chunkReal || 0;
const ratio = chunkReal / m.evidence.selected;
if (ratio === 0 && m.evidence.selected > 5) {
issues.push('Zero real chunks in selected evidence - only anchor virtual chunks present');
} else if (ratio < 0.2 && m.evidence.selected > 10) {
issues.push(`Low real chunk ratio (${(ratio * 100).toFixed(0)}%) - may lack concrete dialogue evidence`);
}
// ─────────────────────────────────────────────────────────────────
// L1 挂载问题
// ─────────────────────────────────────────────────────────────────
if (m.evidence.l0Selected > 0 && m.evidence.l1Pulled === 0) {
issues.push('Zero L1 chunks pulled - L1 vectors may not exist or DB read failed');
}
if (m.evidence.l0Selected > 0 && m.evidence.l1Attached === 0 && m.evidence.l1Pulled > 0) {
issues.push('L1 chunks pulled but none attached - cosine scores may be too low or floor mismatch');
}
const l1AttachRate = m.quality.l1AttachRate || 0;
if (m.evidence.l0Selected > 5 && l1AttachRate < 20) {
issues.push(`Low L1 attach rate (${l1AttachRate}%) - many L0 lack concrete dialogue evidence`);
}
// ─────────────────────────────────────────────────────────────────
@@ -533,5 +522,9 @@ export function detectIssues(metrics) {
issues.push(`Slow query build (${m.query.buildTime}ms) - entity lexicon may be too large`);
}
if (m.evidence.l1CosineTime > 1000) {
issues.push(`Slow L1 cosine scoring (${m.evidence.l1CosineTime}ms) - too many chunks pulled`);
}
return issues;
}

View File

@@ -1,5 +1,5 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Recall Engine (v6 - Deterministic Query + Hybrid + W-RRF)
// Story Summary - Recall Engine (v7 - Two-Stage: L0 Locate → L1 Evidence)
//
// 命名规范:
// - 存储层用 L0/L1/L2/L3StateAtom/Chunk/Event/Fact
@@ -7,12 +7,13 @@
//
// 架构:
// 阶段 1: Query Build确定性无 LLM
// 阶段 2: Round 1 Dense Retrieval
// 阶段 2: Round 1 Dense RetrievalL0 + L2
// 阶段 3: Query Refinement用已命中记忆增强
// 阶段 4: Round 2 Dense Retrieval
// 阶段 5: Lexical Retrieval + Merge
// 阶段 6: Evidence Pull + W-RRF Fusion + Cap100 + Rerank
// 阶段 7: Causation Trace
// 阶段 4: Round 2 Dense RetrievalL0 + L2
// 阶段 5: Lexical Retrieval + L0 Merge
// 阶段 6: L0-only W-RRF Fusion + Rerank ‖ 并发 L1 Cosine 预筛选
// 阶段 7: L1 配对组装L0 → top-1 AI L1 + top-1 USER L1
// 阶段 8: Causation Trace
// ═══════════════════════════════════════════════════════════════════════════
import { getAllEventVectors, getChunksByFloors, getMeta, getChunkVectorsByIds } from '../storage/chunk-store.js';
@@ -38,23 +39,19 @@ const CONFIG = {
// Anchor (L0 StateAtoms)
ANCHOR_MIN_SIMILARITY: 0.58,
// Evidence (L1 Chunks) Dense 粗筛
EVIDENCE_DENSE_COARSE_MAX: 200,
// Event (L2 Events)
EVENT_CANDIDATE_MAX: 100,
EVENT_SELECT_MAX: 50,
EVENT_MIN_SIMILARITY: 0.55,
EVENT_MMR_LAMBDA: 0.72,
// W-RRF 融合
// W-RRF 融合L0-only
RRF_K: 60,
RRF_W_DENSE: 1.0,
RRF_W_LEX: 0.9,
RRF_W_ANCHOR: 0.7,
FUSION_CAP: 100,
// Rerank
// RerankL0-only
RERANK_TOP_N: 50,
RERANK_MIN_SCORE: 0.15,
@@ -228,7 +225,7 @@ async function recallAnchors(queryVector, vectorConfig, metrics) {
}
// ═══════════════════════════════════════════════════════════════════════════
// [Events] L2 Events 检索(无 entity bonus
// [Events] L2 Events 检索
// ═══════════════════════════════════════════════════════════════════════════
/**
@@ -414,35 +411,32 @@ function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MA
}
// ═══════════════════════════════════════════════════════════════════════════
// [W-RRF] 加权倒数排名融合
// [W-RRF] 加权倒数排名融合L0-only
// ═══════════════════════════════════════════════════════════════════════════
/**
* @typedef {object} RankedItem
* @property {string} chunkId - chunk 的唯一标识符
* @property {number} score - 该路的原始分数(用于日志,不参与 RRF 计算)
* @property {string} id - 唯一标识符
* @property {number} score - 该路的原始分数
*/
/**
* W-RRF 融合chunk 候选
* W-RRF 融合L0 候选dense + lexical
*
* @param {RankedItem[]} denseRank - Dense 路cosine 降序)
* @param {RankedItem[]} lexRank - Lexical 路MiniSearch score 降序)
* @param {RankedItem[]} anchorRank - Anchor 路anchor similarity 降序)
* @param {number} cap - 输出上限
* @returns {{top: {chunkId: string, fusionScore: number}[], totalUnique: number}}
* @returns {{top: {id: string, fusionScore: number}[], totalUnique: number}}
*/
function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION_CAP) {
function fuseL0Candidates(denseRank, lexRank, cap = CONFIG.FUSION_CAP) {
const k = CONFIG.RRF_K;
const wD = CONFIG.RRF_W_DENSE;
const wL = CONFIG.RRF_W_LEX;
const wA = CONFIG.RRF_W_ANCHOR;
// 构建 rank map: chunkId → 0-based rank
const buildRankMap = (ranked) => {
const map = new Map();
for (let i = 0; i < ranked.length; i++) {
const id = ranked[i].chunkId;
const id = ranked[i].id;
if (!map.has(id)) map.set(id, i);
}
return map;
@@ -450,37 +444,26 @@ function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION
const denseMap = buildRankMap(denseRank || []);
const lexMap = buildRankMap(lexRank || []);
const anchorMap = buildRankMap(anchorRank || []);
// 收集所有 chunkId去重
const allIds = new Set([
...denseMap.keys(),
...lexMap.keys(),
...anchorMap.keys(),
]);
// ★ 修复 E记录去重后的总数
const totalUnique = allIds.size;
// 计算融合分数
const scored = [];
for (const id of allIds) {
let score = 0;
if (denseMap.has(id)) {
score += wD / (k + denseMap.get(id));
}
if (lexMap.has(id)) {
score += wL / (k + lexMap.get(id));
}
if (anchorMap.has(id)) {
score += wA / (k + anchorMap.get(id));
}
scored.push({ chunkId: id, fusionScore: score });
scored.push({ id, fusionScore: score });
}
// 按融合分数降序,取前 cap 个
scored.sort((a, b) => b.fusionScore - a.fusionScore);
return {
@@ -490,228 +473,169 @@ function fuseChunkCandidates(denseRank, lexRank, anchorRank, cap = CONFIG.FUSION
}
// ═══════════════════════════════════════════════════════════════════════════
// [Evidence] L1 Chunks 拉取 + 融合 + Rerank
// [Stage 6] L0-only 融合 + Rerank ‖ 并发 L1 Cosine 预筛选
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Tally evidence chunks by their type.
 *
 * @param {object[]} chunks - evidence chunk objects; each may carry an `isAnchorVirtual` flag
 * @returns {{anchorVirtual: number, chunkReal: number}} counts of anchor-virtual vs real chunks
 */
function countEvidenceByType(chunks) {
  const tally = { anchorVirtual: 0, chunkReal: 0 };
  for (const item of chunks || []) {
    if (item.isAnchorVirtual) {
      tally.anchorVirtual += 1;
    } else {
      tally.chunkReal += 1;
    }
  }
  return tally;
}
/**
* 拉取 evidence + W-RRF 融合 + Cap100 + Rerank
* L0 融合 + rerank并发拉取 L1 并 cosine 打分
*
* @param {object[]} anchorHits - L0 命中
* @param {Set<number>} anchorFloors - 锚点命中楼层(含 lexical 扩展)
* @param {number[]} queryVector - 查询向量
* @param {object[]} anchorHits - L0 dense 命中Round 2
* @param {Set<number>} anchorFloors - L0 命中楼层(含 lexical 扩展)
* @param {number[]} queryVector - 查询向量v1
* @param {string} rerankQuery - rerank 查询文本
* @param {object} lexicalResult - 词法检索结果
* @param {object} metrics
* @returns {Promise<object[]>}
* @returns {Promise<{l0Selected: object[], l1ByFloor: Map<number, {aiTop1: object|null, userTop1: object|null}>}>}
*/
async function pullAndFuseEvidence(anchorHits, anchorFloors, queryVector, rerankQuery, lexicalResult, metrics) {
const { chatId } = getContext();
if (!chatId) return [];
async function locateAndPullEvidence(anchorHits, anchorFloors, queryVector, rerankQuery, lexicalResult, metrics) {
const { chatId, chat } = getContext();
if (!chatId) return { l0Selected: [], l1ByFloor: new Map() };
const T_Start = performance.now();
// ─────────────────────────────────────────────────────────────────
// 6a. 构建 Anchor Virtual Chunks来自 L0
// 6a. 构建 L0 候选对象(用于 rerank
//
// 重要:支持 lexical-only 的 L0atom进入候选池。
// 否则 hybrid 会退化为 dense-onlylexical 命中的 atom 若未被 dense 命中会被直接丢弃。
// ─────────────────────────────────────────────────────────────────
const anchorVirtualChunks = (anchorHits || []).map(a => ({
chunkId: `anchor-${a.atomId}`,
floor: a.floor,
chunkIdx: -1,
speaker: '📌',
isUser: false,
text: a.atom?.semantic || '',
similarity: a.similarity,
isAnchorVirtual: true,
_atom: a.atom,
}));
const l0ObjectMap = new Map();
for (const a of (anchorHits || [])) {
const id = `anchor-${a.atomId}`;
l0ObjectMap.set(id, {
id,
atomId: a.atomId,
floor: a.floor,
similarity: a.similarity,
atom: a.atom,
text: a.atom?.semantic || '',
});
}
// ─────────────────────────────────────────────────────────────────
// 6b. 拉取真实 L1 Chunks从 anchorFloors
// ─────────────────────────────────────────────────────────────────
// lexical-only atoms从全量 StateAtoms 补齐similarity 记为 0靠 lex rank 贡献 W-RRF
const lexAtomIds = lexicalResult?.atomIds || [];
if (lexAtomIds.length > 0) {
const atomsList = getStateAtoms();
const atomMap = new Map(atomsList.map(a => [a.atomId, a]));
const floorArray = Array.from(anchorFloors);
let dbChunks = [];
try {
if (floorArray.length > 0) {
dbChunks = await getChunksByFloors(chatId, floorArray);
for (const atomId of lexAtomIds) {
const id = `anchor-${atomId}`;
if (l0ObjectMap.has(id)) continue;
const atom = atomMap.get(atomId);
if (!atom) continue;
if (typeof atom.floor !== 'number' || atom.floor < 0) continue;
l0ObjectMap.set(id, {
id,
atomId,
floor: atom.floor,
similarity: 0,
atom,
text: atom.semantic || '',
});
}
} catch (e) {
xbLog.warn(MODULE_ID, '从 DB 拉取 chunks 失败', e);
}
// ─────────────────────────────────────────────────────────────────
// 6c. Dense 粗筛(对真实 chunks 按 queryVector 排序
// 6b. 构建两路排名L0-only
// ─────────────────────────────────────────────────────────────────
let denseCoarseChunks = [];
if (dbChunks.length > 0 && queryVector?.length) {
const chunkIds = dbChunks.map(c => c.chunkId);
let chunkVectors = [];
try {
chunkVectors = await getChunkVectorsByIds(chatId, chunkIds);
} catch (e) {
xbLog.warn(MODULE_ID, 'L1 向量获取失败', e);
}
const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
denseCoarseChunks = dbChunks
.map(c => {
const vec = vectorMap.get(c.chunkId);
if (!vec?.length) return null;
return {
...c,
isAnchorVirtual: false,
similarity: cosineSimilarity(queryVector, vec),
};
})
.filter(Boolean)
.sort((a, b) => b.similarity - a.similarity)
.slice(0, CONFIG.EVIDENCE_DENSE_COARSE_MAX);
}
// ─────────────────────────────────────────────────────────────────
// 6d. 构建三路排名
// ─────────────────────────────────────────────────────────────────
// Dense 路anchorVirtual + denseCoarse按 similarity 排序
const denseRank = [
...anchorVirtualChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
...denseCoarseChunks.map(c => ({ chunkId: c.chunkId, score: c.similarity })),
].sort((a, b) => b.score - a.score);
// Lexical 路:从 lexicalResult.chunkScores
const lexRank = (lexicalResult?.chunkScores || [])
.sort((a, b) => b.score - a.score)
.map(cs => ({ chunkId: cs.chunkId, score: cs.score }));
// Anchor 路anchorVirtual 按 similarity 排序
const anchorRank = anchorVirtualChunks
.map(c => ({ chunkId: c.chunkId, score: c.similarity }))
// Dense 路anchorHits 按 similarity 排序
const denseRank = (anchorHits || [])
.map(a => ({ id: `anchor-${a.atomId}`, score: a.similarity }))
.sort((a, b) => b.score - a.score);
// Lexical 路:从 lexicalResult.atomIds 构建排名(允许 lexical-only
// atomIds 已按 MiniSearch score 排序searchLexicalIndex 返回顺序W-RRF 依赖 rankscore 为占位
const lexRank = (lexAtomIds || [])
.map(atomId => ({ id: `anchor-${atomId}`, score: 1 }))
.filter(item => l0ObjectMap.has(item.id));
// ─────────────────────────────────────────────────────────────────
// 6e. W-RRF 融合 + Cap100
// 6c. W-RRF 融合L0-only
// ─────────────────────────────────────────────────────────────────
const T_Fusion_Start = performance.now();
const { top: fusionResult } = fuseChunkCandidates(denseRank, lexRank, anchorRank, CONFIG.FUSION_CAP);
const fusionChunkIds = new Set(fusionResult.map(f => f.chunkId));
const { top: fusionResult, totalUnique } = fuseL0Candidates(denseRank, lexRank, CONFIG.FUSION_CAP);
const fusionTime = Math.round(performance.now() - T_Fusion_Start);
// ─────────────────────────────────────────────────────────────────
// 6f. 构建最终候选 chunk 对象列表(用于 rerank
// ─────────────────────────────────────────────────────────────────
// 构建 chunkId → chunk 对象的映射
const chunkObjectMap = new Map();
for (const c of anchorVirtualChunks) {
chunkObjectMap.set(c.chunkId, c);
}
for (const c of denseCoarseChunks) {
if (!chunkObjectMap.has(c.chunkId)) {
chunkObjectMap.set(c.chunkId, c);
}
}
// Lexical 命中的 chunks 可能不在 denseCoarse 里,需要从 dbChunks 补充
const dbChunkMap = new Map(dbChunks.map(c => [c.chunkId, c]));
for (const cs of (lexicalResult?.chunkScores || [])) {
if (fusionChunkIds.has(cs.chunkId) && !chunkObjectMap.has(cs.chunkId)) {
const dbChunk = dbChunkMap.get(cs.chunkId);
if (dbChunk) {
chunkObjectMap.set(cs.chunkId, {
...dbChunk,
isAnchorVirtual: false,
similarity: 0,
});
}
}
}
// 按 fusionScore 排序的候选列表
// 构建 rerank 候选列表
const rerankCandidates = fusionResult
.map(f => {
const chunk = chunkObjectMap.get(f.chunkId);
if (!chunk) return null;
return {
...chunk,
_fusionScore: f.fusionScore,
};
})
.map(f => l0ObjectMap.get(f.id))
.filter(Boolean);
// ─────────────────────────────────────────────────────────────────
// 更新 metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
metrics.evidence.floorsFromAnchors = floorArray.length;
metrics.evidence.chunkTotal = dbChunks.length;
metrics.evidence.denseCoarse = denseCoarseChunks.length;
metrics.fusion.denseCount = denseRank.length;
metrics.fusion.lexCount = lexRank.length;
metrics.fusion.anchorCount = anchorRank.length;
metrics.fusion.totalUnique = fusionResult.length + (denseRank.length + lexRank.length + anchorRank.length - fusionResult.length);
metrics.fusion.totalUnique = totalUnique;
metrics.fusion.afterCap = rerankCandidates.length;
metrics.fusion.time = fusionTime;
metrics.evidence.merged = rerankCandidates.length;
metrics.evidence.mergedByType = countEvidenceByType(rerankCandidates);
metrics.evidence.l0Candidates = rerankCandidates.length;
}
// ─────────────────────────────────────────────────────────────────
// 6g. Rerank
// ─────────────────────────────────────────────────────────────────
if (rerankCandidates.length === 0) {
if (metrics) {
metrics.evidence.l0Selected = 0;
metrics.evidence.l1Pulled = 0;
metrics.evidence.l1Attached = 0;
metrics.evidence.l1CosineTime = 0;
metrics.evidence.rerankApplied = false;
metrics.evidence.selected = 0;
metrics.evidence.selectedByType = { anchorVirtual: 0, chunkReal: 0 };
}
return [];
return { l0Selected: [], l1ByFloor: new Map() };
}
// ─────────────────────────────────────────────────────────────────
// 6d. 收集所有候选 L0 的楼层(用于并发拉取 L1
// 包含 AI 楼层本身 + 上方 USER 楼层
// ─────────────────────────────────────────────────────────────────
const candidateFloors = new Set();
for (const c of rerankCandidates) {
candidateFloors.add(c.floor);
// 上方 USER 楼层
const userFloor = c.floor - 1;
if (userFloor >= 0 && chat?.[userFloor]?.is_user) {
candidateFloors.add(userFloor);
}
}
// ─────────────────────────────────────────────────────────────────
// 6e. 并发rerank L0 ‖ 拉取 L1 chunks + 向量 + cosine 打分
// ─────────────────────────────────────────────────────────────────
const T_Rerank_Start = performance.now();
const reranked = await rerankChunks(rerankQuery, rerankCandidates, {
// 并发任务 1rerank L0
const rerankPromise = rerankChunks(rerankQuery, rerankCandidates, {
topN: CONFIG.RERANK_TOP_N,
minScore: CONFIG.RERANK_MIN_SCORE,
});
// 并发任务 2拉取 L1 chunks + 向量 → cosine 打分
const l1Promise = pullAndScoreL1(chatId, Array.from(candidateFloors), queryVector, chat);
// 等待两个任务完成
const [rerankedL0, l1ScoredByFloor] = await Promise.all([rerankPromise, l1Promise]);
const rerankTime = Math.round(performance.now() - T_Rerank_Start);
// ─────────────────────────────────────────────────────────────────
// 6f. 记录 rerank metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
metrics.evidence.rerankApplied = true;
metrics.evidence.beforeRerank = rerankCandidates.length;
metrics.evidence.afterRerank = reranked.length;
metrics.evidence.selected = reranked.length;
metrics.evidence.selectedByType = countEvidenceByType(reranked);
metrics.evidence.afterRerank = rerankedL0.length;
metrics.evidence.l0Selected = rerankedL0.length;
metrics.evidence.rerankTime = rerankTime;
metrics.timing.evidenceRerank = rerankTime;
const scores = reranked.map(c => c._rerankScore || 0).filter(s => s > 0);
const scores = rerankedL0.map(c => c._rerankScore || 0).filter(s => s > 0);
if (scores.length > 0) {
scores.sort((a, b) => a - b);
metrics.evidence.rerankScores = {
@@ -722,14 +646,167 @@ async function pullAndFuseEvidence(anchorHits, anchorFloors, queryVector, rerank
}
}
// ─────────────────────────────────────────────────────────────────
// 6g. 构建最终 l0Selected + l1ByFloor
// ─────────────────────────────────────────────────────────────────
const l0Selected = rerankedL0.map(item => ({
id: item.id,
atomId: item.atomId,
floor: item.floor,
similarity: item.similarity,
rerankScore: item._rerankScore || 0,
atom: item.atom,
text: item.text,
}));
// 为每个选中的 L0 楼层组装 top-1 L1 配对
const selectedFloors = new Set(l0Selected.map(l => l.floor));
const l1ByFloor = new Map();
let contextPairsAdded = 0;
for (const floor of selectedFloors) {
const aiChunks = l1ScoredByFloor.get(floor) || [];
const userFloor = floor - 1;
const userChunks = (userFloor >= 0 && chat?.[userFloor]?.is_user)
? (l1ScoredByFloor.get(userFloor) || [])
: [];
// top-1取 cosine 最高的
const aiTop1 = aiChunks.length > 0
? aiChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
: null;
const userTop1 = userChunks.length > 0
? userChunks.reduce((best, c) => (c._cosineScore > best._cosineScore ? c : best))
: null;
// context pair = 上方 USER 楼层成功挂载(用于 metrics
if (userTop1) contextPairsAdded++;
l1ByFloor.set(floor, { aiTop1, userTop1 });
}
// ─────────────────────────────────────────────────────────────────
// 6h. L1 metrics
// ─────────────────────────────────────────────────────────────────
if (metrics) {
let totalPulled = 0;
let totalAttached = 0;
for (const [, scored] of l1ScoredByFloor) {
totalPulled += scored.length;
}
for (const [, pair] of l1ByFloor) {
if (pair.aiTop1) totalAttached++;
if (pair.userTop1) totalAttached++;
}
metrics.evidence.l1Pulled = totalPulled;
metrics.evidence.l1Attached = totalAttached;
metrics.evidence.contextPairsAdded = contextPairsAdded;
metrics.evidence.l1CosineTime = l1ScoredByFloor._cosineTime || 0;
}
const totalTime = Math.round(performance.now() - T_Start);
metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
if (metrics) {
metrics.timing.evidenceRetrieval = Math.max(0, totalTime - fusionTime - rerankTime);
}
xbLog.info(MODULE_ID,
`Evidence: ${dbChunks.length} L1 → dense=${denseCoarseChunks.length} lex=${lexRank.length} → fusion=${rerankCandidates.length} → rerank=${reranked.length} (${totalTime}ms)`
`Evidence: ${anchorHits?.length || 0} L0 dense → fusion=${rerankCandidates.length} → rerank=${rerankedL0.length} → L1 attached=${metrics?.evidence?.l1Attached || 0} (${totalTime}ms)`
);
return reranked;
return { l0Selected, l1ByFloor };
}
// ═══════════════════════════════════════════════════════════════════════════
// [L1] 拉取 + Cosine 打分(并发子任务)
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Pull L1 chunks and their vectors for the given floors from IndexedDB,
 * score every chunk against queryVector with cosine similarity, and group
 * the scored chunks by floor (each floor bucket sorted by score, descending).
 *
 * The returned Map additionally carries a `_cosineTime` property (total
 * elapsed ms) that the caller reads for metrics.
 *
 * @param {string} chatId
 * @param {number[]} floors - floors to pull L1 chunks for
 * @param {number[]} queryVector - query vector (v1)
 * @param {object[]} chat - chat message array (not read in this function; presumably kept for signature compatibility — TODO confirm)
 * @returns {Promise<Map<number, object[]>>} floor → scored chunks (each with `_cosineScore`)
 */
async function pullAndScoreL1(chatId, floors, queryVector, chat) {
  const startedAt = performance.now();
  /** @type {Map<number, object[]>} */
  const byFloor = new Map();
  // Stamp the elapsed time onto the Map and hand it back (used by all exit paths below).
  const finish = () => {
    byFloor._cosineTime = Math.round(performance.now() - startedAt);
    return byFloor;
  };
  // Guard: nothing to do without a chat, floors, or a query vector.
  if (!chatId || !floors?.length || !queryVector?.length) {
    byFloor._cosineTime = 0;
    return byFloor;
  }
  // Pull the raw L1 chunks for the requested floors.
  let pulledChunks = [];
  try {
    pulledChunks = await getChunksByFloors(chatId, floors);
  } catch (e) {
    xbLog.warn(MODULE_ID, 'L1 chunks 拉取失败', e);
    return finish();
  }
  if (!pulledChunks.length) {
    return finish();
  }
  // Pull the matching chunk vectors.
  let vectorRows = [];
  try {
    vectorRows = await getChunkVectorsByIds(chatId, pulledChunks.map(c => c.chunkId));
  } catch (e) {
    xbLog.warn(MODULE_ID, 'L1 向量拉取失败', e);
    return finish();
  }
  const vecById = new Map(vectorRows.map(v => [v.chunkId, v.vector]));
  // Cosine-score each chunk (0 when its vector is missing/empty) and bucket by floor.
  for (const chunk of pulledChunks) {
    const vec = vecById.get(chunk.chunkId);
    const entry = {
      chunkId: chunk.chunkId,
      floor: chunk.floor,
      chunkIdx: chunk.chunkIdx,
      speaker: chunk.speaker,
      isUser: chunk.isUser,
      text: chunk.text,
      _cosineScore: vec?.length ? cosineSimilarity(queryVector, vec) : 0,
    };
    const bucket = byFloor.get(chunk.floor);
    if (bucket) {
      bucket.push(entry);
    } else {
      byFloor.set(chunk.floor, [entry]);
    }
  }
  // Sort each floor bucket by cosine score, highest first.
  for (const bucket of byFloor.values()) {
    bucket.sort((a, b) => b._cosineScore - a._cosineScore);
  }
  finish();
  xbLog.info(MODULE_ID,
    `L1 pull: ${floors.length} floors → ${pulledChunks.length} chunks → scored (${byFloor._cosineTime}ms)`
  );
  return byFloor;
}
// ═══════════════════════════════════════════════════════════════════════════
@@ -758,7 +835,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [],
evidenceChunks: [],
l0Selected: [],
l1ByFloor: new Map(),
causalChain: [],
focusEntities: [],
elapsed: metrics.timing.total,
@@ -782,10 +860,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.query.buildTime = Math.round(performance.now() - T_Build_Start);
metrics.anchor.focusEntities = bundle.focusEntities;
// Query lengths (v0 available here)
if (metrics.query?.lengths) {
metrics.query.lengths.v0Chars = String(bundle.queryText_v0 || '').length;
// v1 not built yet
metrics.query.lengths.v1Chars = null;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v0 || '').length;
}
@@ -806,7 +882,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
xbLog.error(MODULE_ID, 'Round 1 向量化失败', e);
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], evidenceChunks: [], causalChain: [],
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Embedding failed (round 1).',
@@ -817,7 +893,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
if (!queryVector_v0?.length) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], evidenceChunks: [], causalChain: [],
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Empty query vector (round 1).',
@@ -846,10 +922,8 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
refineQueryBundle(bundle, anchorHits_v0, eventHits_v0);
metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start);
// 更新 focusEntitiesrefinement 可能扩展了)
metrics.anchor.focusEntities = bundle.focusEntities;
// Query lengths (v1/rerank updated here)
if (metrics.query?.lengths) {
metrics.query.lengths.v1Chars = bundle.queryText_v1 == null ? null : String(bundle.queryText_v1).length;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v1 || bundle.queryText_v0 || '').length;
@@ -887,12 +961,16 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
);
// ═══════════════════════════════════════════════════════════════════
// 阶段 5: Lexical Retrieval + Merge
// 阶段 5: Lexical Retrieval + L0 Merge
// ═══════════════════════════════════════════════════════════════════
const T_Lex_Start = performance.now();
let lexicalResult = { atomIds: [], atomFloors: new Set(), chunkIds: [], chunkFloors: new Set(), eventIds: [], chunkScores: [], searchTime: 0 };
let lexicalResult = {
atomIds: [], atomFloors: new Set(),
chunkIds: [], chunkFloors: new Set(),
eventIds: [], chunkScores: [], searchTime: 0,
};
try {
const index = await getLexicalIndex();
@@ -913,15 +991,11 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.lexical.terms = bundle.lexicalTerms.slice(0, 10);
}
// 合并 L0 floors
// 合并 L0 floorsdense + lexical
const anchorFloors = new Set(anchorFloors_dense);
for (const f of lexicalResult.atomFloors) {
anchorFloors.add(f);
}
// Lexical chunk floors 也加入(确保这些楼层的 chunks 被拉取)
for (const f of lexicalResult.chunkFloors) {
anchorFloors.add(f);
}
// 合并 L2 eventslexical 命中但 dense 未命中的 events
const existingEventIds = new Set(eventHits.map(e => e.event?.id).filter(Boolean));
@@ -953,10 +1027,10 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
);
// ═══════════════════════════════════════════════════════════════════
// 阶段 6: Evidence Pull + W-RRF Fusion + Cap100 + Rerank
// 阶段 6: L0-only W-RRF Fusion + Rerank ‖ 并发 L1 Cosine
// ═══════════════════════════════════════════════════════════════════
const evidenceChunks = await pullAndFuseEvidence(
const { l0Selected, l1ByFloor } = await locateAndPullEvidence(
anchorHits,
anchorFloors,
queryVector_v1,
@@ -996,24 +1070,23 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.event.entityNames = bundle.focusEntities;
metrics.event.entitiesUsed = bundle.focusEntities.length;
console.group('%c[Recall v6]', 'color: #7c3aed; font-weight: bold');
console.group('%c[Recall v7]', 'color: #7c3aed; font-weight: bold');
console.log(`Total: ${metrics.timing.total}ms`);
console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`);
console.log(`Focus: [${bundle.focusEntities.join(', ')}]`);
console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors.size} floors`);
console.log(`Lexical: atoms=${lexicalResult.atomIds.length} chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`);
console.log(`Fusion: dense=${metrics.fusion.denseCount} lex=${metrics.fusion.lexCount} anchor=${metrics.fusion.anchorCount} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`Evidence: ${metrics.evidence.merged} → rerank → ${evidenceChunks.length} (rerank ${metrics.evidence.rerankTime || 0}ms)`);
if (metrics.evidence.selectedByType) {
console.log(`Evidence types: anchor_virtual=${metrics.evidence.selectedByType.anchorVirtual} chunk_real=${metrics.evidence.selectedByType.chunkReal}`);
}
console.log(`Fusion (L0-only): dense=${metrics.fusion.denseCount} lex=${metrics.fusion.lexCount} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`L0 Rerank: ${metrics.evidence.beforeRerank || 0}${metrics.evidence.l0Selected || 0} (${metrics.evidence.rerankTime || 0}ms)`);
console.log(`L1 Pull: ${metrics.evidence.l1Pulled || 0} chunks → ${metrics.evidence.l1Attached || 0} attached (${metrics.evidence.l1CosineTime || 0}ms)`);
console.log(`Events: ${eventHits.length} hits, ${causalChain.length} causal`);
console.groupEnd();
return {
events: eventHits,
causalChain,
evidenceChunks,
l0Selected,
l1ByFloor,
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
metrics,