feat(recall): clamp focus weight and adjust pending context window

This commit is contained in:
2026-02-11 17:21:04 +08:00
parent 297cc03770
commit 816196a710
3 changed files with 544 additions and 327 deletions

View File

@@ -1,15 +1,22 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Recall Engine (v7 - Two-Stage: L0 Locate → L1 Evidence)
// Story Summary - Recall Engine (v8 - Weighted Query Vectors + Floor Aggregation)
//
// 命名规范:
// - 存储层用 L0/L1/L2/L3StateAtom/Chunk/Event/Fact
// - 召回层用语义名称anchor/evidence/event/constraint
//
// v7 → v8 变更:
// - Query 取 3 条消息(对齐 L0 对结构),加权向量合成替代文本拼接
// - R1 权重 [0.15, 0.30, 0.55](焦点 > 近上下文 > 远上下文)
// - R2 复用 R1 向量 + embed hints 1 条,权重 [0.10, 0.20, 0.45, 0.25]
// - Dense floor 聚合max → maxSim×0.6 + meanSim×0.4
// - Lexical floor 聚合max → maxScore × (1 + 0.3×log₂(hitCount))
//
// 架构:
// 阶段 1: Query Build确定性无 LLM
// 阶段 2: Round 1 Dense RetrievalL0 + L2
// 阶段 3: Query Refinement用已命中记忆增强
// 阶段 4: Round 2 Dense RetrievalL0 + L2
// 阶段 2: Round 1 Dense Retrievalbatch embed 3 段 → 加权平均
// 阶段 3: Query Refinement用已命中记忆产出 hints 段
// 阶段 4: Round 2 Dense Retrieval复用 R1 vec + embed hints → 加权平均
// 阶段 5: Lexical Retrieval
// 阶段 6: Floor W-RRF Fusion + Rerank + L1 配对
// 阶段 7: L1 配对组装L0 → top-1 AI L1 + top-1 USER L1
@@ -21,7 +28,14 @@ import { getAllStateVectors, getStateAtoms } from '../storage/state-store.js';
import { getEngineFingerprint, embed } from '../utils/embedder.js';
import { xbLog } from '../../../../core/debug-core.js';
import { getContext } from '../../../../../../../extensions.js';
import { buildQueryBundle, refineQueryBundle } from './query-builder.js';
import {
buildQueryBundle,
refineQueryBundle,
computeLengthFactor,
FOCUS_BASE_WEIGHT_R2,
CONTEXT_BASE_WEIGHTS_R2,
FOCUS_MIN_NORMALIZED_WEIGHT,
} from './query-builder.js';
import { getLexicalIndex, searchLexicalIndex } from './lexical-index.js';
import { rerankChunks } from '../llm/reranker.js';
import { createMetrics, calcSimilarityStats } from './metrics.js';
@@ -33,8 +47,9 @@ const MODULE_ID = 'recall';
// ═══════════════════════════════════════════════════════════════════════════
const CONFIG = {
// 窗口
LAST_MESSAGES_K: 2,
// 窗口:取 3 条消息(对齐 L0 USER+AI 对结构)
LAST_MESSAGES_K: 3,
LAST_MESSAGES_K_WITH_PENDING: 2, // pending 存在时只取 2 条上下文,避免形成 4 段
// Anchor (L0 StateAtoms)
ANCHOR_MIN_SIMILARITY: 0.58,
@@ -51,6 +66,13 @@ const CONFIG = {
RRF_W_LEX: 0.9,
FUSION_CAP: 60,
// Dense floor 聚合权重
DENSE_AGG_W_MAX: 0.6,
DENSE_AGG_W_MEAN: 0.4,
// Lexical floor 聚合密度加成
LEX_DENSITY_BONUS: 0.3,
// Rerankfloor-level
RERANK_TOP_N: 20,
RERANK_MIN_SCORE: 0.15,
@@ -66,9 +88,6 @@ const CONFIG = {
/**
* 计算余弦相似度
* @param {number[]} a
* @param {number[]} b
* @returns {number}
*/
function cosineSimilarity(a, b) {
if (!a?.length || !b?.length || a.length !== b.length) return 0;
@@ -83,8 +102,6 @@ function cosineSimilarity(a, b) {
/**
* 标准化字符串
* @param {string} s
* @returns {string}
*/
function normalize(s) {
return String(s || '')
@@ -96,12 +113,8 @@ function normalize(s) {
/**
* 获取最近消息
* @param {object[]} chat
* @param {number} count
* @param {boolean} excludeLastAi
* @returns {object[]}
*/
function getLastMessages(chat, count = 2, excludeLastAi = false) {
function getLastMessages(chat, count = 3, excludeLastAi = false) {
if (!chat?.length) return [];
let messages = [...chat];
if (excludeLastAi && messages.length > 0 && !messages[messages.length - 1]?.is_user) {
@@ -111,18 +124,128 @@ function getLastMessages(chat, count = 2, excludeLastAi = false) {
}
// ═══════════════════════════════════════════════════════════════════════════
// MMR 选择算法
// 加权向量工具
// ═══════════════════════════════════════════════════════════════════════════
/**
* Maximal Marginal Relevance 选择
* @param {object[]} candidates
* @param {number} k
* @param {number} lambda
* @param {Function} getVector
* @param {Function} getScore
* @returns {object[]}
* 多向量加权平均
*
* @param {number[][]} vectors - 向量数组
* @param {number[]} weights - 归一化后的权重sum = 1
* @returns {number[]|null}
*/
function weightedAverageVectors(vectors, weights) {
  if (!vectors?.length || !weights?.length || vectors.length !== weights.length) return null;
  // The first vector fixes the dimensionality. Bail out early if it is
  // missing or empty — the previous version threw a TypeError on a null
  // first vector (`vectors[0].length`).
  const dims = vectors[0]?.length;
  if (!dims) return null;
  const result = new Array(dims).fill(0);
  for (let i = 0; i < vectors.length; i++) {
    const v = vectors[i];
    // Skip missing OR dimension-mismatched vectors; a shorter vector would
    // otherwise contribute `undefined` terms and poison the sum with NaN.
    if (!v?.length || v.length !== dims) continue;
    const w = weights[i];
    for (let d = 0; d < dims; d++) {
      result[d] += w * v[d];
    }
  }
  return result;
}
/**
 * Enforce a hard minimum share for one slot of a normalized weight array.
 *
 * If the target slot already meets `minWeight`, the input is returned
 * untouched. Otherwise the target is raised to exactly `minWeight` and the
 * remaining slots are shrunk proportionally so the total stays at 1.
 *
 * @param {number[]} weights - normalized weights (sum ≈ 1)
 * @param {number} targetIdx - index of the slot to protect (focus segment)
 * @param {number} minWeight - minimum share for the target slot (0~1)
 * @returns {number[]} adjusted normalized weights
 */
function clampMinNormalizedWeight(weights, targetIdx, minWeight) {
  if (!weights?.length) return [];
  if (targetIdx < 0 || targetIdx >= weights.length) return weights;
  const focusWeight = weights[targetIdx];
  if (focusWeight >= minWeight) return weights;
  const restTotal = 1 - focusWeight;
  if (restTotal <= 0) {
    // Degenerate case: the target already holds all of the mass.
    return weights.map((_, i) => (i === targetIdx ? 1 : 0));
  }
  // Shrink every other slot by the same factor so the target reaches minWeight.
  const shrink = (1 - minWeight) / restTotal;
  const adjusted = weights.map((w, i) => (i === targetIdx ? minWeight : w * shrink));
  // Fold any floating-point drift back into the target slot.
  const total = adjusted.reduce((acc, w) => acc + w, 0);
  adjusted[targetIdx] += 1 - total;
  return adjusted;
}
/**
 * Compute normalized R1 segment weights: baseWeight × lengthFactor, then
 * normalize, then enforce the minimum share of the focus segment (which is
 * always the last segment by construction).
 *
 * @param {object[]} segments - QuerySegment[]
 * @returns {number[]} normalized weights
 */
function computeSegmentWeights(segments) {
  if (!segments?.length) return [];
  const count = segments.length;
  const raw = segments.map(seg => seg.baseWeight * computeLengthFactor(seg.charCount));
  const total = raw.reduce((acc, w) => acc + w, 0);
  // Degenerate total (all zero/negative) → fall back to a uniform distribution.
  const normalized = total <= 0
    ? raw.map(() => 1 / count)
    : raw.map(w => w / total);
  // The focus segment sits at the end; guarantee its minimum normalized share.
  return clampMinNormalizedWeight(normalized, count - 1, FOCUS_MIN_NORMALIZED_WEIGHT);
}
/**
 * Compute R2 weights: the R1 segments are re-based on the R2 base weights
 * plus an optional hints segment, then normalized.
 *
 * Context segments are tail-aligned against CONTEXT_BASE_WEIGHTS_R2 so the
 * most recent context keeps the highest base weight; the focus segment
 * (last R1 segment) receives FOCUS_BASE_WEIGHT_R2. Each base weight is
 * scaled by its segment's length factor before normalization, and the focus
 * minimum share is enforced at the end.
 *
 * @param {object[]} segments - QuerySegment[] (same segments as R1)
 * @param {object|null} hintsSegment - { text, baseWeight, charCount }
 * @returns {number[]} normalized weights (length = segments.length + (hints ? 1 : 0))
 */
function computeR2Weights(segments, hintsSegment) {
  if (!segments?.length) return [];
  // Assign R2 base weights to the R1 segments (tail-aligned)
  const contextCount = segments.length - 1;
  const r2Base = [];
  for (let i = 0; i < contextCount; i++) {
    const weightIdx = Math.max(0, CONTEXT_BASE_WEIGHTS_R2.length - contextCount + i);
    // `??` instead of `||`: a legitimate base weight of 0 is falsy and must
    // not be silently replaced by the first entry.
    r2Base.push(CONTEXT_BASE_WEIGHTS_R2[weightIdx] ?? CONTEXT_BASE_WEIGHTS_R2[0]);
  }
  r2Base.push(FOCUS_BASE_WEIGHT_R2);
  // Apply the per-segment length factor
  const adjusted = r2Base.map((w, i) => w * computeLengthFactor(segments[i].charCount));
  // Append the hints segment, if any
  if (hintsSegment) {
    adjusted.push(hintsSegment.baseWeight * computeLengthFactor(hintsSegment.charCount));
  }
  // Normalize
  const sum = adjusted.reduce((a, b) => a + b, 0);
  const normalized = sum <= 0
    ? adjusted.map(() => 1 / adjusted.length)
    : adjusted.map(w => w / sum);
  // In R2 the focus position is still "the last R1 segment" (hints come after it)
  const focusIdx = segments.length - 1;
  return clampMinNormalizedWeight(normalized, focusIdx, FOCUS_MIN_NORMALIZED_WEIGHT);
}
// ═══════════════════════════════════════════════════════════════════════════
// MMR 选择算法
// ═══════════════════════════════════════════════════════════════════════════
function mmrSelect(candidates, k, lambda, getVector, getScore) {
const selected = [];
const ids = new Set();
@@ -166,13 +289,6 @@ function mmrSelect(candidates, k, lambda, getVector, getScore) {
// [Anchors] L0 StateAtoms 检索
// ═══════════════════════════════════════════════════════════════════════════
/**
* 检索语义锚点
* @param {number[]} queryVector
* @param {object} vectorConfig
* @param {object|null} metrics
* @returns {Promise<{hits: object[], floors: Set<number>}>}
*/
async function recallAnchors(queryVector, vectorConfig, metrics) {
const { chatId } = getContext();
if (!chatId || !queryVector?.length) {
@@ -228,15 +344,6 @@ async function recallAnchors(queryVector, vectorConfig, metrics) {
// [Events] L2 Events 检索
// ═══════════════════════════════════════════════════════════════════════════
/**
* 检索事件
* @param {number[]} queryVector
* @param {object[]} allEvents
* @param {object} vectorConfig
* @param {string[]} focusEntities
* @param {object|null} metrics
* @returns {Promise<object[]>}
*/
async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities, metrics) {
const { chatId } = getContext();
if (!chatId || !queryVector?.length || !allEvents?.length) {
@@ -344,11 +451,6 @@ async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities,
// [Causation] 因果链追溯
// ═══════════════════════════════════════════════════════════════════════════
/**
* 构建事件索引
* @param {object[]} allEvents
* @returns {Map<string, object>}
*/
function buildEventIndex(allEvents) {
const map = new Map();
for (const e of allEvents || []) {
@@ -357,13 +459,6 @@ function buildEventIndex(allEvents) {
return map;
}
/**
* 追溯因果链
* @param {object[]} eventHits
* @param {Map<string, object>} eventIndex
* @param {number} maxDepth
* @returns {{results: object[], maxDepth: number}}
*/
function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MAX_DEPTH) {
const out = new Map();
const idRe = /^evt-\d+$/;
@@ -411,23 +506,9 @@ function traceCausation(eventHits, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MA
}
// ═══════════════════════════════════════════════════════════════════════════
// [W-RRF] 加权倒数排名融合(L0-only
// [W-RRF] 加权倒数排名融合(floor 粒度
// ═══════════════════════════════════════════════════════════════════════════
/**
* @typedef {object} RankedItem
* @property {string} id - 唯一标识符
* @property {number} score - 该路的原始分数
*/
/**
* W-RRF 加权倒数排名融合floor 粒度)
*
* @param {{id: number, score: number}[]} denseRank - Dense 路floor → max cosine降序
* @param {{id: number, score: number}[]} lexRank - Lexical 路floor → max bm25降序
* @param {number} cap - 输出上限
* @returns {{top: {id: number, fusionScore: number}[], totalUnique: number}}
*/
function fuseByFloor(denseRank, lexRank, cap = CONFIG.FUSION_CAP) {
const k = CONFIG.RRF_K;
const wD = CONFIG.RRF_W_DENSE;
@@ -464,16 +545,6 @@ function fuseByFloor(denseRank, lexRank, cap = CONFIG.FUSION_CAP) {
// [Stage 6] Floor 融合 + Rerank + L1 配对
// ═══════════════════════════════════════════════════════════════════════════
/**
* Floor 粒度融合 + Rerank + L1 配对
*
* @param {object[]} anchorHits - L0 dense 命中Round 2
* @param {number[]} queryVector - 查询向量v1
* @param {string} rerankQuery - rerank 查询文本(纯自然语言)
* @param {object} lexicalResult - 词法检索结果
* @param {object} metrics
* @returns {Promise<{l0Selected: object[], l1ByFloor: Map<number, {aiTop1: object|null, userTop1: object|null}>}>}
*/
async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexicalResult, metrics) {
const { chatId, chat, name1, name2 } = getContext();
if (!chatId) return { l0Selected: [], l1ByFloor: new Map() };
@@ -481,26 +552,36 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
const T_Start = performance.now();
// ─────────────────────────────────────────────────────────────────
// 6a. Dense floor rank每个 floor 取 max cosine
// 6a. Dense floor rank加权聚合maxSim×0.6 + meanSim×0.4
// ─────────────────────────────────────────────────────────────────
const denseFloorMap = new Map();
const denseFloorAgg = new Map();
for (const a of (anchorHits || [])) {
const cur = denseFloorMap.get(a.floor) || 0;
if (a.similarity > cur) denseFloorMap.set(a.floor, a.similarity);
const cur = denseFloorAgg.get(a.floor);
if (!cur) {
denseFloorAgg.set(a.floor, { maxSim: a.similarity, hitCount: 1, sumSim: a.similarity });
} else {
cur.maxSim = Math.max(cur.maxSim, a.similarity);
cur.hitCount++;
cur.sumSim += a.similarity;
}
}
const denseFloorRank = [...denseFloorMap.entries()]
.sort((a, b) => b[1] - a[1])
.map(([floor, score]) => ({ id: floor, score }));
const denseFloorRank = [...denseFloorAgg.entries()]
.map(([floor, info]) => ({
id: floor,
score: info.maxSim * CONFIG.DENSE_AGG_W_MAX
+ (info.sumSim / info.hitCount) * CONFIG.DENSE_AGG_W_MEAN,
}))
.sort((a, b) => b.score - a.score);
// ─────────────────────────────────────────────────────────────────
// 6b. Lexical floor rankchunkScores → floor 聚合 + USER→AI 映射 + 预过滤
// 6b. Lexical floor rank密度加成maxScore × (1 + 0.3×log₂(hitCount))
// ─────────────────────────────────────────────────────────────────
const atomFloorSet = new Set(getStateAtoms().map(a => a.floor));
const lexFloorScores = new Map();
const lexFloorAgg = new Map();
for (const { chunkId, score } of (lexicalResult?.chunkScores || [])) {
const match = chunkId?.match(/^c-(\d+)-/);
if (!match) continue;
@@ -519,13 +600,21 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
// 预过滤:必须有 L0 atoms
if (!atomFloorSet.has(floor)) continue;
const cur = lexFloorScores.get(floor) || 0;
if (score > cur) lexFloorScores.set(floor, score);
const cur = lexFloorAgg.get(floor);
if (!cur) {
lexFloorAgg.set(floor, { maxScore: score, hitCount: 1 });
} else {
cur.maxScore = Math.max(cur.maxScore, score);
cur.hitCount++;
}
}
const lexFloorRank = [...lexFloorScores.entries()]
.sort((a, b) => b[1] - a[1])
.map(([floor, score]) => ({ id: floor, score }));
const lexFloorRank = [...lexFloorAgg.entries()]
.map(([floor, info]) => ({
id: floor,
score: info.maxScore * (1 + CONFIG.LEX_DENSITY_BONUS * Math.log2(Math.max(1, info.hitCount))),
}))
.sort((a, b) => b.score - a.score);
// ─────────────────────────────────────────────────────────────────
// 6c. Floor W-RRF 融合
@@ -541,6 +630,8 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
metrics.fusion.totalUnique = totalUnique;
metrics.fusion.afterCap = fusedFloors.length;
metrics.fusion.time = fusionTime;
metrics.fusion.denseAggMethod = `max×${CONFIG.DENSE_AGG_W_MAX}+mean×${CONFIG.DENSE_AGG_W_MEAN}`;
metrics.fusion.lexDensityBonus = CONFIG.LEX_DENSITY_BONUS;
metrics.evidence.floorCandidates = fusedFloors.length;
}
@@ -617,7 +708,7 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
}
// ─────────────────────────────────────────────────────────────────
// 6f. 并发 Rerank
// 6f. Rerank
// ─────────────────────────────────────────────────────────────────
const T_Rerank_Start = performance.now();
@@ -647,7 +738,6 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
};
}
// document 平均长度
if (rerankCandidates.length > 0) {
const totalLen = rerankCandidates.reduce((s, c) => s + (c.text?.length || 0), 0);
metrics.evidence.rerankDocAvgLength = Math.round(totalLen / rerankCandidates.length);
@@ -666,6 +756,13 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
atomsByFloor.get(atom.floor).push(atom);
}
// 重建 denseFloorMap 以获取每层 max cosine用于 L0 similarity 标注)
const denseFloorMaxMap = new Map();
for (const a of (anchorHits || [])) {
const cur = denseFloorMaxMap.get(a.floor) || 0;
if (a.similarity > cur) denseFloorMaxMap.set(a.floor, a.similarity);
}
const l0Selected = [];
const l1ByFloor = new Map();
let contextPairsAdded = 0;
@@ -673,9 +770,9 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
for (const item of reranked) {
const floor = item.floor;
const rerankScore = item._rerankScore || 0;
const denseSim = denseFloorMap.get(floor) || 0;
const denseSim = denseFloorMaxMap.get(floor) || 0;
// 收集该 floor 所有 L0 atoms,共享 floor 的 rerankScore
// 收集该 floor 所有 L0 atoms
const floorAtoms = atomsByFloor.get(floor) || [];
for (const atom of floorAtoms) {
l0Selected.push({
@@ -735,22 +832,14 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
return { l0Selected, l1ByFloor };
}
// [L1] 拉取 + Cosine 打分(并发子任务)
// ═══════════════════════════════════════════════════════════════════════════
// [L1] 拉取 + Cosine 打分
// ═══════════════════════════════════════════════════════════════════════════
/**
* 从 IndexedDB 拉取指定楼层的 L1 chunks + 向量,用 queryVector cosine 打分
*
* @param {string} chatId
* @param {number[]} floors - 需要拉取的楼层列表
* @param {number[]} queryVector - 查询向量v1
* @param {object[]} chat - 聊天消息数组
* @returns {Promise<Map<number, object[]>>} floor → scored chunks带 _cosineScore
*/
async function pullAndScoreL1(chatId, floors, queryVector, chat) {
const T0 = performance.now();
/** @type {Map<number, object[]>} */
const result = new Map();
if (!chatId || !floors?.length || !queryVector?.length) {
@@ -758,7 +847,6 @@ async function pullAndScoreL1(chatId, floors, queryVector, chat) {
return result;
}
// 拉取 chunks
let dbChunks = [];
try {
dbChunks = await getChunksByFloors(chatId, floors);
@@ -773,7 +861,6 @@ async function pullAndScoreL1(chatId, floors, queryVector, chat) {
return result;
}
// 拉取向量
const chunkIds = dbChunks.map(c => c.chunkId);
let chunkVectors = [];
try {
@@ -786,7 +873,6 @@ async function pullAndScoreL1(chatId, floors, queryVector, chat) {
const vectorMap = new Map(chunkVectors.map(v => [v.chunkId, v.vector]));
// Cosine 打分 + 按楼层分组
for (const chunk of dbChunks) {
const vec = vectorMap.get(chunk.chunkId);
const cosineScore = vec?.length ? cosineSimilarity(queryVector, vec) : 0;
@@ -807,7 +893,6 @@ async function pullAndScoreL1(chatId, floors, queryVector, chat) {
result.get(chunk.floor).push(scored);
}
// 每楼层按 cosine 降序排序
for (const [, chunks] of result) {
chunks.sort((a, b) => b._cosineScore - a._cosineScore);
}
@@ -825,16 +910,6 @@ async function pullAndScoreL1(chatId, floors, queryVector, chat) {
// 主函数
// ═══════════════════════════════════════════════════════════════════════════
/**
* 执行记忆召回
*
* @param {object[]} allEvents - 所有事件L2
* @param {object} vectorConfig - 向量配置
* @param {object} options
* @param {boolean} options.excludeLastAi
* @param {string|null} options.pendingUserMessage
* @returns {Promise<object>}
*/
export async function recallMemory(allEvents, vectorConfig, options = {}) {
const T0 = performance.now();
const { chat } = getContext();
@@ -865,7 +940,10 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
const T_Build_Start = performance.now();
const lastMessages = getLastMessages(chat, CONFIG.LAST_MESSAGES_K, excludeLastAi);
const lastMessagesCount = pendingUserMessage
? CONFIG.LAST_MESSAGES_K_WITH_PENDING
: CONFIG.LAST_MESSAGES_K;
const lastMessages = getLastMessages(chat, lastMessagesCount, excludeLastAi);
const bundle = buildQueryBundle(lastMessages, pendingUserMessage);
@@ -873,29 +951,39 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.anchor.focusEntities = bundle.focusEntities;
if (metrics.query?.lengths) {
metrics.query.lengths.v0Chars = String(bundle.queryText_v0 || '').length;
metrics.query.lengths.v0Chars = bundle.querySegments.reduce((sum, s) => sum + s.text.length, 0);
metrics.query.lengths.v1Chars = null;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v0 || '').length;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || '').length;
}
xbLog.info(MODULE_ID,
`Query Build: focus=[${bundle.focusEntities.join(',')}] lexTerms=[${bundle.lexicalTerms.slice(0, 5).join(',')}]`
`Query Build: focus=[${bundle.focusEntities.join(',')}] segments=${bundle.querySegments.length} lexTerms=[${bundle.lexicalTerms.slice(0, 5).join(',')}]`
);
// ═══════════════════════════════════════════════════════════════════
// 阶段 2: Round 1 Dense Retrieval
// 阶段 2: Round 1 Dense Retrievalbatch embed → 加权平均)
// ═══════════════════════════════════════════════════════════════════
let queryVector_v0;
const segmentTexts = bundle.querySegments.map(s => s.text);
if (!segmentTexts.length) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'No query segments.',
metrics,
};
}
let r1Vectors;
try {
const [vec] = await embed([bundle.queryText_v0], vectorConfig, { timeout: 10000 });
queryVector_v0 = vec;
r1Vectors = await embed(segmentTexts, vectorConfig, { timeout: 10000 });
} catch (e1) {
xbLog.warn(MODULE_ID, 'Round 1 向量化失败500ms 后重试', e1);
await new Promise(r => setTimeout(r, 500));
try {
const [vec] = await embed([bundle.queryText_v0], vectorConfig, { timeout: 15000 });
queryVector_v0 = vec;
r1Vectors = await embed(segmentTexts, vectorConfig, { timeout: 15000 });
} catch (e2) {
xbLog.error(MODULE_ID, 'Round 1 向量化重试仍失败', e2);
metrics.timing.total = Math.round(performance.now() - T0);
@@ -909,13 +997,31 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
}
}
if (!r1Vectors?.length || r1Vectors.some(v => !v?.length)) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Empty query vectors (round 1).',
metrics,
};
}
const r1Weights = computeSegmentWeights(bundle.querySegments);
const queryVector_v0 = weightedAverageVectors(r1Vectors, r1Weights);
if (metrics) {
metrics.query.segmentWeights = r1Weights.map(w => Number(w.toFixed(3)));
}
if (!queryVector_v0?.length) {
metrics.timing.total = Math.round(performance.now() - T0);
return {
events: [], l0Selected: [], l1ByFloor: new Map(), causalChain: [],
focusEntities: bundle.focusEntities,
elapsed: metrics.timing.total,
logText: 'Empty query vector (round 1).',
logText: 'Weighted average produced empty vector.',
metrics,
};
}
@@ -929,7 +1035,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
const r1EventTime = Math.round(performance.now() - T_R1_Event_Start);
xbLog.info(MODULE_ID,
`Round 1: anchors=${anchorHits_v0.length} events=${eventHits_v0.length} (anchor=${r1AnchorTime}ms event=${r1EventTime}ms)`
`Round 1: anchors=${anchorHits_v0.length} events=${eventHits_v0.length} weights=[${r1Weights.map(w => w.toFixed(2)).join(',')}] (anchor=${r1AnchorTime}ms event=${r1EventTime}ms)`
);
// ═══════════════════════════════════════════════════════════════════
@@ -943,27 +1049,44 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start);
metrics.anchor.focusEntities = bundle.focusEntities;
if (metrics.query?.lengths) {
metrics.query.lengths.v1Chars = bundle.queryText_v1 == null ? null : String(bundle.queryText_v1).length;
metrics.query.lengths.rerankChars = String(bundle.rerankQuery || bundle.queryText_v1 || bundle.queryText_v0 || '').length;
// 更新 v1 长度指标
if (metrics.query?.lengths && bundle.hintsSegment) {
metrics.query.lengths.v1Chars = metrics.query.lengths.v0Chars + bundle.hintsSegment.text.length;
}
xbLog.info(MODULE_ID,
`Refinement: focus=[${bundle.focusEntities.join(',')}] hasV1=${!!bundle.queryText_v1} (${metrics.query.refineTime}ms)`
`Refinement: focus=[${bundle.focusEntities.join(',')}] hasHints=${!!bundle.hintsSegment} (${metrics.query.refineTime}ms)`
);
// ═══════════════════════════════════════════════════════════════════
// 阶段 4: Round 2 Dense Retrieval
// 阶段 4: Round 2 Dense Retrieval(复用 R1 向量 + embed hints
// ═══════════════════════════════════════════════════════════════════
const queryTextFinal = bundle.queryText_v1 || bundle.queryText_v0;
let queryVector_v1;
try {
const [vec] = await embed([queryTextFinal], vectorConfig, { timeout: 10000 });
queryVector_v1 = vec;
} catch (e) {
xbLog.warn(MODULE_ID, 'Round 2 向量化失败,降级使用 Round 1 向量', e);
if (bundle.hintsSegment) {
try {
const [hintsVec] = await embed([bundle.hintsSegment.text], vectorConfig, { timeout: 10000 });
if (hintsVec?.length) {
const r2Weights = computeR2Weights(bundle.querySegments, bundle.hintsSegment);
queryVector_v1 = weightedAverageVectors([...r1Vectors, hintsVec], r2Weights);
if (metrics) {
metrics.query.r2Weights = r2Weights.map(w => Number(w.toFixed(3)));
}
xbLog.info(MODULE_ID,
`Round 2 weights: [${r2Weights.map(w => w.toFixed(2)).join(',')}]`
);
} else {
queryVector_v1 = queryVector_v0;
}
} catch (e) {
xbLog.warn(MODULE_ID, 'Round 2 hints 向量化失败,降级使用 Round 1 向量', e);
queryVector_v1 = queryVector_v0;
}
} else {
queryVector_v1 = queryVector_v0;
}
@@ -1082,13 +1205,14 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
metrics.event.entityNames = bundle.focusEntities;
metrics.event.entitiesUsed = bundle.focusEntities.length;
console.group('%c[Recall v7]', 'color: #7c3aed; font-weight: bold');
console.group('%c[Recall v8]', 'color: #7c3aed; font-weight: bold');
console.log(`Total: ${metrics.timing.total}ms`);
console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`);
console.log(`R1 weights: [${r1Weights.map(w => w.toFixed(2)).join(', ')}]`);
console.log(`Focus: [${bundle.focusEntities.join(', ')}]`);
console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors_dense.size} floors`);
console.log(`Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`);
console.log(`Fusion (floor): dense=${metrics.fusion.denseFloors} lex=${metrics.fusion.lexFloors} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`Fusion (floor, weighted): dense=${metrics.fusion.denseFloors} lex=${metrics.fusion.lexFloors} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
console.log(`Floor Rerank: ${metrics.evidence.beforeRerank || 0}${metrics.evidence.floorsSelected || 0} floors → L0=${metrics.evidence.l0Collected || 0} (${metrics.evidence.rerankTime || 0}ms)`);
console.log(`L1: ${metrics.evidence.l1Pulled || 0} pulled → ${metrics.evidence.l1Attached || 0} attached (${metrics.evidence.l1CosineTime || 0}ms)`);
console.log(`Events: ${eventHits.length} hits, ${causalChain.length} causal`);