feat(summary): update prompt display, metrics lexical gate, and edge sanitization
@@ -1,5 +1,5 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Prompt Injection (v6 - EvidenceGroup: per-floor L0 + shared L1)
// Story Summary - Prompt Injection (v7 - L0 scene-based display)
//
// Naming conventions:
// - Storage layers use L0/L1/L2/L3 (StateAtom/Chunk/Event/Fact)
@@ -7,11 +7,7 @@
//
// Architecture changes (v5 → v6):
// - Multiple L0s on the same floor share one pair of L1s (EvidenceGroup per floor)
// - L0 display text is assembled from the structured fields (type/subject/object/value/location) instead of the semantic field
// - Remove the <type> tag and [tags] theme labels; output short natural-language sentences
// - Join short lines with semicolons, wrap long lines (120-character threshold)
//
// Responsibilities:
// - L0 display text uses the semantic field directly (v7: scene summary, pure natural language)
// - Only builds the injection text; does not write extension_prompts
// - Injection happens in story-summary.js, which writes extension_prompts on GENERATION_STARTED
// ═══════════════════════════════════════════════════════════════════════════

@@ -297,84 +293,16 @@ function formatArcLine(arc) {
}

/**
 * Build a readable short sentence from the atom's structured fields (independent of the semantic field)
 * Get the display text for an L0
 *
 * Rules:
 * - act: subject + predicate + object
 * - emo: subject + predicate + (toward object)
 * - rev: reveal: predicate + (about object)
 * - dec: subject + predicate + (toward object)
 * - ten: between subject and object: predicate
 * - loc: scene: location or predicate
 * - If location is non-empty and the type is not loc, append the suffix "在{location}"
 * v7: the L0 semantic field is already a pure natural-language scene summary (60-100 chars); use it directly.
 *
 * @param {object} l0 - L0 object (contains l0.atom)
 * @returns {string} Readable short sentence
 * @param {object} l0 - L0 object
 * @returns {string} Scene description text
 */
function buildL0DisplayText(l0) {
    const atom = l0.atom || l0._atom || {};
    const type = atom.type || 'act';
    const subject = String(atom.subject || '').trim();
    const object = String(atom.object || '').trim();
    const value = String(atom.value || '').trim();
    const location = String(atom.location || '').trim();

    if (!value && !subject) {
        // Fallback: if the structured fields are missing, fall back to semantic and strip its tags
        const semantic = String(atom.semantic || l0.text || '').trim();
        return semantic
            .replace(/^<\w+>\s*/, '')
            .replace(/\s*\[[\w/]+\]\s*$/, '')
            .trim() || '(未知锚点)';
    }

    let result = '';

    switch (type) {
        case 'emo':
            result = `${subject}${value}`;
            if (object) result += `(对${object})`;
            break;

        case 'act':
            result = `${subject}${value}`;
            if (object) result += ` → ${object}`;
            break;

        case 'rev':
            result = `揭示:${value}`;
            if (object) result += `(关于${object})`;
            break;

        case 'dec':
            result = `${subject}${value}`;
            if (object) result += `(对${object})`;
            break;

        case 'ten':
            if (object) {
                result = `${subject}与${object}之间:${value}`;
            } else {
                result = `${subject}:${value}`;
            }
            break;

        case 'loc':
            result = `场景:${location || value}`;
            break;

        default:
            result = `${subject}${value}`;
            if (object) result += ` → ${object}`;
            break;
    }

    // Location suffix (the loc type already contains the location; do not repeat it)
    if (location && type !== 'loc') {
        result += ` 在${location}`;
    }

    return result.trim();
    const atom = l0.atom || {};
    return String(atom.scene || atom.semantic || l0.text || '').trim() || '(未知锚点)';
}

/**

@@ -1,5 +1,13 @@
// ============================================================================
// atom-extraction.js - L0 narrative anchor extraction (three-tier themes edition)
// atom-extraction.js - L0 scene anchor extraction (v2 - scene summary + graph structure)
//
// Design rationale:
// - BGE-M3 (BAAI, 2024): natural-language passages retrieve with the highest precision → semantic = pure natural language
// - Interpersonal Circumplex (Kiesler, 1983): power axis + affect axis → dynamics enum
// - Labov Narrative Structure (1972): narrative-function axis → supplements the dynamics enum
// - TransE (Bordes, 2013): directional s/t/r triples → edges format
//
// 1-2 scene anchors per floor (not fragmented atoms), 60-100 character scene summaries
// ============================================================================

import { callLLM, parseJson } from './llm-service.js';

@@ -25,10 +33,30 @@ export function isBatchCancelled() {
}

// ============================================================================
// L0 extraction prompt (three-tier themes)
// dynamics closed enum (8 tags: two axes / four quadrants + a narrative axis)
// ============================================================================

const SYSTEM_PROMPT = `你是叙事锚点提取器。从一轮对话中提取4-8个关键锚点,用于后续语义检索。
const VALID_DYNAMICS = new Set([
    // Power axis (Interpersonal Circumplex: Dominance-Submission)
    '支配', // control, command, scrutiny, intimidation, leading
    '让渡', // compliance, obedience, submission, passivity, cooperation

    // Affect axis (Interpersonal Circumplex: Hostility-Friendliness)
    '亲密', // tenderness, care, dependence, trust, connection
    '敌意', // confrontation, rejection, attack, mockery, exclusion

    // Narrative axis (Labov Narrative Structure)
    '揭示', // truth, discovery, confession, exposure, secrets
    '决意', // choice, promise, renunciation, declaration, turning point
    '张力', // suspense, standoff, undercurrent, unease, waiting
    '丧失', // separation, death, shattering, regret, collapse
]);

// ============================================================================
// L0 extraction prompt
// ============================================================================

const SYSTEM_PROMPT = `你是场景摘要器。从一轮对话中提取1-2个场景锚点,用于语义检索和关系追踪。

输入格式:
<round>
@@ -37,126 +65,50 @@ const SYSTEM_PROMPT = `你是叙事锚点提取器。从一轮对话中提取4-8
</round>

只输出严格JSON:
{"atoms":[{"t":"类型","s":"主体","o":"客体","v":"谓词","l":"地点","f":"来源","th":{"fn":[],"pt":[],"kw":[]}}]}

## 类型(t)
- emo: 情绪状态变化
- act: 关键动作/行为
- rev: 揭示/发现/真相
- dec: 决定/承诺/宣言
- ten: 冲突/张力/对立
- loc: 场景/地点变化

## 字段说明
- s: 主体(必填)
- o: 客体(可空)
- v: 谓词,15字内(必填)
- l: 地点(可空)
- f: "u"=用户 / "a"=角色(必填)
- th: 主题标签(必填,结构化对象)

## th 三层结构
fn(叙事功能)1-2个,枚举:
establish=建立设定 | escalate=升级加剧 | reveal=揭示发现 | challenge=挑战试探
commit=承诺锁定 | conflict=冲突对抗 | resolve=解决收束 | transform=转变逆转
bond=连接羁绊 | break=断裂破坏

pt(互动模式)1-3个,枚举:
power_down=上对下 | power_up=下对上 | power_equal=对等 | power_contest=争夺
asymmetric=信息不对称 | witnessed=有观众 | secluded=隔绝私密
ritual=仪式正式 | routine=日常惯例 | triangular=三方介入

kw(具体关键词)1-3个,自由格式

## 示例输出
{"atoms":[
{"t":"act","s":"艾拉","o":"古龙","v":"用圣剑刺穿心脏","l":"火山口","f":"a",
"th":{"fn":["commit"],"pt":["power_down","ritual"],"kw":["战斗","牺牲"]}},
{"t":"emo","s":"林夏","o":"陆远","v":"意识到自己喜欢他","l":"","f":"a",
"th":{"fn":["reveal","escalate"],"pt":["asymmetric","secluded"],"kw":["心动","暗恋"]}},
{"t":"dec","s":"凯尔","o":"王国","v":"放弃王位继承权","l":"王座厅","f":"a",
"th":{"fn":["commit","break"],"pt":["ritual","witnessed"],"kw":["抉择","自由"]}},
{"t":"rev","s":"","o":"","v":"管家其实是间谍","l":"","f":"a",
"th":{"fn":["reveal"],"pt":["asymmetric"],"kw":["背叛","真相"]}},
{"t":"ten","s":"兄弟二人","o":"","v":"为遗产反目","l":"","f":"a",
"th":{"fn":["conflict","break"],"pt":["power_contest"],"kw":["冲突","亲情破裂"]}}
{"anchors":[
{
"scene": "60-100字完整场景描述",
"who": ["角色名1","角色名2"],
"edges": [{"s":"施事方","t":"受事方","r":"互动行为"}],
"dynamics": ["标签"],
"where": "地点"
}
]}

规则:
- 只提取对未来检索有价值的锚点
- fn 回答"这在故事里推动了什么"
- pt 回答"这是什么结构的互动"
- kw 用于细粒度检索
- 无明显锚点时返回 {"atoms":[]}`;
## scene 写法
- 纯自然语言,像旁白或日记,不要任何标签/标记/枚举值
- 必须包含:角色名、动作、情感氛围、关键细节
- 读者只看 scene 就能复原这一幕
- 60-100字,信息密集但流畅

const JSON_PREFILL = '{"atoms":[';
## who
- 参与互动的角色正式名称,不用代词或别称

// ============================================================================
// Semantic construction
// ============================================================================
## edges(关系三元组)
- s=施事方 t=受事方 r=互动行为(10-15字)
- 每个锚点 1-3 条

function buildSemantic(atom, userName, aiName) {
    const type = atom.t || 'act';
    const subject = atom.s || (atom.f === 'u' ? userName : aiName);
    const object = atom.o || '';
    const verb = atom.v || '';
    const location = atom.l || '';

    // Merge the three theme tiers
    const th = atom.th || {};
    const tags = [
        ...(Array.isArray(th.fn) ? th.fn : []),
        ...(Array.isArray(th.pt) ? th.pt : []),
        ...(Array.isArray(th.kw) ? th.kw : []),
    ].filter(Boolean);
## dynamics(封闭枚举,选0-2个)
权力轴:支配(控制/命令/审视) | 让渡(顺从/服从/屈服)
情感轴:亲密(温柔/信任/连接) | 敌意(对抗/拒绝/攻击)
叙事轴:揭示(真相/秘密) | 决意(选择/承诺) | 张力(对峙/不安) | 丧失(分离/破碎)
纯日常无明显模式时 dynamics 为 []

    const typePart = `<${type}>`;
    const themePart = tags.length > 0 ? ` [${tags.join('/')}]` : '';
    const locPart = location ? ` 在${location}` : '';
    const objPart = object ? ` -> ${object}` : '';
## where
- 场景地点,无明确地点时空字符串

    let semantic = '';
    switch (type) {
        case 'emo':
            semantic = object
                ? `${typePart} ${subject} -> ${verb} (对${object})${locPart}`
                : `${typePart} ${subject} -> ${verb}${locPart}`;
            break;
## 数量规则
- 最多2个。1个够时不凑2个
- 明显场景切换(地点/时间/对象变化)时才2个
- 同一场景不拆分
- 无角色互动时返回 {"anchors":[]}

        case 'act':
            semantic = `${typePart} ${subject} -> ${verb}${objPart}${locPart}`;
            break;
## 示例
输入:艾拉在火山口举起圣剑刺穿古龙心脏,龙血溅满她的铠甲,她跪倒在地痛哭
输出:
{"anchors":[{"scene":"火山口上艾拉举起圣剑刺穿古龙的心脏,龙血溅满铠甲,古龙轰然倒地,艾拉跪倒在滚烫的岩石上痛哭,完成了她不得不做的弑杀","who":["艾拉","古龙"],"edges":[{"s":"艾拉","t":"古龙","r":"以圣剑刺穿心脏"}],"dynamics":["决意","丧失"],"where":"火山口"}]}`;

        case 'rev':
            semantic = object
                ? `${typePart} 揭示: ${verb} (关于${object})${locPart}`
                : `${typePart} 揭示: ${verb}${locPart}`;
            break;

        case 'dec':
            semantic = object
                ? `${typePart} ${subject} -> ${verb} (对${object})${locPart}`
                : `${typePart} ${subject} -> ${verb}${locPart}`;
            break;

        case 'ten':
            semantic = object
                ? `${typePart} ${subject} <-> ${object}: ${verb}${locPart}`
                : `${typePart} ${subject}: ${verb}${locPart}`;
            break;

        case 'loc':
            semantic = location
                ? `${typePart} 场景: ${location} - ${verb}`
                : `${typePart} 场景: ${verb}`;
            break;

        default:
            semantic = `${typePart} ${subject} -> ${verb}${objPart}${locPart}`;
    }

    return semantic + themePart;
}
const JSON_PREFILL = '{"anchors":[';

// ============================================================================
// Sleep utility
@@ -164,6 +116,100 @@ function buildSemantic(atom, userName, aiName) {

const sleep = (ms) => new Promise(r => setTimeout(r, ms));

// ============================================================================
// Sanitization and construction
// ============================================================================

/**
 * Sanitize dynamics tags, keeping only valid enum values
 * @param {string[]} raw
 * @returns {string[]}
 */
function sanitizeDynamics(raw) {
    if (!Array.isArray(raw)) return [];
    return raw
        .map(d => String(d || '').trim())
        .filter(d => VALID_DYNAMICS.has(d))
        .slice(0, 2);
}

/**
 * Sanitize edge triples
 * @param {object[]} raw
 * @returns {object[]}
 */
function sanitizeEdges(raw) {
    if (!Array.isArray(raw)) return [];
    return raw
        .filter(e => e && typeof e === 'object')
        .map(e => ({
            s: String(e.s || '').trim(),
            t: String(e.t || '').trim(),
            r: String(e.r || '').trim().slice(0, 30),
        }))
        .filter(e => e.s && e.t && e.r)
        .slice(0, 3);
}

/**
 * Sanitize the who list
 * @param {string[]} raw
 * @returns {string[]}
 */
function sanitizeWho(raw) {
    if (!Array.isArray(raw)) return [];
    const seen = new Set();
    return raw
        .map(w => String(w || '').trim())
        .filter(w => {
            if (!w || w.length < 1 || seen.has(w)) return false;
            seen.add(w);
            return true;
        })
        .slice(0, 6);
}

/**
 * Convert a parsed anchor into an atom storage object
 *
 * semantic = scene (pure natural language, used directly for embedding)
 *
 * @param {object} anchor - Anchor object output by the LLM
 * @param {number} aiFloor - Floor number of the AI message
 * @param {number} idx - Index within the floor (0 or 1)
 * @returns {object|null} Atom object
 */
function anchorToAtom(anchor, aiFloor, idx) {
    const scene = String(anchor.scene || '').trim();
    if (!scene) return null;

    // A scene shorter than 15 characters is probably noise
    if (scene.length < 15) return null;

    const who = sanitizeWho(anchor.who);
    const edges = sanitizeEdges(anchor.edges);
    const dynamics = sanitizeDynamics(anchor.dynamics);
    const where = String(anchor.where || '').trim();

    return {
        atomId: `atom-${aiFloor}-${idx}`,
        floor: aiFloor,
        source: 'ai',

        // ═══ Retrieval layer (the sole input to embedding) ═══
        semantic: scene,

        // ═══ Scene data ═══
        scene,

        // ═══ Graph-structure layer (keys for propagation) ═══
        who,
        edges,
        dynamics,
        where,
    };
}

// ============================================================================
// Single-round extraction (with retry)
// ============================================================================
@@ -196,8 +242,8 @@ async function extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, op
        { role: 'user', content: input },
        { role: 'assistant', content: JSON_PREFILL },
    ], {
        temperature: 0.2,
        max_tokens: 1000,
        temperature: 0.3,
        max_tokens: 600,
        timeout,
    });

@@ -216,7 +262,7 @@ async function extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, op
    try {
        parsed = parseJson(fullJson);
    } catch (e) {
        xbLog.warn(MODULE_ID, `floor ${aiFloor} JSON解析失败`);
        xbLog.warn(MODULE_ID, `floor ${aiFloor} JSON解析失败 (attempt ${attempt})`);
        if (attempt < RETRY_COUNT) {
            await sleep(RETRY_DELAY);
            continue;
@@ -224,7 +270,9 @@ async function extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, op
        return null;
    }

    if (!parsed?.atoms || !Array.isArray(parsed.atoms)) {
    // Compatibility: prefer anchors, fall back to atoms
    const rawAnchors = parsed?.anchors || parsed?.atoms;
    if (!rawAnchors || !Array.isArray(rawAnchors)) {
        if (attempt < RETRY_COUNT) {
            await sleep(RETRY_DELAY);
            continue;
@@ -232,22 +280,13 @@ async function extractAtomsForRoundWithRetry(userMessage, aiMessage, aiFloor, op
        return null;
    }

    const filtered = parsed.atoms
        .filter(a => a?.t && a?.v)
        .map((a, idx) => ({
            atomId: `atom-${aiFloor}-${idx}`,
            floor: aiFloor,
            type: a.t,
            subject: a.s || null,
            object: a.o || null,
            value: String(a.v).slice(0, 50),
            location: a.l || null,
            source: a.f === 'u' ? 'user' : 'ai',
            themes: a.th || { fn: [], pt: [], kw: [] },
            semantic: buildSemantic(a, userName, aiName),
        }));
    // Convert to the atom storage format (at most 2)
    const atoms = rawAnchors
        .slice(0, 2)
        .map((a, idx) => anchorToAtom(a, aiFloor, idx))
        .filter(Boolean);

    return filtered;
    return atoms;

} catch (e) {
    if (batchCancelled) return null;
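
As a rough usage sketch, the sanitizers and anchorToAtom() above would turn the prompt's example anchor into a stored atom like this (the floor number and the call itself are hypothetical):

// Feeding the prompt's sample output through anchorToAtom().
const sampleAnchor = {
    scene: '火山口上艾拉举起圣剑刺穿古龙的心脏,龙血溅满铠甲,古龙轰然倒地,艾拉跪倒在滚烫的岩石上痛哭,完成了她不得不做的弑杀',
    who: ['艾拉', '古龙'],
    edges: [{ s: '艾拉', t: '古龙', r: '以圣剑刺穿心脏' }],
    dynamics: ['决意', '丧失'],
    where: '火山口',
};
const atom = anchorToAtom(sampleAnchor, 42, 0);
// → { atomId: 'atom-42-0', floor: 42, source: 'ai', semantic: <scene>, scene, who, edges, dynamics, where }
// Both dynamics tags pass VALID_DYNAMICS, and the single edge keeps its s/t/r fields.
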
@@ -1,5 +1,10 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Metrics Collector (v5 - Weighted Query + Floor Aggregation)
// Story Summary - Metrics Collector (v6 - Dense-Gated Lexical)
//
// v5 → v6 changes:
// - lexical: added eventFilteredByDense / floorFilteredByDense
// - event: the entityFilter bypass threshold is now CONFIG-driven (0.80)
// - Everything else is structurally unchanged
//
// v4 → v5 changes:
// - query: added segmentWeights / r2Weights (weighted-vector diagnostics)
@@ -44,6 +49,8 @@ export function createMetrics() {
        chunkHits: 0,
        eventHits: 0,
        searchTime: 0,
        eventFilteredByDense: 0,
        floorFilteredByDense: 0,
    },

    // Fusion (W-RRF, floor-level) - multi-channel fusion
@@ -229,7 +236,14 @@ export function formatMetricsLog(metrics) {
    lines.push(`├─ atom_hits: ${m.lexical.atomHits}`);
    lines.push(`├─ chunk_hits: ${m.lexical.chunkHits}`);
    lines.push(`├─ event_hits: ${m.lexical.eventHits}`);
    lines.push(`└─ search_time: ${m.lexical.searchTime}ms`);
    lines.push(`├─ search_time: ${m.lexical.searchTime}ms`);
    if (m.lexical.eventFilteredByDense > 0) {
        lines.push(`├─ event_filtered_by_dense: ${m.lexical.eventFilteredByDense}`);
    }
    if (m.lexical.floorFilteredByDense > 0) {
        lines.push(`├─ floor_filtered_by_dense: ${m.lexical.floorFilteredByDense}`);
    }
    lines.push(`└─ dense_gate_threshold: 0.50`);
    lines.push('');

    // Fusion (W-RRF, floor-level)
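
With both counters populated, the lexical block built by formatMetricsLog() above would print roughly like this (values are illustrative):

├─ atom_hits: 12
├─ chunk_hits: 8
├─ event_hits: 5
├─ search_time: 34ms
├─ event_filtered_by_dense: 2
├─ floor_filtered_by_dense: 3
└─ dense_gate_threshold: 0.50
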
@@ -1,23 +1,23 @@
// ═══════════════════════════════════════════════════════════════════════════
// Story Summary - Recall Engine (v8 - Weighted Query Vectors + Floor Aggregation)
// Story Summary - Recall Engine (v9 - Dense-Gated Lexical + Entity Bypass Tuning)
//
// Naming conventions:
// - Storage layers use L0/L1/L2/L3 (StateAtom/Chunk/Event/Fact)
// - The recall layer uses semantic names: anchor/evidence/event/constraint
//
// v7 → v8 changes:
// - Query takes 3 messages (aligned with the L0 pair structure); weighted vector composition replaces text concatenation
// - R1 weights [0.15, 0.30, 0.55] (focus > near context > far context)
// - R2 reuses the R1 vectors + embeds 1 hints segment, weights [0.10, 0.20, 0.45, 0.25]
// - Dense floor aggregation: max → maxSim×0.6 + meanSim×0.4
// - Lexical floor aggregation: max → maxScore × (1 + 0.3×log₂(hitCount))
// v8 → v9 changes:
// - recallEvents() returns { events, vectorMap }, exposing the event vector map
// - Lexical events must pass dense similarity ≥ 0.50 before merging (CONFIG.LEXICAL_EVENT_DENSE_MIN)
// - Lexical floors must pass dense similarity ≥ 0.50 before entering fusion (CONFIG.LEXICAL_FLOOR_DENSE_MIN)
// - Entity bypass threshold 0.85 → 0.80 (CONFIG.EVENT_ENTITY_BYPASS_SIM)
// - metrics: added lexical.eventFilteredByDense / lexical.floorFilteredByDense
//
// Architecture:
// Stage 1: Query Build (deterministic, no LLM)
// Stage 2: Round 1 Dense Retrieval (batch embed 3 segments → weighted average)
// Stage 3: Query Refinement (produce a hints segment from already-hit memories)
// Stage 4: Round 2 Dense Retrieval (reuse R1 vectors + embed hints → weighted average)
// Stage 5: Lexical Retrieval
// Stage 5: Lexical Retrieval + Dense-Gated Event Merge
// Stage 6: Floor W-RRF Fusion + Rerank + L1 pairing
// Stage 7: L1 pairing assembly (L0 → top-1 AI L1 + top-1 USER L1)
// Stage 8: Causation Trace
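
For reference, the two v8 floor-aggregation formulas listed in the header above reduce to roughly the following sketch (function names are hypothetical; the actual aggregation lives in unchanged parts of this file):

// Dense floor score: blend the best and the mean anchor similarity on a floor.
function denseFloorScore(sims) {
    if (!sims.length) return 0;
    const maxSim = Math.max(...sims);
    const meanSim = sims.reduce((a, b) => a + b, 0) / sims.length;
    return maxSim * 0.6 + meanSim * 0.4;
}

// Lexical floor score: boost the best chunk score by hit density on the floor.
function lexFloorScore(maxScore, hitCount) {
    return maxScore * (1 + 0.3 * Math.log2(hitCount));
}
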
@@ -47,9 +47,9 @@ const MODULE_ID = 'recall';
// ═══════════════════════════════════════════════════════════════════════════

const CONFIG = {
    // Window: take 3 messages (aligned with the L0 USER+AI pair structure)
    // Window: take 3 messages (aligned with the L0 pair structure); take 2 context messages when a pending message exists
    LAST_MESSAGES_K: 3,
    LAST_MESSAGES_K_WITH_PENDING: 2, // take only 2 context messages when a pending message exists, to avoid forming 4 segments
    LAST_MESSAGES_K_WITH_PENDING: 2,

    // Anchor (L0 StateAtoms)
    ANCHOR_MIN_SIMILARITY: 0.58,
@@ -59,6 +59,11 @@ const CONFIG = {
    EVENT_SELECT_MAX: 50,
    EVENT_MIN_SIMILARITY: 0.55,
    EVENT_MMR_LAMBDA: 0.72,
    EVENT_ENTITY_BYPASS_SIM: 0.80,

    // Lexical dense gate thresholds
    LEXICAL_EVENT_DENSE_MIN: 0.50,
    LEXICAL_FLOOR_DENSE_MIN: 0.50,

    // W-RRF fusion (L0-only)
    RRF_K: 60,
@@ -86,9 +91,6 @@ const CONFIG = {
// Utility functions
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Compute cosine similarity
 */
function cosineSimilarity(a, b) {
    if (!a?.length || !b?.length || a.length !== b.length) return 0;
    let dot = 0, nA = 0, nB = 0;
@@ -100,9 +102,6 @@ function cosineSimilarity(a, b) {
    return nA && nB ? dot / (Math.sqrt(nA) * Math.sqrt(nB)) : 0;
}

/**
 * Normalize a string
 */
function normalize(s) {
    return String(s || '')
        .normalize('NFKC')
@@ -111,9 +110,6 @@ function normalize(s) {
        .toLowerCase();
}

/**
 * Get the most recent messages
 */
function getLastMessages(chat, count = 3, excludeLastAi = false) {
    if (!chat?.length) return [];
    let messages = [...chat];
@@ -127,13 +123,6 @@ function getLastMessages(chat, count = 3, excludeLastAi = false) {
// Weighted-vector utilities
// ═══════════════════════════════════════════════════════════════════════════

/**
 * Weighted average of multiple vectors
 *
 * @param {number[][]} vectors - Array of vectors
 * @param {number[]} weights - Normalized weights (sum = 1)
 * @returns {number[]|null}
 */
function weightedAverageVectors(vectors, weights) {
    if (!vectors?.length || !weights?.length || vectors.length !== weights.length) return null;

@@ -152,14 +141,6 @@ function weightedAverageVectors(vectors, weights) {
    return result;
}

/**
 * Apply a hard "minimum share for the target slot" floor to normalized weights
 *
 * @param {number[]} weights - Already-normalized weights (sum ≈ 1)
 * @param {number} targetIdx - Target position (index of the focus segment)
 * @param {number} minWeight - Minimum share (0~1)
 * @returns {number[]} Adjusted normalized weights
 */
function clampMinNormalizedWeight(weights, targetIdx, minWeight) {
    if (!weights?.length) return [];
    if (targetIdx < 0 || targetIdx >= weights.length) return weights;
@@ -178,18 +159,11 @@ function clampMinNormalizedWeight(weights, targetIdx, minWeight) {
    const scale = remain / otherSum;

    const out = weights.map((w, i) => (i === targetIdx ? minWeight : w * scale));
    // Numerical stability: cancel out floating-point drift
    const drift = 1 - out.reduce((a, b) => a + b, 0);
    out[targetIdx] += drift;
    return out;
}

/**
 * Compute R1 segment weights (baseWeight × lengthFactor, normalized)
 *
 * @param {object[]} segments - QuerySegment[]
 * @returns {number[]} Normalized weights
 */
function computeSegmentWeights(segments) {
    if (!segments?.length) return [];

@@ -199,22 +173,13 @@ function computeSegmentWeights(segments) {
        ? segments.map(() => 1 / segments.length)
        : adjusted.map(w => w / sum);

    // The focus segment is always last
    const focusIdx = segments.length - 1;
    return clampMinNormalizedWeight(normalized, focusIdx, FOCUS_MIN_NORMALIZED_WEIGHT);
}

/**
 * Compute R2 weights (R1 segments get R2 base weights + a hints segment, normalized)
 *
 * @param {object[]} segments - QuerySegment[] (the same segments as R1)
 * @param {object|null} hintsSegment - { text, baseWeight, charCount }
 * @returns {number[]} Normalized weights (length = segments.length + (hints ? 1 : 0))
 */
function computeR2Weights(segments, hintsSegment) {
    if (!segments?.length) return [];

    // Assign R2 base weights to the R1 segments (aligned from the tail)
    const contextCount = segments.length - 1;
    const r2Base = [];
    for (let i = 0; i < contextCount; i++) {
@@ -223,21 +188,17 @@ function computeR2Weights(segments, hintsSegment) {
    }
    r2Base.push(FOCUS_BASE_WEIGHT_R2);

    // Apply lengthFactor
    const adjusted = r2Base.map((w, i) => w * computeLengthFactor(segments[i].charCount));

    // Append hints
    if (hintsSegment) {
        adjusted.push(hintsSegment.baseWeight * computeLengthFactor(hintsSegment.charCount));
    }

    // Normalize
    const sum = adjusted.reduce((a, b) => a + b, 0);
    const normalized = sum <= 0
        ? adjusted.map(() => 1 / adjusted.length)
        : adjusted.map(w => w / sum);

    // In R2 the focus position is fixed to the last of the segments
    const focusIdx = segments.length - 1;
    return clampMinNormalizedWeight(normalized, focusIdx, FOCUS_MIN_NORMALIZED_WEIGHT);
}
@@ -342,26 +303,27 @@ async function recallAnchors(queryVector, vectorConfig, metrics) {

// ═══════════════════════════════════════════════════════════════════════════
// [Events] L2 event retrieval
// Returns { events, vectorMap }
// ═══════════════════════════════════════════════════════════════════════════

async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities, metrics) {
    const { chatId } = getContext();
    if (!chatId || !queryVector?.length || !allEvents?.length) {
        return [];
        return { events: [], vectorMap: new Map() };
    }

    const meta = await getMeta(chatId);
    const fp = getEngineFingerprint(vectorConfig);
    if (meta.fingerprint && meta.fingerprint !== fp) {
        xbLog.warn(MODULE_ID, 'Event fingerprint 不匹配');
        return [];
        return { events: [], vectorMap: new Map() };
    }

    const eventVectors = await getAllEventVectors(chatId);
    const vectorMap = new Map(eventVectors.map(v => [v.eventId, v.vector]));

    if (!vectorMap.size) {
        return [];
        return { events: [], vectorMap };
    }

    const focusSet = new Set((focusEntities || []).map(normalize));
@@ -400,7 +362,7 @@ async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities,
    const beforeFilter = candidates.length;

    candidates = candidates.filter(c => {
        if (c.similarity >= 0.85) return true;
        if (c.similarity >= CONFIG.EVENT_ENTITY_BYPASS_SIM) return true;
        return c._hasEntityMatch;
    });

@@ -444,7 +406,7 @@ async function recallEvents(queryVector, allEvents, vectorConfig, focusEntities,
        metrics.event.similarityDistribution = calcSimilarityStats(results.map(r => r.similarity));
    }

    return results;
    return { events: results, vectorMap };
}

// ═══════════════════════════════════════════════════════════════════════════
@@ -576,12 +538,14 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        .sort((a, b) => b.score - a.score);

    // ─────────────────────────────────────────────────────────────────
    // 6b. Lexical floor rank (density bonus: maxScore × (1 + 0.3×log₂(hitCount)))
    // 6b. Lexical floor rank (density bonus + dense gate filtering)
    // ─────────────────────────────────────────────────────────────────

    const atomFloorSet = new Set(getStateAtoms().map(a => a.floor));

    const lexFloorAgg = new Map();
    let lexFloorFilteredByDense = 0;

    for (const { chunkId, score } of (lexicalResult?.chunkScores || [])) {
        const match = chunkId?.match(/^c-(\d+)-/);
        if (!match) continue;
@@ -600,6 +564,13 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        // Pre-filter: the floor must have L0 atoms
        if (!atomFloorSet.has(floor)) continue;

        // Dense gate: a lexical floor must show a minimum dense relevance
        const denseInfo = denseFloorAgg.get(floor);
        if (!denseInfo || denseInfo.maxSim < CONFIG.LEXICAL_FLOOR_DENSE_MIN) {
            lexFloorFilteredByDense++;
            continue;
        }

        const cur = lexFloorAgg.get(floor);
        if (!cur) {
            lexFloorAgg.set(floor, { maxScore: score, hitCount: 1 });
@@ -616,6 +587,10 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        }))
        .sort((a, b) => b.score - a.score);

    if (metrics) {
        metrics.lexical.floorFilteredByDense = lexFloorFilteredByDense;
    }

    // ─────────────────────────────────────────────────────────────────
    // 6c. Floor W-RRF fusion
    // ─────────────────────────────────────────────────────────────────
@@ -756,7 +731,6 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        atomsByFloor.get(atom.floor).push(atom);
    }

    // Rebuild denseFloorMap to get each floor's max cosine (used to annotate L0 similarity)
    const denseFloorMaxMap = new Map();
    for (const a of (anchorHits || [])) {
        const cur = denseFloorMaxMap.get(a.floor) || 0;
@@ -772,7 +746,6 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        const rerankScore = item._rerankScore || 0;
        const denseSim = denseFloorMaxMap.get(floor) || 0;

        // Collect all L0 atoms on this floor
        const floorAtoms = atomsByFloor.get(floor) || [];
        for (const atom of floorAtoms) {
            l0Selected.push({
@@ -786,7 +759,6 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
            });
        }

        // L1 top-1 pairing (highest cosine)
        const aiChunks = l1ScoredByFloor.get(floor) || [];
        const userFloor = floor - 1;
        const userChunks = (userFloor >= 0 && chat?.[userFloor]?.is_user)
@@ -804,10 +776,6 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
        l1ByFloor.set(floor, { aiTop1, userTop1 });
    }

    // ─────────────────────────────────────────────────────────────────
    // 6h. Metrics
    // ─────────────────────────────────────────────────────────────────

    if (metrics) {
        metrics.evidence.floorsSelected = reranked.length;
        metrics.evidence.l0Collected = l0Selected.length;
@@ -827,7 +795,7 @@ async function locateAndPullEvidence(anchorHits, queryVector, rerankQuery, lexic
    }

    xbLog.info(MODULE_ID,
        `Evidence: ${denseFloorRank.length} dense floors + ${lexFloorRank.length} lex floors → fusion=${fusedFloors.length} → rerank=${reranked.length} floors → L0=${l0Selected.length} L1 attached=${metrics?.evidence?.l1Attached || 0} (${totalTime}ms)`
        `Evidence: ${denseFloorRank.length} dense floors + ${lexFloorRank.length} lex floors (${lexFloorFilteredByDense} lex filtered by dense) → fusion=${fusedFloors.length} → rerank=${reranked.length} floors → L0=${l0Selected.length} L1 attached=${metrics?.evidence?.l1Attached || 0} (${totalTime}ms)`
    );

    return { l0Selected, l1ByFloor };
@@ -1031,7 +999,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
    const r1AnchorTime = Math.round(performance.now() - T_R1_Anchor_Start);

    const T_R1_Event_Start = performance.now();
    const eventHits_v0 = await recallEvents(queryVector_v0, allEvents, vectorConfig, bundle.focusEntities, null);
    const { events: eventHits_v0 } = await recallEvents(queryVector_v0, allEvents, vectorConfig, bundle.focusEntities, null);
    const r1EventTime = Math.round(performance.now() - T_R1_Event_Start);

    xbLog.info(MODULE_ID,
@@ -1048,7 +1016,6 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {

    metrics.query.refineTime = Math.round(performance.now() - T_Refine_Start);

    // Update the v1 length metrics
    if (metrics.query?.lengths && bundle.hintsSegment) {
        metrics.query.lengths.v1Chars = metrics.query.lengths.v0Chars + bundle.hintsSegment.text.length;
    }
@@ -1094,7 +1061,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
    metrics.timing.anchorSearch = Math.round(performance.now() - T_R2_Anchor_Start);

    const T_R2_Event_Start = performance.now();
    let eventHits = await recallEvents(queryVector_v1, allEvents, vectorConfig, bundle.focusEntities, metrics);
    let { events: eventHits, vectorMap: eventVectorMap } = await recallEvents(queryVector_v1, allEvents, vectorConfig, bundle.focusEntities, metrics);
    metrics.timing.eventRetrieval = Math.round(performance.now() - T_R2_Event_Start);

    xbLog.info(MODULE_ID,
@@ -1102,7 +1069,7 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
    );

    // ═══════════════════════════════════════════════════════════════════
    // Stage 5: Lexical Retrieval
    // Stage 5: Lexical Retrieval + Dense-Gated Event Merge
    // ═══════════════════════════════════════════════════════════════════

    const T_Lex_Start = performance.now();
@@ -1133,32 +1100,53 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
    }

    // Merge L2 events (events hit lexically but missed by dense retrieval)
    // ★ Dense gate: validate the cosine similarity between the event vector and queryVector_v1
    const existingEventIds = new Set(eventHits.map(e => e.event?.id).filter(Boolean));
    const eventIndex = buildEventIndex(allEvents);
    let lexicalEventCount = 0;
    let lexicalEventFilteredByDense = 0;

    for (const eid of lexicalResult.eventIds) {
        if (!existingEventIds.has(eid)) {
            const ev = eventIndex.get(eid);
            if (ev) {
                eventHits.push({
                    event: ev,
                    similarity: 0,
                    _recallType: 'LEXICAL',
                });
                existingEventIds.add(eid);
                lexicalEventCount++;
            }
        if (existingEventIds.has(eid)) continue;

        const ev = eventIndex.get(eid);
        if (!ev) continue;

        // Dense gate: validate the semantic relevance between the event vector and the query
        const evVec = eventVectorMap.get(eid);
        if (!evVec?.length) {
            // Without a vector the relevance cannot be verified; drop the event
            lexicalEventFilteredByDense++;
            continue;
        }

        const sim = cosineSimilarity(queryVector_v1, evVec);
        if (sim < CONFIG.LEXICAL_EVENT_DENSE_MIN) {
            lexicalEventFilteredByDense++;
            continue;
        }

        // Passed the gate; use the actual dense similarity (instead of a hard-coded 0)
        eventHits.push({
            event: ev,
            similarity: sim,
            _recallType: 'LEXICAL',
        });
        existingEventIds.add(eid);
        lexicalEventCount++;
    }

    if (metrics) {
        metrics.lexical.eventFilteredByDense = lexicalEventFilteredByDense;

        if (lexicalEventCount > 0) {
            metrics.event.byRecallType.lexical = lexicalEventCount;
            metrics.event.selected += lexicalEventCount;
        }
    }

    if (metrics && lexicalEventCount > 0) {
        metrics.event.byRecallType.lexical = lexicalEventCount;
        metrics.event.selected += lexicalEventCount;
    }

    xbLog.info(MODULE_ID,
        `Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length} mergedEvents=+${lexicalEventCount} (${lexTime}ms)`
        `Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length} mergedEvents=+${lexicalEventCount} filteredByDense=${lexicalEventFilteredByDense} (${lexTime}ms)`
    );

    // ═══════════════════════════════════════════════════════════════════
@@ -1204,13 +1192,13 @@ export async function recallMemory(allEvents, vectorConfig, options = {}) {
    metrics.event.entityNames = bundle.focusEntities;
    metrics.event.entitiesUsed = bundle.focusEntities.length;

    console.group('%c[Recall v8]', 'color: #7c3aed; font-weight: bold');
    console.group('%c[Recall v9]', 'color: #7c3aed; font-weight: bold');
    console.log(`Total: ${metrics.timing.total}ms`);
    console.log(`Query Build: ${metrics.query.buildTime}ms | Refine: ${metrics.query.refineTime}ms`);
    console.log(`R1 weights: [${r1Weights.map(w => w.toFixed(2)).join(', ')}]`);
    console.log(`Focus: [${bundle.focusEntities.join(', ')}]`);
    console.log(`Round 2 Anchors: ${anchorHits.length} hits → ${anchorFloors_dense.size} floors`);
    console.log(`Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length}`);
    console.log(`Lexical: chunks=${lexicalResult.chunkIds.length} events=${lexicalResult.eventIds.length} evtMerged=+${lexicalEventCount} evtFiltered=${lexicalEventFilteredByDense} floorFiltered=${metrics.lexical.floorFilteredByDense || 0}`);
    console.log(`Fusion (floor, weighted): dense=${metrics.fusion.denseFloors} lex=${metrics.fusion.lexFloors} → cap=${metrics.fusion.afterCap} (${metrics.fusion.time}ms)`);
    console.log(`Floor Rerank: ${metrics.evidence.beforeRerank || 0} → ${metrics.evidence.floorsSelected || 0} floors → L0=${metrics.evidence.l0Collected || 0} (${metrics.evidence.rerankTime || 0}ms)`);
    console.log(`L1: ${metrics.evidence.l1Pulled || 0} pulled → ${metrics.evidence.l1Attached || 0} attached (${metrics.evidence.l1CosineTime || 0}ms)`);
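
As a closing sketch, the Floor W-RRF fusion stage consumes the dense and lexical floor ranks built above roughly as follows (the function and weight names are hypothetical; only RRF_K = 60 comes from the CONFIG shown):

// Weighted Reciprocal Rank Fusion over two floor rankings.
// Each ranking is an array of { floor, score } sorted by score descending.
function fuseFloorsWRRF(denseRank, lexRank, k = 60, wDense = 1.0, wLex = 1.0) {
    const fused = new Map();
    const addRanks = (rank, weight) => {
        rank.forEach(({ floor }, i) => {
            const prev = fused.get(floor) || 0;
            fused.set(floor, prev + weight / (k + i + 1)); // reciprocal-rank contribution
        });
    };
    addRanks(denseRank, wDense);
    addRanks(lexRank, wLex);
    return [...fused.entries()]
        .map(([floor, score]) => ({ floor, score }))
        .sort((a, b) => b.score - a.score);
}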