Update story summary recall and prompt

This commit is contained in:
2026-02-05 00:22:02 +08:00
parent 12db08abe0
commit 8137e206f9
18 changed files with 708 additions and 406 deletions

View File

@@ -0,0 +1,129 @@
// ═══════════════════════════════════════════════════════════════════════════
// Entity Recognition & Relation Graph
// 实体识别与关系扩散
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Match known entities against a piece of text.
 * An entity matches either by exact substring, or (for names of 3+ chars)
 * by its short form — the last two characters, e.g. "林黛玉" → "黛玉".
 * @param {string} text - text to scan
 * @param {Set<string>} knownEntities - full entity names
 * @returns {string[]} matched entity names, in set iteration order
 */
export function matchEntities(text, knownEntities) {
  if (!text || !knownEntities?.size) return [];
  const hits = [];
  for (const name of knownEntities) {
    // Short alias: last two characters of a 3+-char name.
    const alias = name.length >= 3 ? name.slice(-2) : null;
    if (text.includes(name) || (alias !== null && text.includes(alias))) {
      hits.push(name);
    }
  }
  return hits;
}
/**
 * Collect every known entity name from character data and events.
 * Sources: main character list (strings or {name} objects), relationship
 * endpoints (from/to), and event participant lists. Falsy names are skipped.
 * @param {object} characters - { main?: Array, relationships?: Array }
 * @param {Array} events - events with optional participants arrays
 * @returns {Set<string>} unique entity names
 */
export function collectKnownEntities(characters, events) {
  const known = new Set();
  const add = (value) => {
    if (value) known.add(value);
  };
  // Main characters may be plain strings or objects carrying a name.
  for (const member of characters?.main || []) {
    add(typeof member === 'string' ? member : member.name);
  }
  // Both endpoints of every relationship.
  for (const rel of characters?.relationships || []) {
    add(rel.from);
    add(rel.to);
  }
  // Every event participant.
  for (const event of events || []) {
    for (const participant of event.participants || []) {
      add(participant);
    }
  }
  return known;
}
/**
 * Build an undirected adjacency list from relationship records.
 * Each relationship contributes an edge in both directions, weighted by its
 * trend label; unknown or missing trends default to 0.5.
 * @param {Array} relationships - records with {from, to, trend}
 * @returns {Map<string, Array<{target: string, weight: number}>>}
 */
export function buildRelationGraph(relationships) {
  const graph = new Map();
  // Trend label → edge weight. Kept local so the block is self-contained.
  const trendWeight = {
    '交融': 1.0,
    '亲密': 0.9,
    '投缘': 0.7,
    '陌生': 0.3,
    '反感': 0.5,
    '厌恶': 0.6,
    '破裂': 0.7,
  };
  for (const rel of relationships || []) {
    // Skip malformed records lacking either endpoint.
    if (!rel.from || !rel.to) continue;
    // `??` (not `||`) so a legitimate 0 weight would not be clobbered by the default.
    const weight = trendWeight[rel.trend] ?? 0.5;
    if (!graph.has(rel.from)) graph.set(rel.from, []);
    if (!graph.has(rel.to)) graph.set(rel.to, []);
    // Undirected: insert the edge both ways.
    graph.get(rel.from).push({ target: rel.to, weight });
    graph.get(rel.to).push({ target: rel.from, weight });
  }
  return graph;
}
/**
 * One-hop spreading activation over the relation graph.
 * Focus entities start at 1.0; each neighbor receives edge weight × decay.
 * When several sources reach the same target, the maximum score wins
 * (scores are never summed).
 * @param {string[]} focusEntities - seed entities
 * @param {Map} graph - adjacency list from buildRelationGraph
 * @param {number} decayFactor - per-hop attenuation
 * @returns {Map<string, number>} entity → activation score
 */
export function spreadActivation(focusEntities, graph, decayFactor = 0.5) {
  // Seed every focus entity at full activation.
  const scores = new Map(focusEntities.map((entity) => [entity, 1.0]));
  // Propagate one hop outward from each seed.
  for (const source of focusEntities) {
    for (const { target, weight } of graph.get(source) || []) {
      const candidate = weight * decayFactor;
      const current = scores.get(target) || 0;
      // Keep the strongest activation path, never accumulate.
      if (candidate > current) scores.set(target, candidate);
    }
  }
  return scores;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,237 @@
// text-search.js - 最终版
import MiniSearch from '../../../../libs/minisearch.mjs';
// Stop words removed before indexing: Chinese function words/particles,
// Japanese particles and common endings, and high-frequency English words.
const STOP_WORDS = new Set([
  '的', '了', '是', '在', '和', '与', '或', '但', '而', '却',
  '这', '那', '他', '她', '它', '我', '你', '们', '着', '过',
  '把', '被', '给', '让', '向', '就', '都', '也', '还', '又',
  '很', '太', '更', '最', '只', '才', '已', '正', '会', '能',
  '要', '可', '得', '地', '之', '所', '以', '为', '于', '有',
  '不', '去', '来', '上', '下', '里', '说', '看', '吧', '呢',
  '啊', '吗', '呀', '哦', '嗯', '么',
  'の', 'に', 'は', 'を', 'が', 'と', 'で', 'へ', 'や', 'か',
  'も', 'な', 'よ', 'ね', 'わ', 'です', 'ます', 'した', 'ない',
  'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been',
  'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
  'to', 'of', 'in', 'on', 'at', 'for', 'with', 'by', 'from',
  'and', 'or', 'but', 'if', 'that', 'this', 'it', 'its',
  'i', 'you', 'he', 'she', 'we', 'they', 'my', 'your', 'his',
]);

/**
 * Mixed-script tokenizer for MiniSearch.
 * CJK runs become character bigrams + trigrams (stop characters dropped
 * first), kana runs of 2+ chars are kept whole, and ASCII words of 2+
 * letters are kept — all de-duplicated, lowercase.
 * @param {string} text
 * @returns {string[]} unique tokens
 */
function tokenize(text) {
  const normalized = String(text || '').toLowerCase().trim();
  if (!normalized) return [];
  const out = new Set();
  // CJK ideographs: bigrams and trigrams over the non-stopword characters.
  for (const segment of normalized.match(/[\u4e00-\u9fff\u3400-\u4dbf]+/g) || []) {
    const kept = [...segment].filter((ch) => !STOP_WORDS.has(ch));
    for (let i = 0; i + 1 < kept.length; i++) {
      out.add(kept[i] + kept[i + 1]);
    }
    for (let i = 0; i + 2 < kept.length; i++) {
      out.add(kept[i] + kept[i + 1] + kept[i + 2]);
    }
  }
  // Japanese kana runs, kept as whole tokens.
  for (const run of normalized.match(/[\u3040-\u309f\u30a0-\u30ff]{2,}/g) || []) {
    if (!STOP_WORDS.has(run)) out.add(run);
  }
  // ASCII words.
  for (const word of normalized.match(/[a-z]{2,}/g) || []) {
    if (!STOP_WORDS.has(word)) out.add(word);
  }
  return [...out];
}
// MiniSearch index over story events; null until ensureEventTextIndex builds it.
let idx = null;
// Data revision the current event index was built from; used to skip rebuilds.
let lastRevision = null;
/**
 * Remove a trailing floor tag like " (#12)" or " (#12-34)" from a summary,
 * then trim surrounding whitespace. Non-strings are coerced ('' for nullish).
 */
function stripFloorTag(s) {
  const raw = String(s || '');
  const untagged = raw.replace(/\s*\(#\d+(?:-\d+)?\)\s*$/, '');
  return untagged.trim();
}
/**
 * Build (or reuse) the MiniSearch index over events.
 * An empty/missing event list clears the index; an unchanged revision
 * reuses the existing one. Build failures are logged and leave idx null.
 * @param {Array} events - events with id/title/summary/participants
 * @param {*} revision - opaque cache key for the current data version
 */
export function ensureEventTextIndex(events, revision) {
  if (!events?.length) {
    // Nothing to index: drop any previous index.
    idx = null;
    lastRevision = null;
    return;
  }
  if (idx && revision === lastRevision) return; // index already current
  try {
    const search = new MiniSearch({
      fields: ['title', 'summary', 'participants'],
      storeFields: ['id'],
      tokenize,
      searchOptions: { tokenize },
    });
    const docs = events.map((event) => ({
      id: event.id,
      title: event.title || '',
      summary: stripFloorTag(event.summary),
      participants: (event.participants || []).join(' '),
    }));
    search.addAll(docs);
    idx = search;
    lastRevision = revision;
  } catch (e) {
    console.error('[text-search] Index build failed:', e);
    idx = null;
  }
}
/**
* BM25 检索,返回 top-K 候选给 RRF
*
* 设计原则:
* - 不做分数过滤BM25 分数跨查询不可比)
* - 不做匹配数过滤bigram 让一个词产生多个 token
* - 只做 top-KBM25 排序本身有区分度)
* - 质量过滤交给 RRF 后的 hasVector 过滤
*/
/**
 * Dynamic top-K by cumulative-score coverage.
 *
 * BM25 scores follow a power-law-like distribution: a few high scorers carry
 * most of the total mass. Walk the (already ranked) scores and return the
 * smallest K whose cumulative share reaches `coverage`, clamped to
 * [minK, maxK]. A non-positive total falls back to min(minK, n).
 *
 * @param {number[]} scores - scores in descending rank order
 * @param {number} coverage - cumulative-share threshold (0..1)
 * @param {number} minK - lower clamp
 * @param {number} maxK - upper clamp
 * @returns {number} chosen K (0 for an empty list)
 */
export function dynamicTopK(scores, coverage = 0.90, minK = 15, maxK = 80) {
  const n = scores.length;
  if (n === 0) return 0;
  const total = scores.reduce((sum, s) => sum + s, 0);
  if (total <= 0) return Math.min(minK, n);
  let running = 0;
  let k = 0;
  while (k < n) {
    running += scores[k];
    k += 1;
    if (running / total >= coverage) {
      return Math.max(minK, Math.min(maxK, k));
    }
  }
  // Floating-point slack: coverage never reached — take everything up to maxK.
  return Math.min(maxK, n);
}
/**
 * BM25 search over the event index, returning a dynamically sized top-K
 * slice for downstream RRF fusion. Title hits are boosted 4x, participants
 * 2x. The returned array carries a non-enumerable-style `_gapInfo` property
 * with diagnostics about the coverage cutoff.
 * @param {string} queryText
 * @param {number} limit - hard upper bound on returned candidates
 * @returns {Array<{id, textRank, score}>} (empty when no index / no query)
 */
export function searchEventsByText(queryText, limit = 80) {
  if (!idx || !queryText?.trim()) return [];
  try {
    const results = idx.search(queryText, {
      boost: { title: 4, participants: 2, summary: 1 },
      fuzzy: false,
      prefix: false,
    });
    if (!results.length) return [];
    const scores = results.map((r) => r.score);
    const k = dynamicTopK(scores, 0.90, 15, limit);
    const picked = results.slice(0, k).map((hit, rank) => ({
      id: hit.id,
      textRank: rank + 1,
      score: hit.score,
    }));
    // Diagnostics: how much score mass the cutoff kept, and where it sat.
    const sum = (arr) => arr.reduce((acc, v) => acc + v, 0);
    const total = sum(scores);
    const covered = sum(scores.slice(0, k));
    picked._gapInfo = {
      total: results.length,
      returned: k,
      coverage: ((covered / total) * 100).toFixed(1) + '%',
      scoreRange: {
        top: scores[0]?.toFixed(1),
        cutoff: scores[k - 1]?.toFixed(1),
        p50: scores[Math.floor(scores.length / 2)]?.toFixed(1),
        last: scores[scores.length - 1]?.toFixed(1),
      },
    };
    return picked;
  } catch (e) {
    console.error('[text-search] Search failed:', e);
    return [];
  }
}
/** Drop the event index so the next ensureEventTextIndex() call rebuilds it. */
export function clearEventTextIndex() {
idx = null;
lastRevision = null;
}
// ---------------------------------------------------------------------------
// Chunk text index (supplement for the L1 "to be organized" staging area)
// ---------------------------------------------------------------------------
// MiniSearch index over raw chat chunks; null until ensureChunkTextIndex builds it.
let chunkIdx = null;
// Data revision the current chunk index was built from; used to skip rebuilds.
let chunkIdxRevision = null;
/**
 * Build (or reuse) the MiniSearch index over chat chunks.
 * Mirrors ensureEventTextIndex: an empty/missing chunk list clears the index
 * (previously a nullish `chunks` threw inside the try, logged an error, and
 * left a stale chunkIdxRevision behind); an unchanged revision reuses the
 * existing index. Build failures are logged and leave chunkIdx null.
 * @param {Array} chunks - chunks with chunkId/floor/text
 * @param {*} revision - opaque cache key for the current data version
 */
export function ensureChunkTextIndex(chunks, revision) {
  if (!chunks?.length) {
    // Nothing to index: drop any previous index and its revision marker.
    chunkIdx = null;
    chunkIdxRevision = null;
    return;
  }
  if (chunkIdx && revision === chunkIdxRevision) return;
  try {
    chunkIdx = new MiniSearch({
      fields: ['text'],
      storeFields: ['chunkId', 'floor'],
      tokenize,
      searchOptions: { tokenize },
    });
    chunkIdx.addAll(chunks.map(c => ({
      id: c.chunkId,
      chunkId: c.chunkId,
      floor: c.floor,
      text: c.text || '',
    })));
    chunkIdxRevision = revision;
  } catch (e) {
    console.error('[text-search] Chunk index build failed:', e);
    chunkIdx = null;
  }
}
/**
 * BM25 search over the chunk index, restricted to a floor range.
 * Results are filtered to [floorMin, floorMax] and cut at a dynamic top-K
 * (85% score coverage, at least 5, at most `limit`).
 * @param {string} query
 * @param {number} floorMin - inclusive lower floor bound
 * @param {number} floorMax - inclusive upper floor bound
 * @param {number} limit - hard upper bound on returned candidates
 * @returns {Array<{chunkId, floor, textRank, score}>}
 */
export function searchChunksByText(query, floorMin, floorMax, limit = 20) {
  if (!chunkIdx || !query?.trim()) return [];
  try {
    const hits = chunkIdx
      .search(query, { fuzzy: false, prefix: false })
      .filter((hit) => hit.floor >= floorMin && hit.floor <= floorMax);
    if (!hits.length) return [];
    const k = dynamicTopK(hits.map((h) => h.score), 0.85, 5, limit);
    return hits.slice(0, k).map((hit, rank) => ({
      chunkId: hit.chunkId,
      floor: hit.floor,
      textRank: rank + 1,
      score: hit.score,
    }));
  } catch (e) {
    console.error('[text-search] Chunk search failed:', e);
    return [];
  }
}
/** Drop the chunk index so the next ensureChunkTextIndex() call rebuilds it. */
export function clearChunkTextIndex() {
chunkIdx = null;
chunkIdxRevision = null;
}

View File

@@ -0,0 +1,287 @@
import { xbLog } from '../../../../core/debug-core.js';
import { extensionFolderPath } from '../../../../core/constants.js';
const MODULE_ID = 'tokenizer';
// ═══════════════════════════════════════════════════════════════════════════
// POS filtering
// ═══════════════════════════════════════════════════════════════════════════
// POS-tag prefixes to keep: noun family ('n*') and English tokens ('eng').
const KEEP_POS_PREFIXES = ['n', 'eng'];
/** True when the POS tag belongs to a kept category. */
function shouldKeepByPos(pos) {
  for (const prefix of KEEP_POS_PREFIXES) {
    if (pos.startsWith(prefix)) return true;
  }
  return false;
}
// ═══════════════════════════════════════════════════════════════════════════
// Language detection
// ═══════════════════════════════════════════════════════════════════════════
/** True when the text has at least 5 Han characters — enough to justify jieba. */
function shouldUseJieba(text) {
  const hanCount = (text.match(/[\u4e00-\u9fff]/g) || []).length;
  return hanCount >= 5;
}
/**
 * Classify the dominant script of the text as 'jp', 'en', or 'zh'.
 * Counts Han, kana, and Latin characters; Japanese wins at >20% kana share,
 * English at >50% Latin share, otherwise Chinese (also the empty-text default).
 */
function detectMainLanguage(text) {
  const count = (re) => (text.match(re) || []).length;
  const han = count(/[\u4e00-\u9fff]/g);
  const kana = count(/[\u3040-\u309f\u30a0-\u30ff]/g);
  const latin = count(/[a-zA-Z]/g);
  const total = han + kana + latin || 1; // avoid 0/0 on empty text
  if (kana / total > 0.2) return 'jp';
  if (latin / total > 0.5) return 'en';
  return 'zh';
}
// Replaces the earlier, much larger stop-word table: only generic nouns that
// would survive POS filtering but carry no retrieval value are listed here.
const STOP_WORDS = new Set([
// System/meta words
'用户', '角色', '玩家', '旁白', 'user', 'assistant', 'system',
// Generic time words
'时候', '现在', '今天', '明天', '昨天', '早上', '晚上',
// Generic location/direction words
'这里', '那里', '上面', '下面', '里面', '外面',
// Generic, low-information nouns
'东西', '事情', '事儿', '地方', '样子', '意思', '感觉',
'一下', '一些', '一点', '一会', '一次',
]);
// English stop words (used only by the regex fallback tokenizer).
const EN_STOP_WORDS = new Set([
'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been',
'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
'could', 'should', 'may', 'might', 'must', 'can',
'to', 'of', 'in', 'on', 'at', 'for', 'with', 'by', 'from',
'and', 'or', 'but', 'if', 'that', 'this', 'it', 'its',
'i', 'you', 'he', 'she', 'we', 'they',
'my', 'your', 'his', 'her', 'our', 'their',
'what', 'which', 'who', 'whom', 'where', 'when', 'why', 'how',
]);
// Jieba WASM module handle; `tag` is read by extractNouns/extractRareTerms.
let jiebaModule = null;
// True once the WASM module has been imported and initialised.
let jiebaReady = false;
// In-flight load shared by all concurrent callers; null when idle or after a failure.
let jiebaLoadPromise = null;

/**
 * Lazily load and initialise jieba-wasm.
 *
 * Replaces the previous polling design, where concurrent callers slept in
 * 100 ms steps watching jiebaReady: after a failed load they kept spinning
 * for the full 5 s timeout (the loop never observed the failure), and a new
 * caller could start a second overlapping import. All callers now await one
 * memoized promise; a failure resets it so a later call can retry.
 *
 * @returns {Promise<boolean>} true when jieba is ready to use
 */
async function ensureJieba() {
  if (jiebaReady) return true;
  if (!jiebaLoadPromise) {
    jiebaLoadPromise = (async () => {
      const jiebaPath = `/${extensionFolderPath}/libs/jieba-wasm/jieba_rs_wasm.js`;
      // eslint-disable-next-line no-unsanitized/method
      jiebaModule = await import(jiebaPath);
      // The default export is the wasm-bindgen init function.
      if (jiebaModule.default) {
        await jiebaModule.default();
      }
      jiebaReady = true;
      xbLog.info(MODULE_ID, 'jieba-wasm 加载成功');
      // Debug dump of the module surface, kept from the original implementation.
      const keys = Object.getOwnPropertyNames(jiebaModule || {});
      const dkeys = Object.getOwnPropertyNames(jiebaModule?.default || {});
      xbLog.info(MODULE_ID, `jieba keys: ${keys.join(',')}`);
      xbLog.info(MODULE_ID, `jieba default keys: ${dkeys.join(',')}`);
      xbLog.info(MODULE_ID, `jieba.tag: ${typeof jiebaModule?.tag}`);
      return true;
    })().catch((e) => {
      xbLog.error(MODULE_ID, 'jieba-wasm 加载失败', e);
      jiebaLoadPromise = null; // allow a retry on the next call
      return false;
    });
  }
  return jiebaLoadPromise;
}
/**
 * Regex-based tokenizer used when jieba is unavailable or the text is not
 * Chinese-dominant. Emits English words (minus stop words), kana runs for
 * Japanese-dominant text, Han runs, and digit+Han combinations, in that order.
 * May contain duplicates; callers de-duplicate.
 */
function fallbackTokenize(text) {
  const lang = detectMainLanguage(text);
  const tokens = [];
  // English words, stop-word filtered (case-insensitive match, original case kept).
  for (const word of text.match(/[a-zA-Z]{2,20}/gi) || []) {
    if (!EN_STOP_WORDS.has(word.toLowerCase())) tokens.push(word);
  }
  // Kana runs only when the text is Japanese-dominant.
  if (lang === 'jp') {
    tokens.push(...(text.match(/[\u3040-\u309f\u30a0-\u30ff]{2,10}/g) || []));
  }
  // Han character runs (Chinese, or kanji in Japanese text).
  tokens.push(...(text.match(/[\u4e00-\u9fff]{2,6}/g) || []));
  // Digit + Han combinations (e.g. quantities).
  tokens.push(...(text.match(/\d+[\u4e00-\u9fff]{1,4}/g) || []));
  return tokens;
}
/**
 * Extract noun-like terms from text, de-duplicated in first-seen order.
 * Chinese-dominant text goes through jieba POS tagging (keeping noun/eng
 * tags, dropping stop words); any other text — or a jieba failure — falls
 * back to the regex tokenizer.
 * @param {string} text
 * @param {{minLen?: number, maxCount?: number}} [options] maxCount 0 = unlimited
 * @returns {Promise<string[]>}
 */
export async function extractNouns(text, options = {}) {
  const { minLen = 2, maxCount = 0 } = options;
  if (!text?.trim()) return [];
  // Chinese-dominant → jieba POS tagging.
  if (shouldUseJieba(text)) {
    const jiebaOk = await ensureJieba();
    if (jiebaOk && jiebaModule?.tag) {
      try {
        const tagged = jiebaModule.tag(text, true);
        const items = Array.isArray(tagged) ? tagged : [];
        const seen = new Set();
        const nouns = [];
        for (const entry of items) {
          let word = '';
          let pos = '';
          // jieba builds differ: entries may be [word, pos] pairs or objects.
          if (Array.isArray(entry)) {
            [word, pos] = entry;
          } else if (entry && typeof entry === 'object') {
            word = entry.word || entry.w || entry.text || entry.term || '';
            pos = entry.tag || entry.pos || entry.p || '';
          }
          if (!word || !pos) continue;
          if (word.length < minLen) continue;
          if (!shouldKeepByPos(pos)) continue;
          if (STOP_WORDS.has(word)) continue;
          if (seen.has(word)) continue;
          seen.add(word);
          nouns.push(word);
          if (maxCount > 0 && nouns.length >= maxCount) break;
        }
        return nouns;
      } catch (e) {
        xbLog.warn(MODULE_ID, 'jieba tag 失败:' + (e && e.message ? e.message : String(e)));
      }
    }
  }
  // Non-Chinese text or jieba failure → regex fallback.
  const seen = new Set();
  const out = [];
  for (const token of fallbackTokenize(text)) {
    if (token.length < minLen || STOP_WORDS.has(token) || seen.has(token)) continue;
    seen.add(token);
    out.push(token);
    if (maxCount > 0 && out.length >= maxCount) break;
  }
  return out;
}
/**
 * Extract the "rarest" (most distinctive) terms from text, ranked by a simple
 * heuristic score: longer words, Latin letters, digits, and proper-noun POS
 * tags (nr/ns/nt/nz) all add points. Chinese-dominant text uses jieba POS
 * tagging; otherwise (or on jieba failure) the fallback scores extractNouns
 * output without the POS bonus.
 * @param {string} text
 * @param {number} maxCount - maximum number of terms returned
 * @returns {Promise<string[]>} terms sorted by descending score
 */
export async function extractRareTerms(text, maxCount = 15) {
  if (!text?.trim()) return [];
  // Heuristic rarity score shared by both paths (POS bonus is jieba-only).
  const baseScore = (term) => {
    let score = 0;
    if (term.length >= 4) score += 3;
    else if (term.length >= 3) score += 1;
    if (/[a-zA-Z]/.test(term)) score += 2;
    if (/\d/.test(term)) score += 1;
    return score;
  };
  // Chinese-dominant → jieba POS tagging.
  if (shouldUseJieba(text)) {
    const jiebaOk = await ensureJieba();
    if (jiebaOk && jiebaModule?.tag) {
      try {
        const tagged = jiebaModule.tag(text, true);
        const items = Array.isArray(tagged) ? tagged : [];
        const seen = new Set();
        const candidates = [];
        for (const entry of items) {
          let word = '';
          let pos = '';
          // jieba builds differ: entries may be [word, pos] pairs or objects.
          if (Array.isArray(entry)) {
            [word, pos] = entry;
          } else if (entry && typeof entry === 'object') {
            word = entry.word || entry.w || entry.text || entry.term || '';
            pos = entry.tag || entry.pos || entry.p || '';
          }
          if (!word || !pos) continue;
          if (word.length < 2) continue;
          if (!shouldKeepByPos(pos)) continue;
          if (STOP_WORDS.has(word)) continue;
          if (seen.has(word)) continue;
          seen.add(word);
          let score = baseScore(word);
          // Proper-noun POS tags get an extra bonus.
          if (['nr', 'ns', 'nt', 'nz'].some(p => pos.startsWith(p))) score += 2;
          candidates.push({ term: word, score });
        }
        candidates.sort((a, b) => b.score - a.score);
        return candidates.slice(0, maxCount).map(c => c.term);
      } catch (e) {
        xbLog.warn(MODULE_ID, 'jieba tag 失败:' + (e && e.message ? e.message : String(e)));
      }
    }
  }
  // Non-Chinese text or jieba failure → score the fallback noun list.
  const nouns = await extractNouns(text, { minLen: 2, maxCount: 0 });
  const scored = nouns.map(term => ({ term, score: baseScore(term) }));
  scored.sort((a, b) => b.score - a.score);
  return scored.slice(0, maxCount).map(c => c.term);
}
/**
 * Extract nouns from the object ("O") side of fact triples whose subject is
 * in the relevant set. Retracted facts, facts with irrelevant subjects, empty
 * objects, and over-long objects (>30 chars — likely full sentences) are
 * skipped; the surviving objects are joined and run through extractNouns.
 * @param {Array} facts - triples with s/o fields and a retracted flag
 * @param {Set<string>} relevantSubjects - subjects to include
 * @param {number} maxCount - maximum nouns returned
 * @returns {Promise<string[]>}
 */
export async function extractNounsFromFactsO(facts, relevantSubjects, maxCount = 5) {
  if (!facts?.length || !relevantSubjects?.size) return [];
  const objects = [];
  for (const fact of facts) {
    if (fact.retracted) continue;
    const subject = String(fact.s || '').trim();
    if (!relevantSubjects.has(subject)) continue;
    const object = String(fact.o || '').trim();
    // Skip empty objects and over-long ones (probably whole sentences).
    if (!object || object.length > 30) continue;
    objects.push(object);
  }
  if (!objects.length) return [];
  return await extractNouns(objects.join(' '), { minLen: 2, maxCount });
}
// Exported so callers can pre-warm the jieba WASM module ahead of first use.
export { ensureJieba };