Fix tokenizer jieba tag flow and debug logging
This commit is contained in:
@@ -21,6 +21,10 @@ import {
|
||||
mergeAndSparsify,
|
||||
} from './state-recall.js';
|
||||
import { ensureEventTextIndex, searchEventsByText } from './text-search.js';
|
||||
import {
|
||||
extractRareTerms,
|
||||
extractNounsFromFactsO,
|
||||
} from './tokenizer.js';
|
||||
|
||||
const MODULE_ID = 'recall';
|
||||
|
||||
@@ -50,6 +54,10 @@ const CONFIG = {
|
||||
|
||||
RRF_K: 60,
|
||||
TEXT_SEARCH_LIMIT: 80,
|
||||
|
||||
// TEXT-only 质量控制
|
||||
TEXT_SOFT_MIN_SIM: 0.50,
|
||||
TEXT_TOTAL_MAX: 6,
|
||||
};
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
@@ -83,7 +91,7 @@ function fuseEventsByRRF(vectorRanked, textRanked, eventById, k = CONFIG.RRF_K)
|
||||
|
||||
const upsert = (id) => {
|
||||
if (!map.has(id)) {
|
||||
map.set(id, { id, rrf: 0, vRank: Infinity, tRank: Infinity, type: 'TEXT' });
|
||||
map.set(id, { id, rrf: 0, vRank: Infinity, tRank: Infinity, type: 'TEXT', rawSim: 0, vector: null });
|
||||
}
|
||||
return map.get(id);
|
||||
};
|
||||
@@ -96,6 +104,7 @@ function fuseEventsByRRF(vectorRanked, textRanked, eventById, k = CONFIG.RRF_K)
|
||||
o.rrf += 1 / (k + i + 1);
|
||||
o.type = o.tRank !== Infinity ? 'HYBRID' : 'VECTOR';
|
||||
o.vector = r.vector;
|
||||
o.rawSim = r.rawSim || 0;
|
||||
});
|
||||
|
||||
textRanked.forEach((r) => {
|
||||
@@ -381,6 +390,70 @@ function normalizeEntityWeights(queryEntityWeights) {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 文本路 Query 构建(分层高信号词)
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/**
 * Builds the text-search query string from layered high-signal terms.
 *
 * Signal layers, strongest first:
 *   1. entities  - entities detected in the query
 *   2. rareTerms - rare nouns extracted from the last two segments (max 15)
 *   3. factsO    - nouns mined from the O side of facts whose subject is a
 *                  query entity (max 5)
 *   4. expanded  - up to 3 background expansion terms
 * Each layer is deduplicated (case-insensitively) against the layers above it.
 *
 * @param {string[]} segments - conversation segments, newest last
 * @param {string[]} queryEntities - entity names found in the query
 * @param {Array<object>} facts - fact triples used for factsO mining
 * @param {string[]} expandedTerms - candidate background-expansion terms
 * @returns {Promise<{query: string, breakdown: object}>} joined query string
 *          plus the per-layer breakdown (surfaced in the debug log)
 */
async function buildTextSearchQuery(segments, queryEntities, facts, expandedTerms) {
    const breakdown = {
        entities: [],
        rareTerms: [],
        factsO: [],
        expanded: [],
    };

    breakdown.entities = [...(queryEntities || [])];

    // Rare terms are mined from the two most recent segments only.
    const q2Segments = segments.slice(-2);
    const q2Text = q2Segments.join(' ');

    try {
        breakdown.rareTerms = await extractRareTerms(q2Text, 15);
    } catch (e) {
        xbLog.warn(MODULE_ID, '稀有词提取失败', e);
        breakdown.rareTerms = [];
    }

    // Drop rare terms that merely repeat an entity (case-insensitive).
    const entitySet = new Set(breakdown.entities.map(e => e.toLowerCase()));
    breakdown.rareTerms = breakdown.rareTerms.filter(t => !entitySet.has(t.toLowerCase()));

    // Only facts whose subject is a query entity are mined for O-side nouns.
    const relevantSubjects = new Set(queryEntities || []);
    try {
        breakdown.factsO = await extractNounsFromFactsO(facts, relevantSubjects, 5);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'facts O 提取失败', e);
        breakdown.factsO = [];
    }

    // Dedupe factsO against the two stronger layers.
    const existingSet = new Set([
        ...breakdown.entities,
        ...breakdown.rareTerms,
    ].map(e => e.toLowerCase()));
    breakdown.factsO = breakdown.factsO.filter(t => !existingSet.has(t.toLowerCase()));

    // Dedupe expansion terms against everything above, then cap at 3.
    const allExistingSet = new Set([
        ...breakdown.entities,
        ...breakdown.rareTerms,
        ...breakdown.factsO,
    ].map(e => e.toLowerCase()));

    breakdown.expanded = (expandedTerms || [])
        .filter(t => !allExistingSet.has(t.toLowerCase()))
        .slice(0, 3);

    // NOTE(review): entities are spread twice — presumably a deliberate
    // term-frequency boost for the strongest layer; confirm before changing.
    const queryParts = [
        ...breakdown.entities,
        ...breakdown.entities,
        ...breakdown.rareTerms,
        ...breakdown.factsO,
        ...breakdown.expanded,
    ];

    const query = queryParts.join(' ');

    return { query, breakdown };
}
|
||||
|
||||
/**
 * Strips a trailing floor tag such as " (#12)" or " (#12-15)" from a title
 * string, then trims surrounding whitespace. Non-string / nullish input is
 * coerced to the empty string first.
 * @param {*} s - value to clean
 * @returns {string} the title without its floor tag
 */
function stripFloorTag(s) {
    const text = String(s || '');
    const floorTagPattern = /\s*\(#\d+(?:-\d+)?\)\s*$/;
    return text.replace(floorTagPattern, '').trim();
}
|
||||
@@ -605,7 +678,7 @@ async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorCo
|
||||
|
||||
const scored = (allEvents || []).map((event, idx) => {
|
||||
const v = vectorMap.get(event.id);
|
||||
const sim = v ? cosineSimilarity(queryVector, v) : 0;
|
||||
const rawSim = v ? cosineSimilarity(queryVector, v) : 0;
|
||||
|
||||
let bonus = 0;
|
||||
|
||||
@@ -635,14 +708,15 @@ async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorCo
|
||||
_id: event.id,
|
||||
_idx: idx,
|
||||
event,
|
||||
similarity: sim,
|
||||
finalScore: sim + bonus,
|
||||
rawSim,
|
||||
finalScore: rawSim + bonus,
|
||||
vector: v,
|
||||
_entityBonus: entityBonus,
|
||||
_hasPresent: maxEntityWeight > 0,
|
||||
};
|
||||
});
|
||||
|
||||
const rawSimById = new Map(scored.map(s => [s._id, s.rawSim]));
|
||||
const entityBonusById = new Map(scored.map(s => [s._id, s._entityBonus]));
|
||||
const hasPresentById = new Map(scored.map(s => [s._id, s._hasPresent]));
|
||||
|
||||
@@ -665,14 +739,34 @@ async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorCo
|
||||
const vectorRanked = candidates.map(s => ({
|
||||
event: s.event,
|
||||
similarity: s.finalScore,
|
||||
rawSim: s.rawSim,
|
||||
vector: s.vector,
|
||||
}));
|
||||
|
||||
const eventById = new Map(allEvents.map(e => [e.id, e]));
|
||||
const fused = fuseEventsByRRF(vectorRanked, textRanked, eventById);
|
||||
|
||||
const hasVector = vectorRanked.length > 0;
|
||||
const filtered = hasVector ? fused.filter(x => x.type !== 'TEXT') : fused;
|
||||
const textOnlyStats = {
|
||||
total: 0,
|
||||
passedSoftCheck: 0,
|
||||
filtered: 0,
|
||||
finalIncluded: 0,
|
||||
truncatedByLimit: 0,
|
||||
};
|
||||
|
||||
const filtered = fused.filter(x => {
|
||||
if (x.type !== 'TEXT') return true;
|
||||
|
||||
textOnlyStats.total++;
|
||||
const sim = x.rawSim || rawSimById.get(x.id) || 0;
|
||||
if (sim >= CONFIG.TEXT_SOFT_MIN_SIM) {
|
||||
textOnlyStats.passedSoftCheck++;
|
||||
return true;
|
||||
}
|
||||
|
||||
textOnlyStats.filtered++;
|
||||
return false;
|
||||
});
|
||||
|
||||
const mmrInput = filtered.slice(0, CONFIG.CANDIDATE_EVENTS).map(x => ({
|
||||
...x,
|
||||
@@ -686,14 +780,27 @@ async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorCo
|
||||
c => c.vector || null,
|
||||
c => c.rrf
|
||||
);
|
||||
// 构造结果
|
||||
const results = mmrOutput.map(x => ({
|
||||
|
||||
let textOnlyCount = 0;
|
||||
const finalResults = mmrOutput.filter(x => {
|
||||
if (x.type !== 'TEXT') return true;
|
||||
if (textOnlyCount < CONFIG.TEXT_TOTAL_MAX) {
|
||||
textOnlyCount++;
|
||||
return true;
|
||||
}
|
||||
textOnlyStats.truncatedByLimit++;
|
||||
return false;
|
||||
});
|
||||
textOnlyStats.finalIncluded = textOnlyCount;
|
||||
|
||||
const results = finalResults.map(x => ({
|
||||
event: x.event,
|
||||
similarity: x.rrf,
|
||||
_recallType: hasPresentById.get(x.event?.id) ? 'DIRECT' : 'SIMILAR',
|
||||
_recallReason: x.type,
|
||||
_rrfDetail: { vRank: x.vRank, tRank: x.tRank, rrf: x.rrf },
|
||||
_entityBonus: entityBonusById.get(x.event?.id) || 0,
|
||||
_rawSim: rawSimById.get(x.event?.id) || 0,
|
||||
}));
|
||||
|
||||
// 统计信息附加到第一条结果
|
||||
@@ -704,8 +811,9 @@ async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorCo
|
||||
textCount: textRanked.length,
|
||||
hybridCount: fused.filter(x => x.type === 'HYBRID').length,
|
||||
vectorOnlyCount: fused.filter(x => x.type === 'VECTOR').length,
|
||||
textOnlyFiltered: fused.filter(x => x.type === 'TEXT').length,
|
||||
textOnlyTotal: textOnlyStats.total,
|
||||
};
|
||||
results[0]._textOnlyStats = textOnlyStats;
|
||||
results[0]._textGapInfo = textGapInfo;
|
||||
}
|
||||
|
||||
@@ -729,6 +837,7 @@ function formatRecallLog({
|
||||
l0Results = [],
|
||||
textGapInfo = null,
|
||||
expandedTerms = [],
|
||||
textQueryBreakdown = null,
|
||||
}) {
|
||||
const lines = [
|
||||
'\u2554' + '\u2550'.repeat(62) + '\u2557',
|
||||
@@ -775,6 +884,40 @@ function formatRecallLog({
|
||||
lines.push(` 扩散: ${expandedTerms.join('、')}`);
|
||||
}
|
||||
|
||||
lines.push('');
|
||||
lines.push('\u250c' + '\u2500'.repeat(61) + '\u2510');
|
||||
lines.push('\u2502 【文本路 Query 构成】 \u2502');
|
||||
lines.push('\u2514' + '\u2500'.repeat(61) + '\u2518');
|
||||
|
||||
if (textQueryBreakdown) {
|
||||
const bd = textQueryBreakdown;
|
||||
if (bd.entities?.length) {
|
||||
lines.push(` 强信号-实体 (${bd.entities.length}): ${bd.entities.slice(0, 8).join(' | ')}${bd.entities.length > 8 ? ' ...' : ''}`);
|
||||
} else {
|
||||
lines.push(' 强信号-实体: (无)');
|
||||
}
|
||||
|
||||
if (bd.rareTerms?.length) {
|
||||
lines.push(` 强信号-稀有词 (${bd.rareTerms.length}): ${bd.rareTerms.slice(0, 10).join(' | ')}${bd.rareTerms.length > 10 ? ' ...' : ''}`);
|
||||
} else {
|
||||
lines.push(' 强信号-稀有词: (无)');
|
||||
}
|
||||
|
||||
if (bd.factsO?.length) {
|
||||
lines.push(` 中信号-facts O (${bd.factsO.length}): ${bd.factsO.join(' | ')}`);
|
||||
} else {
|
||||
lines.push(' 中信号-facts O: (无)');
|
||||
}
|
||||
|
||||
if (bd.expanded?.length) {
|
||||
lines.push(` 背景扩展 (${bd.expanded.length}): ${bd.expanded.join(' | ')}`);
|
||||
} else {
|
||||
lines.push(' 背景扩展: (无)');
|
||||
}
|
||||
} else {
|
||||
lines.push(' (降级模式,无分层信息)');
|
||||
}
|
||||
|
||||
lines.push('');
|
||||
lines.push(' 实体归一化(用于加分):');
|
||||
if (normalizedEntityWeights?.size) {
|
||||
@@ -816,13 +959,37 @@ function formatRecallLog({
|
||||
|
||||
// L2
|
||||
const rrfStats = eventResults[0]?._rrfStats || {};
|
||||
const textOnlyStats = eventResults[0]?._textOnlyStats || {};
|
||||
|
||||
lines.push('');
|
||||
lines.push(' L2 事件记忆 (RRF 混合检索):');
|
||||
lines.push(` 总事件: ${allEvents.length} 条 | 最终: ${eventResults.length} 条`);
|
||||
lines.push(` 向量路: ${rrfStats.vectorCount || 0} 条 | 文本路: ${rrfStats.textCount || 0} 条`);
|
||||
lines.push(` HYBRID: ${rrfStats.hybridCount || 0} 条 | 纯 VECTOR: ${rrfStats.vectorOnlyCount || 0} 条 | 纯 TEXT (已过滤): ${rrfStats.textOnlyFiltered || 0} 条`);
|
||||
lines.push(` HYBRID: ${rrfStats.hybridCount || 0} 条 | 纯 VECTOR: ${rrfStats.vectorOnlyCount || 0} 条`);
|
||||
|
||||
lines.push('');
|
||||
lines.push(' TEXT-only 质量控制:');
|
||||
lines.push(` 候选: ${textOnlyStats.total || 0} 条`);
|
||||
lines.push(` 通过软校验 (sim>=${CONFIG.TEXT_SOFT_MIN_SIM}): ${textOnlyStats.passedSoftCheck || 0} 条`);
|
||||
lines.push(` 语义过滤: ${textOnlyStats.filtered || 0} 条`);
|
||||
lines.push(` 限额截断 (max=${CONFIG.TEXT_TOTAL_MAX}): ${textOnlyStats.truncatedByLimit || 0} 条`);
|
||||
lines.push(` 最终入选: ${textOnlyStats.finalIncluded || 0} 条`);
|
||||
|
||||
const textOnlyEvents = eventResults.filter(e => e._recallReason === 'TEXT');
|
||||
if (textOnlyEvents.length > 0) {
|
||||
lines.push('');
|
||||
lines.push(' TEXT-only 入选事件:');
|
||||
textOnlyEvents.forEach((e, i) => {
|
||||
const ev = e.event || {};
|
||||
const id = ev.id || '?';
|
||||
const title = (ev.title || '').slice(0, 25) || '(无标题)';
|
||||
const sim = (e._rawSim || 0).toFixed(2);
|
||||
const tRank = e._rrfDetail?.tRank ?? '?';
|
||||
lines.push(` ${i + 1}. [${id}] ${title.padEnd(25)} sim=${sim} tRank=${tRank}`);
|
||||
});
|
||||
}
|
||||
const entityBoostedEvents = eventResults.filter(e => e._entityBonus > 0).length;
|
||||
lines.push('');
|
||||
lines.push(` 实体加分事件: ${entityBoostedEvents} 条`);
|
||||
|
||||
if (textGapInfo) {
|
||||
@@ -886,14 +1053,17 @@ export async function recallMemory(queryText, allEvents, vectorConfig, options =
|
||||
const expandedTerms = expandByFacts(queryEntities, facts, 2);
|
||||
const normalizedEntityWeights = normalizeEntityWeights(queryEntityWeights);
|
||||
|
||||
// 构建文本查询串:最后一条消息 + 实体 + 关键词
|
||||
const lastSeg = segments[segments.length - 1] || '';
|
||||
const queryTextForSearch = [
|
||||
lastSeg,
|
||||
...queryEntities,
|
||||
...expandedTerms,
|
||||
...(store?.json?.keywords || []).slice(0, 5).map(k => k.text),
|
||||
].join(' ');
|
||||
let queryTextForSearch = '';
|
||||
let textQueryBreakdown = null;
|
||||
try {
|
||||
const result = await buildTextSearchQuery(segments, queryEntities, facts, expandedTerms);
|
||||
queryTextForSearch = result.query;
|
||||
textQueryBreakdown = result.breakdown;
|
||||
} catch (e) {
|
||||
xbLog.warn(MODULE_ID, '文本路 Query 构建失败,降级到简单拼接', e);
|
||||
const lastSeg = segments[segments.length - 1] || '';
|
||||
queryTextForSearch = [lastSeg, ...queryEntities, ...expandedTerms].join(' ');
|
||||
}
|
||||
|
||||
// L0 召回
|
||||
let l0Results = [];
|
||||
@@ -954,6 +1124,7 @@ export async function recallMemory(queryText, allEvents, vectorConfig, options =
|
||||
l0Results,
|
||||
textGapInfo,
|
||||
expandedTerms,
|
||||
textQueryBreakdown,
|
||||
});
|
||||
|
||||
console.group('%c[Recall]', 'color: #7c3aed; font-weight: bold');
|
||||
|
||||
287
modules/story-summary/vector/tokenizer.js
Normal file
287
modules/story-summary/vector/tokenizer.js
Normal file
@@ -0,0 +1,287 @@
|
||||
import { xbLog } from '../../../core/debug-core.js';
|
||||
import { extensionFolderPath } from '../../../core/constants.js';
|
||||
|
||||
const MODULE_ID = 'tokenizer';
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 词性过滤
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// 保留的词性(名词类 + 英文)
|
||||
// POS tag prefixes to keep: the noun family ('n', 'nr', 'ns', 'nt', 'nz', …)
// plus English tokens ('eng').
const KEEP_POS_PREFIXES = ['n', 'eng'];

/**
 * Returns true when a jieba part-of-speech tag belongs to a kept category.
 * @param {string} pos - jieba POS tag, e.g. 'nr', 'v', 'eng'
 * @returns {boolean}
 */
function shouldKeepByPos(pos) {
    for (const prefix of KEEP_POS_PREFIXES) {
        if (pos.startsWith(prefix)) return true;
    }
    return false;
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// 语言检测
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/**
 * Decides whether jieba segmentation is worthwhile for this text:
 * it must contain at least five CJK ideographs.
 * @param {string} text
 * @returns {boolean}
 */
function shouldUseJieba(text) {
    const hanMatches = text.match(/[\u4e00-\u9fff]/g);
    const hanCount = hanMatches ? hanMatches.length : 0;
    return hanCount >= 5;
}
|
||||
|
||||
/**
 * Classifies the dominant script of a text as 'zh', 'jp' or 'en' by counting
 * characters per script. Kana share above 20% wins first ('jp'), then Latin
 * share above 50% ('en'); everything else — including empty text — is 'zh'.
 * @param {string} text
 * @returns {'zh'|'jp'|'en'}
 */
function detectMainLanguage(text) {
    const countMatches = (re) => (text.match(re) || []).length;

    const zh = countMatches(/[\u4e00-\u9fff]/g);
    const jp = countMatches(/[\u3040-\u309f\u30a0-\u30ff]/g);
    const en = countMatches(/[a-zA-Z]/g);
    // Guard against division by zero for text with no counted characters.
    const total = zh + jp + en || 1;

    if (jp / total > 0.2) return 'jp';
    if (en / total > 0.5) return 'en';
    return 'zh';
}
|
||||
|
||||
// Replaces the previous large stop-word table: a small, curated set of
// generic low-information nouns that should never surface as search terms.
const STOP_WORDS = new Set([
    // role / system words
    '用户', '角色', '玩家', '旁白', 'user', 'assistant', 'system',
    // generic time words
    '时候', '现在', '今天', '明天', '昨天', '早上', '晚上',
    // generic location words
    '这里', '那里', '上面', '下面', '里面', '外面',
    // generic catch-all nouns
    '东西', '事情', '事儿', '地方', '样子', '意思', '感觉',
    '一下', '一些', '一点', '一会', '一次',
]);

// English stop words (only used by the regex fallback tokenizer).
const EN_STOP_WORDS = new Set([
    'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been',
    'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
    'could', 'should', 'may', 'might', 'must', 'can',
    'to', 'of', 'in', 'on', 'at', 'for', 'with', 'by', 'from',
    'and', 'or', 'but', 'if', 'that', 'this', 'it', 'its',
    'i', 'you', 'he', 'she', 'we', 'they',
    'my', 'your', 'his', 'her', 'our', 'their',
    'what', 'which', 'who', 'whom', 'where', 'when', 'why', 'how',
]);
|
||||
|
||||
// Lazy-loaded jieba-wasm module state (module-level singletons).
let jiebaModule = null;   // the imported wasm module namespace, or null
let jiebaReady = false;   // true once the wasm is initialized and usable
let jiebaLoading = false; // true while a load attempt is in flight

/**
 * Lazily loads and initializes the jieba-wasm tokenizer.
 *
 * Concurrent callers while a load is in flight poll until the load settles
 * (at most ~5 s). Fixes over the previous version:
 *   - waiters now stop polling as soon as the loader fails (previously they
 *     always waited the full 5 s even after failure);
 *   - `jiebaLoading` is cleared in `finally`, so it can never be left stuck
 *     `true`, and a failed load can be retried by a later caller.
 *
 * @returns {Promise<boolean>} true when jieba is ready to use
 */
async function ensureJieba() {
    if (jiebaReady) return true;

    // Another caller is already loading: poll until it settles (max ~5 s).
    if (jiebaLoading) {
        for (let i = 0; i < 50; i++) {
            await new Promise(r => setTimeout(r, 100));
            if (jiebaReady) return true;
            // The loader finished unsuccessfully — stop waiting immediately.
            if (!jiebaLoading) return false;
        }
        return false;
    }

    jiebaLoading = true;

    try {
        const jiebaPath = `/${extensionFolderPath}/libs/jieba-wasm/jieba_rs_wasm.js`;
        // eslint-disable-next-line no-unsanitized/method
        jiebaModule = await import(jiebaPath);

        // wasm-bindgen builds export an init function as `default`.
        if (jiebaModule.default) {
            await jiebaModule.default();
        }

        jiebaReady = true;
        xbLog.info(MODULE_ID, 'jieba-wasm 加载成功');
        // Debug logging: dump the module surface so the expected `tag`
        // export can be verified in the field.
        const keys = Object.getOwnPropertyNames(jiebaModule || {});
        const dkeys = Object.getOwnPropertyNames(jiebaModule?.default || {});
        xbLog.info(MODULE_ID, `jieba keys: ${keys.join(',')}`);
        xbLog.info(MODULE_ID, `jieba default keys: ${dkeys.join(',')}`);
        xbLog.info(MODULE_ID, `jieba.tag: ${typeof jiebaModule?.tag}`);
        return true;
    } catch (e) {
        xbLog.error(MODULE_ID, 'jieba-wasm 加载失败', e);
        return false;
    } finally {
        // Always clear the in-flight flag so waiters stop polling and a
        // failed load can be retried.
        jiebaLoading = false;
    }
}
|
||||
|
||||
/**
 * Regex-based tokenizer used when jieba is unavailable or the text is not
 * Chinese-dominant. Collects English words (minus stop words), kana runs
 * (Japanese text only), CJK ideograph runs, and digit+ideograph compounds.
 * @param {string} text
 * @returns {string[]} raw tokens (may contain duplicates; callers dedupe)
 */
function fallbackTokenize(text) {
    const lang = detectMainLanguage(text);
    const tokens = [];

    // English words — stop words are removed here, at the source.
    for (const word of text.match(/[a-zA-Z]{2,20}/gi) || []) {
        if (!EN_STOP_WORDS.has(word.toLowerCase())) {
            tokens.push(word);
        }
    }

    // Kana runs only matter when the text is mainly Japanese.
    if (lang === 'jp') {
        tokens.push(...(text.match(/[\u3040-\u309f\u30a0-\u30ff]{2,10}/g) || []));
    }

    // Chinese (or Japanese kanji) ideograph runs.
    tokens.push(...(text.match(/[\u4e00-\u9fff]{2,6}/g) || []));

    // Digit + ideograph compounds (quantities, dates, etc.).
    tokens.push(...(text.match(/\d+[\u4e00-\u9fff]{1,4}/g) || []));

    return tokens;
}
|
||||
|
||||
/**
 * Extracts deduplicated noun-like terms from text.
 *
 * Chinese-dominant text is POS-tagged with jieba and filtered to noun/eng
 * tags; anything else — or any jieba failure — goes through the regex
 * fallback tokenizer. Fix over the previous version: if `jieba.tag` returns
 * an unexpected non-array value, we now fall through to the fallback
 * tokenizer instead of silently returning an empty list.
 *
 * @param {string} text
 * @param {{minLen?: number, maxCount?: number}} [options]
 *        minLen - minimum word length (default 2);
 *        maxCount - cap on returned terms, 0 = unlimited (default 0)
 * @returns {Promise<string[]>} unique terms in first-seen order
 */
export async function extractNouns(text, options = {}) {
    const { minLen = 2, maxCount = 0 } = options;
    if (!text?.trim()) return [];

    // Chinese-dominant → jieba POS tagging.
    if (shouldUseJieba(text)) {
        const hasJieba = await ensureJieba();

        if (hasJieba && jiebaModule?.tag) {
            try {
                const tagged = jiebaModule.tag(text, true);

                if (Array.isArray(tagged)) {
                    const result = [];
                    const seen = new Set();

                    for (const item of tagged) {
                        // jieba-wasm builds differ in pair shape: accept both
                        // [word, pos] tuples and object-style entries.
                        let word = '';
                        let pos = '';
                        if (Array.isArray(item)) {
                            [word, pos] = item;
                        } else if (item && typeof item === 'object') {
                            word = item.word || item.w || item.text || item.term || '';
                            pos = item.tag || item.pos || item.p || '';
                        }
                        if (!word || !pos) continue;
                        if (word.length < minLen) continue;
                        if (!shouldKeepByPos(pos)) continue;
                        if (STOP_WORDS.has(word)) continue;
                        if (seen.has(word)) continue;

                        seen.add(word);
                        result.push(word);

                        if (maxCount > 0 && result.length >= maxCount) break;
                    }

                    return result;
                }

                // Unexpected tag() result shape — fall through to the regex
                // fallback instead of silently returning nothing.
                xbLog.warn(MODULE_ID, 'jieba tag 返回了非数组结果,使用 fallback');
            } catch (e) {
                xbLog.warn(MODULE_ID, 'jieba tag 失败:' + (e && e.message ? e.message : String(e)));
            }
        }
    }

    // Non-Chinese text / jieba failure → regex fallback.
    const tokens = fallbackTokenize(text);

    const result = [];
    const seen = new Set();

    for (const t of tokens) {
        if (t.length < minLen) continue;
        if (STOP_WORDS.has(t)) continue;
        if (seen.has(t)) continue;

        seen.add(t);
        result.push(t);

        if (maxCount > 0 && result.length >= maxCount) break;
    }

    return result;
}
|
||||
|
||||
/**
 * Extracts the most "rare" (information-dense) terms from text, ranked by a
 * heuristic score: longer words, Latin letters, digits, and proper-noun POS
 * tags (nr/ns/nt/nz) all raise the score.
 *
 * Chinese-dominant text uses jieba POS tagging; otherwise (or on any jieba
 * failure) terms come from `extractNouns` and are scored without POS info.
 * Fix over the previous version: a non-array `jieba.tag` result now falls
 * through to the fallback path instead of silently returning an empty list.
 *
 * @param {string} text
 * @param {number} [maxCount=15] - cap on returned terms
 * @returns {Promise<string[]>} terms sorted by descending rarity score
 */
export async function extractRareTerms(text, maxCount = 15) {
    if (!text?.trim()) return [];

    // Chinese-dominant → jieba POS tagging.
    if (shouldUseJieba(text)) {
        const hasJieba = await ensureJieba();

        if (hasJieba && jiebaModule?.tag) {
            try {
                const tagged = jiebaModule.tag(text, true);

                if (Array.isArray(tagged)) {
                    const candidates = [];
                    const seen = new Set();

                    for (const item of tagged) {
                        // Accept both [word, pos] tuples and object entries.
                        let word = '';
                        let pos = '';
                        if (Array.isArray(item)) {
                            [word, pos] = item;
                        } else if (item && typeof item === 'object') {
                            word = item.word || item.w || item.text || item.term || '';
                            pos = item.tag || item.pos || item.p || '';
                        }
                        if (!word || !pos) continue;
                        if (word.length < 2) continue;
                        if (!shouldKeepByPos(pos)) continue;
                        if (STOP_WORDS.has(word)) continue;
                        if (seen.has(word)) continue;

                        seen.add(word);

                        // Rarity score heuristic.
                        let score = 0;
                        if (word.length >= 4) score += 3;
                        else if (word.length >= 3) score += 1;
                        if (/[a-zA-Z]/.test(word)) score += 2;
                        if (/\d/.test(word)) score += 1;
                        // Proper-noun POS tags (person/place/org/other) boost.
                        if (['nr', 'ns', 'nt', 'nz'].some(p => pos.startsWith(p))) score += 2;

                        candidates.push({ term: word, score });
                    }

                    // Array#sort is stable, so ties keep first-seen order.
                    candidates.sort((a, b) => b.score - a.score);
                    return candidates.slice(0, maxCount).map(x => x.term);
                }

                // Unexpected tag() result shape — use the fallback path.
                xbLog.warn(MODULE_ID, 'jieba tag 返回了非数组结果,使用 fallback');
            } catch (e) {
                xbLog.warn(MODULE_ID, 'jieba tag 失败:' + (e && e.message ? e.message : String(e)));
            }
        }
    }

    // Non-Chinese text / jieba failure → score generic nouns (no POS boost).
    const allNouns = await extractNouns(text, { minLen: 2, maxCount: 0 });

    const scored = allNouns.map(t => {
        let score = 0;
        if (t.length >= 4) score += 3;
        else if (t.length >= 3) score += 1;
        if (/[a-zA-Z]/.test(t)) score += 2;
        if (/\d/.test(t)) score += 1;
        return { term: t, score };
    });

    scored.sort((a, b) => b.score - a.score);
    return scored.slice(0, maxCount).map(x => x.term);
}
|
||||
|
||||
/**
 * Mines noun terms from the O(bject) side of fact triples whose subject is
 * one of the relevant entities.
 * @param {Array<{s?: string, o?: string, retracted?: boolean}>} facts
 * @param {Set<string>} relevantSubjects - subjects worth mining
 * @param {number} [maxCount=5] - cap on returned terms
 * @returns {Promise<string[]>} noun terms extracted from the collected O texts
 */
export async function extractNounsFromFactsO(facts, relevantSubjects, maxCount = 5) {
    if (!facts?.length || !relevantSubjects?.size) return [];

    const objectTexts = [];

    for (const fact of facts) {
        if (fact.retracted) continue;

        // Only facts whose subject is a relevant entity are considered.
        const subject = String(fact.s || '').trim();
        if (!relevantSubjects.has(subject)) continue;

        const objectText = String(fact.o || '').trim();
        if (!objectText) continue;

        // Very long O values are likely full sentences, not term material.
        if (objectText.length > 30) continue;

        objectTexts.push(objectText);
    }

    if (objectTexts.length === 0) return [];

    return await extractNouns(objectTexts.join(' '), { minLen: 2, maxCount });
}
|
||||
|
||||
export { ensureJieba };
|
||||
|
||||
Reference in New Issue
Block a user