feat(api): 新增图片问答、意图分类与任务管理功能

- 新增 SocialImageAgentService 支持朋友圈/聊天截图解析,提供图片线索提取与图谱问答建议
- 扩展 LLMService 支持多模型配置(意图分类、图片模型)与流式响应,增加思考模式控制
- 新增 /intent/classify 端点用于轻量意图分类(问答/导入/混合)以节省 token
- 新增 /tasks/{taskId} 与 /tasks/{taskId}/retry 端点用于流式任务状态查询与重试
- 前端 Dashboard 扩展人物详情显示(年龄标签、出生日期、别名、关系置信度等)
- 前端导入流程增加任务 ID 追踪与质量回放信息展示
This commit is contained in:
KOSHM-Pig
2026-03-25 00:28:22 +08:00
parent 1f087599c5
commit 447062446a
10 changed files with 3852 additions and 149 deletions

View File

@@ -26,5 +26,8 @@ export const env = {
LLM_BASE_URL: process.env.LLM_BASE_URL ?? "",
LLM_API_KEY: process.env.LLM_API_KEY ?? "",
LLM_MODEL_NAME: process.env.LLM_MODEL_NAME ?? "",
LLM_INTENT_MODEL_NAME: process.env.LLM_INTENT_MODEL_NAME ?? "",
LLM_IMAGE_MODEL_NAME: process.env.LLM_IMAGE_MODEL_NAME ?? "",
LLM_THINKING_MODE: process.env.LLM_THINKING_MODE ?? "auto",
ADMIN_PASSWORD: process.env.ADMIN_PASSWORD ?? "oncelove123"
};

View File

@@ -14,6 +14,187 @@ const sendServiceResult = async (reply, action) => {
}
};
// In-memory task registry: tracks streaming task status, progress and retry info.
const TASK_STORE_MAX = 300;
const taskStore = new Map();
// Current timestamp in ISO-8601 form.
const nowIso = () => new Date().toISOString();
// Evict the oldest records (by updated_at) once the store exceeds its cap.
const trimTaskStore = () => {
  const overflow = taskStore.size - TASK_STORE_MAX;
  if (overflow <= 0) return;
  const oldestFirst = [...taskStore.entries()].sort(
    (left, right) => Date.parse(left[1].updated_at) - Date.parse(right[1].updated_at)
  );
  for (const [taskId] of oldestFirst.slice(0, overflow)) {
    taskStore.delete(taskId);
  }
};
// Create, register and return a fresh task record in "pending" state.
// The id combines a millisecond timestamp with a short random suffix.
const createTask = (type, payload) => {
  const randomSuffix = Math.random().toString(36).slice(2, 8);
  const taskId = `task_${Date.now()}_${randomSuffix}`;
  const record = {
    task_id: taskId,
    type,
    status: "pending",
    retry_count: 0,
    payload: payload || {},
    last_progress: null,
    result_summary: null,
    quality_replay: null,
    error: null,
    started_at: nowIso(),
    updated_at: nowIso(),
    finished_at: null
  };
  taskStore.set(taskId, record);
  trimTaskStore();
  return record;
};
// Merge `patch` into an existing task record, refreshing updated_at.
// Returns the merged record, or null when the task is unknown.
const updateTask = (taskId, patch = {}) => {
  const existing = taskStore.get(taskId);
  if (!existing) return null;
  const merged = { ...existing, ...patch, updated_at: nowIso() };
  taskStore.set(taskId, merged);
  return merged;
};
// Build a compact, type-specific summary of a finished task result.
// Unknown task types collapse to a bare { ok } flag.
const buildTaskResultSummary = (type, result) => {
  switch (type) {
    case "graphrag_multi_stream":
      return {
        rounds: Number(result?.meta?.rounds || result?.rounds?.length || 0),
        retrieval_mode: result?.meta?.retrieval_mode_requested || null,
        confidence: result?.final_review?.confidence ?? null,
        // Keep only a short preview of the answer to bound record size.
        answer_preview: String(result?.final_review?.answer || result?.answer || "").slice(0, 240)
      };
    case "analyze_stream":
      return {
        created: result?.stats?.created || {},
        updated: result?.stats?.updated || {},
        corrected_events: Number(result?.stats?.corrected_events || 0),
        vector_indexed: Number(result?.vector_sync?.indexed || 0)
      };
    default:
      return { ok: Boolean(result?.ok) };
  }
};
// Build a quality-replay snapshot (sanitized input, output highlights, error)
// so a task run can be audited or replayed later. `result` may be null when
// the task failed; `error` is folded into a { message } view.
const buildTaskQualityReplay = (type, taskPayload, result, error = null) => {
  const errorView = error ? { message: error?.message || "internal error" } : null;
  if (type === "graphrag_multi_stream") {
    const input = {
      userId: taskPayload?.userId || "default",
      query: String(taskPayload?.query_text || taskPayload?.query || "").slice(0, 300),
      retrieval_mode: taskPayload?.retrieval_mode || "hybrid",
      max_rounds: Number(taskPayload?.max_rounds || 3),
      top_k: Number(taskPayload?.top_k || 8)
    };
    const output = result
      ? {
          confidence: result?.final_review?.confidence ?? null,
          relation_quality: result?.meta?.relation_quality || null,
          retrieval_mode_adaptive: Boolean(result?.meta?.retrieval_mode_adaptive)
        }
      : {};
    return { input, output, error: errorView };
  }
  if (type === "analyze_stream") {
    const input = {
      userId: taskPayload?.userId || "default",
      text_length: String(taskPayload?.text || "").length,
      parallelism: Number(taskPayload?.parallelism || 0) || null
    };
    const output = result
      ? {
          created: result?.stats?.created || {},
          corrected_events: Number(result?.stats?.corrected_events || 0),
          normalization: result?.normalization || null
        }
      : {};
    return { input, output, error: errorView };
  }
  // Fallback for unknown task types: only record the type itself.
  return {
    input: { type },
    output: result ? { ok: Boolean(result?.ok) } : {},
    error: errorView
  };
};
// Project a task record onto its client-safe fields (notably omitting the raw
// payload, which may contain large or sensitive request bodies).
const safeTaskView = (record) => {
  if (!record) return null;
  const {
    task_id, type, status, retry_count, last_progress,
    result_summary, quality_replay, error,
    started_at, updated_at, finished_at
  } = record;
  return {
    task_id, type, status, retry_count, last_progress,
    result_summary, quality_replay, error,
    started_at, updated_at, finished_at
  };
};
// Hijack the Fastify reply and prepare it for Server-Sent Events.
// Returns { raw, runId, push }: `push` serializes one SSE event, tagging every
// payload with the run id; non-object data is wrapped under a `value` key.
const createStreamContext = (request, reply) => {
  reply.hijack();
  const raw = reply.raw;
  const origin = request.headers.origin;
  const sseHeaders = {
    'Access-Control-Allow-Origin': origin || '*',
    'Vary': 'Origin',
    'Access-Control-Allow-Headers': 'Content-Type, Authorization',
    'Access-Control-Allow-Methods': 'POST, OPTIONS',
    'Content-Type': 'text/event-stream; charset=utf-8',
    'Cache-Control': 'no-cache, no-transform',
    'Connection': 'keep-alive',
    'X-Accel-Buffering': 'no'
  };
  for (const [name, value] of Object.entries(sseHeaders)) {
    raw.setHeader(name, value);
  }
  raw.flushHeaders?.();
  const runId = `run_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
  const push = (event, data) => {
    const body = data && typeof data === 'object'
      ? { run_id: runId, ...data }
      : { run_id: runId, value: data };
    raw.write(`event: ${event}\ndata: ${JSON.stringify(body)}\n\n`);
  };
  return { raw, runId, push };
};
// Run `action` inside a guarded SSE stream: emits a leading `meta` event,
// then either the final `done` payload on success or a single `error` event
// on failure. The raw socket is always closed, even on throw.
const streamWithGuard = async (request, reply, action) => {
  const { raw, runId, push } = createStreamContext(request, reply);
  try {
    push('meta', { run_id: runId });
    const result = await action(push, runId);
    push('done', result);
  } catch (error) {
    const statusCode = Number(error?.statusCode) || 500;
    push('error', {
      ok: false,
      statusCode,
      message: error?.message || 'internal error'
    });
  } finally {
    raw.end();
  }
};
// Dispatch a stored task record to the matching service call, wiring the
// progress callback through. Throws for a missing record or unknown type.
const executeTaskByType = async (service, taskRecord, onProgress = () => {}) => {
  if (!taskRecord) throw new Error("任务不存在");
  switch (taskRecord.type) {
    case "graphrag_multi_stream":
      return service.queryGraphRagMultiRound(taskRecord.payload, { onProgress });
    case "analyze_stream": {
      const body = taskRecord.payload || {};
      return service.incrementalUpdate(body.text, body.userId || "default", {
        parallelism: body.parallelism,
        expertReview: body.expertReview,
        onProgress
      });
    }
    default:
      throw new Error(`不支持的任务类型: ${taskRecord.type}`);
  }
};
/**
* GraphRAG 控制器:负责请求转发与响应封装。
*/
@@ -30,37 +211,38 @@ export const createGraphRagController = (service, multiAgentService) => ({
sendServiceResult(reply, () => service.queryGraphRag(request.body)),
queryGraphRagMultiRound: async (request, reply) =>
sendServiceResult(reply, () => service.queryGraphRagMultiRound(request.body)),
classifyIntent: async (request, reply) =>
sendServiceResult(reply, () => service.classifyIntent(request.body)),
queryGraphRagMultiRoundStream: async (request, reply) => {
reply.hijack();
const raw = reply.raw;
const requestOrigin = request.headers.origin;
raw.setHeader('Access-Control-Allow-Origin', requestOrigin || '*');
raw.setHeader('Vary', 'Origin');
raw.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
raw.setHeader('Access-Control-Allow-Methods', 'POST, OPTIONS');
raw.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
raw.setHeader('Cache-Control', 'no-cache, no-transform');
raw.setHeader('Connection', 'keep-alive');
raw.setHeader('X-Accel-Buffering', 'no');
raw.flushHeaders?.();
const push = (event, data) => {
raw.write(`event: ${event}\n`);
raw.write(`data: ${JSON.stringify(data)}\n\n`);
};
try {
const result = await service.queryGraphRagMultiRound(request.body, {
onProgress: (event) => push('progress', event)
});
push('done', result);
} catch (error) {
push('error', {
ok: false,
statusCode: Number(error?.statusCode) || 500,
message: error?.message || 'internal error'
});
} finally {
raw.end();
}
const task = createTask("graphrag_multi_stream", request.body || {});
return streamWithGuard(request, reply, async (push) => {
updateTask(task.task_id, { status: "running" });
push("meta", { task_id: task.task_id });
try {
const result = await executeTaskByType(service, task, (event) => {
updateTask(task.task_id, { last_progress: event || null });
push('progress', event);
});
updateTask(task.task_id, {
status: "done",
result_summary: buildTaskResultSummary(task.type, result),
quality_replay: buildTaskQualityReplay(task.type, task.payload, result),
finished_at: nowIso(),
error: null
});
return result;
} catch (error) {
updateTask(task.task_id, {
status: "error",
quality_replay: buildTaskQualityReplay(task.type, task.payload, null, error),
finished_at: nowIso(),
error: {
message: error?.message || "internal error"
}
});
throw error;
}
});
},
analyzeAndIngest: async (request, reply) =>
sendServiceResult(reply, () => service.incrementalUpdate(request.body.text, request.body.userId || 'default')),
@@ -70,44 +252,104 @@ export const createGraphRagController = (service, multiAgentService) => ({
limit: request.body.limit
})),
analyzeAndIngestStream: async (request, reply) => {
reply.hijack();
const raw = reply.raw;
const requestOrigin = request.headers.origin;
raw.setHeader('Access-Control-Allow-Origin', requestOrigin || '*');
raw.setHeader('Vary', 'Origin');
raw.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
raw.setHeader('Access-Control-Allow-Methods', 'POST, OPTIONS');
raw.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
raw.setHeader('Cache-Control', 'no-cache, no-transform');
raw.setHeader('Connection', 'keep-alive');
raw.setHeader('X-Accel-Buffering', 'no');
raw.flushHeaders?.();
const push = (event, data) => {
raw.write(`event: ${event}\n`);
raw.write(`data: ${JSON.stringify(data)}\n\n`);
};
try {
const task = createTask("analyze_stream", request.body || {});
return streamWithGuard(request, reply, async (push) => {
updateTask(task.task_id, { status: "running" });
push("meta", { task_id: task.task_id });
push('progress', { stage: 'start' });
const result = await service.incrementalUpdate(
request.body.text,
request.body.userId || 'default',
{
parallelism: request.body.parallelism,
expertReview: request.body.expertReview,
onProgress: (event) => push('progress', event)
}
);
push('done', result);
} catch (error) {
push('error', {
ok: false,
statusCode: Number(error?.statusCode) || 500,
message: error?.message || 'internal error'
});
} finally {
raw.end();
}
try {
const result = await executeTaskByType(service, task, (event) => {
updateTask(task.task_id, { last_progress: event || null });
push('progress', event);
});
updateTask(task.task_id, {
status: "done",
result_summary: buildTaskResultSummary(task.type, result),
quality_replay: buildTaskQualityReplay(task.type, task.payload, result),
finished_at: nowIso(),
error: null
});
return result;
} catch (error) {
updateTask(task.task_id, {
status: "error",
quality_replay: buildTaskQualityReplay(task.type, task.payload, null, error),
finished_at: nowIso(),
error: {
message: error?.message || "internal error"
}
});
throw error;
}
});
},
getTaskStatus: async (request, reply) =>
sendServiceResult(reply, async () => {
const taskId = String(request.params.taskId || "").trim();
if (!taskId) {
const error = new Error("taskId 不能为空");
error.statusCode = 400;
throw error;
}
const task = taskStore.get(taskId);
if (!task) {
const error = new Error(`任务不存在: ${taskId}`);
error.statusCode = 404;
throw error;
}
return { ok: true, task: safeTaskView(task) };
}),
retryTask: async (request, reply) =>
sendServiceResult(reply, async () => {
const sourceTaskId = String(request.params.taskId || "").trim();
if (!sourceTaskId) {
const error = new Error("taskId 不能为空");
error.statusCode = 400;
throw error;
}
const sourceTask = taskStore.get(sourceTaskId);
if (!sourceTask) {
const error = new Error(`任务不存在: ${sourceTaskId}`);
error.statusCode = 404;
throw error;
}
const overridePayload = request.body && typeof request.body === "object" ? request.body : {};
const nextTask = createTask(sourceTask.type, {
...(sourceTask.payload || {}),
...overridePayload
});
updateTask(nextTask.task_id, {
status: "running",
retry_count: Number(sourceTask.retry_count || 0) + 1,
quality_replay: sourceTask.quality_replay || null
});
executeTaskByType(service, nextTask, (event) => {
updateTask(nextTask.task_id, { last_progress: event || null });
}).then((result) => {
updateTask(nextTask.task_id, {
status: "done",
result_summary: buildTaskResultSummary(nextTask.type, result),
quality_replay: buildTaskQualityReplay(nextTask.type, nextTask.payload, result),
finished_at: nowIso(),
error: null
});
}).catch((err) => {
updateTask(nextTask.task_id, {
status: "error",
quality_replay: buildTaskQualityReplay(nextTask.type, nextTask.payload, null, err),
finished_at: nowIso(),
error: {
message: err?.message || "internal error"
}
});
});
return {
ok: true,
source_task_id: sourceTaskId,
task_id: nextTask.task_id,
status: "running"
};
}),
queryHistory: async (request, reply) =>
sendServiceResult(reply, () => service.queryRelationshipHistory(
request.body.userId || 'default',

View File

@@ -262,6 +262,29 @@ export const registerGraphRagRoutes = async (app, controller) => {
* description: 参数错误
*/
app.post("/query/graphrag", controller.queryGraphRag);
/**
* @openapi
* /intent/classify:
* post:
* tags:
* - GraphRAG
* summary: 轻量意图分类(问答/导入/混合)用于节省 token
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* text:
* type: string
* responses:
* 200:
* description: 分类成功
* 400:
* description: 参数错误
*/
app.post("/intent/classify", controller.classifyIntent);
/**
* @openapi
* /query/graphrag/multi:
@@ -353,6 +376,53 @@ export const registerGraphRagRoutes = async (app, controller) => {
* description: 参数错误
*/
app.post("/analyze/stream", controller.analyzeAndIngestStream);
/**
* @openapi
* /tasks/{taskId}:
* get:
* tags:
* - GraphRAG
* summary: 查询任务状态(适用于流式问答/流式分析任务)
* parameters:
* - in: path
* name: taskId
* required: true
* schema:
* type: string
* responses:
* 200:
* description: 查询成功
* 404:
* description: 任务不存在
*/
app.get("/tasks/:taskId", controller.getTaskStatus);
/**
* @openapi
* /tasks/{taskId}/retry:
* post:
* tags:
* - GraphRAG
* summary: 重试指定任务(异步触发,立即返回新任务ID)
* parameters:
* - in: path
* name: taskId
* required: true
* schema:
* type: string
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* description: 可选覆盖原任务参数
* responses:
* 200:
* description: 重试任务已启动
* 404:
* description: 原任务不存在
*/
app.post("/tasks/:taskId/retry", controller.retryTask);
/**
* @openapi

View File

@@ -5,7 +5,7 @@ import swaggerUi from "@fastify/swagger-ui";
import { env } from "./config/env.js";
import { createClients } from "./config/clients.js";
import { createSwaggerSpec } from "./config/swagger.js";
import { EmbeddingService, RerankService, GraphRagService, LLMService, MultiAgentService } from "./services/index.js";
import { EmbeddingService, RerankService, GraphRagService, LLMService, MultiAgentService, SocialImageAgentService } from "./services/index.js";
import { createGraphRagController } from "./controllers/index.js";
import { registerRoutes } from "./routes/index.js";
@@ -34,12 +34,14 @@ export const createServer = async () => {
const embeddingService = new EmbeddingService(env);
const rerankService = new RerankService(env);
const llmService = new LLMService(env);
const socialImageAgentService = new SocialImageAgentService({ llmService });
const service = new GraphRagService({
driver: neo4jDriver,
qdrantClient,
embeddingService,
rerankService,
llmService,
socialImageAgentService,
env
});
const multiAgentService = new MultiAgentService({

File diff suppressed because it is too large Load Diff

View File

@@ -3,3 +3,4 @@ export { RerankService } from "./rerank.service.js";
export { GraphRagService } from "./graphrag.service.js";
export { LLMService } from "./llm.service.js";
export { MultiAgentService } from "./multiagent.service.js";
export { SocialImageAgentService } from "./social-image-agent.service.js";

View File

@@ -12,10 +12,76 @@ const estimateTokens = (text) => {
const estimateMessageTokens = (messages = []) => {
return messages.reduce((sum, message) => {
return sum + estimateTokens(message?.content || "") + 6;
const content = message?.content;
if (Array.isArray(content)) {
const merged = content.map((part) => {
if (!part || typeof part !== "object") return "";
if (typeof part.text === "string") return part.text;
if (typeof part.image_url === "string") return part.image_url;
if (part.image_url && typeof part.image_url === "object" && typeof part.image_url.url === "string") return part.image_url.url;
if (part.source && typeof part.source === "object" && typeof part.source.data === "string") return part.source.data.slice(0, 64);
return "";
}).join(" ");
return sum + estimateTokens(merged) + 6;
}
return sum + estimateTokens(content || "") + 6;
}, 0);
};
// Coerce one message-content part into the structured chat format.
// Strings become text parts; bare { text } objects are promoted; shorthand
// string image_url values are wrapped as { url }. Returns null for anything
// unusable so callers can filter it out.
const normalizeContentPart = (part) => {
  if (typeof part === "string") {
    return { type: "text", text: part };
  }
  if (part === null || typeof part !== "object") {
    return null;
  }
  const hasExplicitType = typeof part.type === "string" && part.type.trim().length > 0;
  if (!hasExplicitType) {
    return typeof part.text === "string" ? { ...part, type: "text" } : null;
  }
  if (part.type === "image_url" && typeof part.image_url === "string") {
    return { ...part, image_url: { url: part.image_url } };
  }
  return part;
};
// Normalize any messages whose content is an array of parts; messages with
// plain-string content pass through untouched. When every part of a message
// is dropped by normalization, a single empty text part is substituted so the
// provider never receives an empty content array. Returns the original array
// unchanged when no message needs work.
const normalizeMessagesForChat = (messages = []) => {
  const needsWork = messages.some((message) => Array.isArray(message?.content));
  if (!needsWork) return messages;
  return messages.map((message) => {
    if (!Array.isArray(message?.content)) {
      return message;
    }
    const parts = message.content.map(normalizeContentPart).filter(Boolean);
    return {
      ...message,
      content: parts.length > 0 ? parts : [{ type: "text", text: "" }]
    };
  });
};
// Extract the incremental text from one streaming chat-completions chunk.
// Handles string content, array-of-parts content, and falls back to
// reasoning_content when no regular content is present.
const extractDeltaText = (chunk = {}) => {
  const firstChoice = Array.isArray(chunk?.choices) ? chunk.choices[0] : undefined;
  const delta = firstChoice?.delta;
  if (!delta || typeof delta !== "object") return "";
  const { content } = delta;
  if (typeof content === "string") return content;
  if (Array.isArray(content)) {
    let merged = "";
    for (const item of content) {
      if (typeof item === "string") {
        merged += item;
      } else if (item && typeof item === "object" && typeof item.text === "string") {
        merged += item.text;
      }
    }
    return merged;
  }
  return typeof delta.reasoning_content === "string" ? delta.reasoning_content : "";
};
const parseJsonObject = (content) => {
if (!content || typeof content !== "string") {
throw new Error("empty content");
@@ -257,19 +323,65 @@ export class LLMService {
this.baseUrl = (env.LLM_BASE_URL ?? "").replace(/\/+$/, "");
this.apiKey = env.LLM_API_KEY ?? "";
this.model = env.LLM_MODEL_NAME ?? "";
this.intentModel = env.LLM_INTENT_MODEL_NAME ?? "";
this.imageModel = env.LLM_IMAGE_MODEL_NAME ?? "";
this.thinkingMode = String(env.LLM_THINKING_MODE ?? "auto").trim().toLowerCase();
}
isEnabled() {
return Boolean(this.baseUrl && this.apiKey && this.model);
}
async chat(messages, temperature = 0.7) {
if (!this.isEnabled()) {
throw createHttpError(400, "LLM 服务未配置,请提供 LLM_BASE_URL/LLM_API_KEY/LLM_MODEL_NAME");
isModelEnabled(modelName = "") {
return Boolean(this.baseUrl && this.apiKey && String(modelName || "").trim());
}
isIntentEnabled() {
return this.isModelEnabled(this.intentModel || this.model);
}
isImageEnabled() {
return this.isModelEnabled(this.imageModel || this.model);
}
async chat(messages, temperature = 0.7, options = {}) {
return this.chatWithModel(messages, temperature, this.model, 4096, options);
}
async chatImage(messages, temperature = 0.7, options = {}) {
return this.chatWithModel(messages, temperature, this.imageModel || this.model, 4096, options);
}
async chatStream(messages, temperature = 0.7, options = {}, onToken = () => {}) {
return this.chatWithModelStream(messages, temperature, this.model, 4096, options, onToken);
}
async chatWithModel(messages, temperature = 0.7, model = this.model, maxTokens = 4096, options = {}) {
if (!this.baseUrl || !this.apiKey) {
throw createHttpError(400, "LLM 服务未配置,请提供 LLM_BASE_URL/LLM_API_KEY");
}
if (!String(model || "").trim()) {
throw createHttpError(400, "LLM 模型未配置,请提供 LLM_MODEL_NAME 或 LLM_INTENT_MODEL_NAME");
}
const promptTokensEstimated = estimateMessageTokens(messages);
console.log(`[TOKEN] model=${this.model} prompt_tokens_estimated=${promptTokensEstimated} max_tokens=4096`);
const normalizedMessages = normalizeMessagesForChat(messages);
const promptTokensEstimated = estimateMessageTokens(normalizedMessages);
const normalizedModeRaw = String(options?.thinking || this.thinkingMode || "auto").trim().toLowerCase();
const thinkingMode = ["on", "off", "auto"].includes(normalizedModeRaw) ? normalizedModeRaw : "auto";
const isQwen3Family = /qwen\s*3|qwen3/i.test(String(model || ""));
const autoEnableThinking = isQwen3Family && promptTokensEstimated >= 1600 && Number(maxTokens || 0) >= 900;
const enableThinking = thinkingMode === "on" ? true : thinkingMode === "off" ? false : autoEnableThinking;
console.log(`[TOKEN] model=${model} prompt_tokens_estimated=${promptTokensEstimated} max_tokens=${maxTokens} thinking_mode=${thinkingMode} thinking_enabled=${enableThinking}`);
const payload = {
model,
messages: normalizedMessages,
temperature: temperature,
max_tokens: maxTokens
};
if (isQwen3Family) {
payload.extra_body = { enable_thinking: enableThinking };
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
@@ -277,12 +389,7 @@ export class LLMService {
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`
},
body: JSON.stringify({
model: this.model,
messages: messages,
temperature: temperature,
max_tokens: 4096
})
body: JSON.stringify(payload)
});
if (!response.ok) {
@@ -296,11 +403,102 @@ export class LLMService {
const completionTokens = usage.completion_tokens;
const totalTokens = usage.total_tokens;
console.log(
`[TOKEN] model=${this.model} prompt_tokens_actual=${promptTokens ?? "n/a"} completion_tokens=${completionTokens ?? "n/a"} total_tokens=${totalTokens ?? "n/a"}`
`[TOKEN] model=${model} prompt_tokens_actual=${promptTokens ?? "n/a"} completion_tokens=${completionTokens ?? "n/a"} total_tokens=${totalTokens ?? "n/a"}`
);
return data;
}
/**
 * Streaming chat completion against the OpenAI-compatible endpoint.
 * Sends the request with `stream: true` and parses the SSE body line by line,
 * invoking `onToken(delta, fullText, chunk)` for every text delta.
 *
 * @param {Array<object>} messages - chat messages; array-content parts are normalized first.
 * @param {number} temperature - sampling temperature.
 * @param {string} model - model name; defaults to the service's primary model.
 * @param {number} maxTokens - completion token cap.
 * @param {{thinking?: string}} options - per-call thinking-mode override ("on" | "off" | "auto").
 * @param {(delta: string, fullText: string, chunk: object) => void} onToken - delta callback.
 * @returns {Promise<{text: string, done: boolean}>} the accumulated completion text.
 * @throws 400 when base URL / API key / model are missing; upstream status on HTTP failure.
 */
async chatWithModelStream(messages, temperature = 0.7, model = this.model, maxTokens = 4096, options = {}, onToken = () => {}) {
if (!this.baseUrl || !this.apiKey) {
throw createHttpError(400, "LLM 服务未配置,请提供 LLM_BASE_URL/LLM_API_KEY");
}
if (!String(model || "").trim()) {
throw createHttpError(400, "LLM 模型未配置,请提供 LLM_MODEL_NAME 或 LLM_INTENT_MODEL_NAME");
}
// Normalize multimodal array content into the provider's expected part shapes.
const normalizedMessages = normalizeMessagesForChat(messages);
const promptTokensEstimated = estimateMessageTokens(normalizedMessages);
// Resolve the thinking mode: the per-call option wins over the service-level
// default; anything unrecognized falls back to "auto".
const normalizedModeRaw = String(options?.thinking || this.thinkingMode || "auto").trim().toLowerCase();
const thinkingMode = ["on", "off", "auto"].includes(normalizedModeRaw) ? normalizedModeRaw : "auto";
const isQwen3Family = /qwen\s*3|qwen3/i.test(String(model || ""));
// In "auto" mode, thinking is only enabled for Qwen3 models on sufficiently
// large prompts with enough completion budget (thresholds are heuristic).
const autoEnableThinking = isQwen3Family && promptTokensEstimated >= 1600 && Number(maxTokens || 0) >= 900;
const enableThinking = thinkingMode === "on" ? true : thinkingMode === "off" ? false : autoEnableThinking;
const payload = {
model,
messages: normalizedMessages,
temperature: temperature,
max_tokens: maxTokens,
stream: true
};
if (isQwen3Family) {
// Qwen3 reads enable_thinking via the extra_body extension field.
payload.extra_body = { enable_thinking: enableThinking };
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${this.apiKey}`
},
body: JSON.stringify(payload)
});
if (!response.ok) {
const errorText = await response.text();
throw createHttpError(response.status, `LLM 请求失败:${errorText}`);
}
const reader = response.body?.getReader?.();
if (!reader) {
// No readable stream available (e.g. a fetch polyfill): fall back to a
// single JSON body and emit it as one final token.
const fallback = await response.json().catch(() => null);
const text = String(fallback?.choices?.[0]?.message?.content || "");
if (text) onToken(text, text, { done: true });
return { text, done: true };
}
const decoder = new TextDecoder("utf-8");
let buffer = ""; // carries a partial SSE line across read() chunks
let fullText = "";
let done = false;
while (!done) {
const readResult = await reader.read();
done = Boolean(readResult?.done);
if (!readResult?.value) continue;
buffer += decoder.decode(readResult.value, { stream: true });
// Only complete lines are processed; the trailing partial line stays buffered.
const lines = buffer.split(/\r?\n/);
buffer = lines.pop() || "";
for (const line of lines) {
const trimmed = String(line || "").trim();
if (!trimmed || !trimmed.startsWith("data:")) continue;
const dataPayload = trimmed.slice(5).trim();
if (!dataPayload || dataPayload === "[DONE]") continue;
try {
const chunk = JSON.parse(dataPayload);
const deltaText = extractDeltaText(chunk);
if (deltaText) {
fullText += deltaText;
onToken(deltaText, fullText, chunk);
}
} catch {}
}
}
// Flush any final complete event left in the buffer after the stream ends.
if (buffer.trim().startsWith("data:")) {
const dataPayload = buffer.trim().slice(5).trim();
if (dataPayload && dataPayload !== "[DONE]") {
try {
const chunk = JSON.parse(dataPayload);
const deltaText = extractDeltaText(chunk);
if (deltaText) {
fullText += deltaText;
onToken(deltaText, fullText, chunk);
}
} catch {}
}
}
return { text: fullText, done: true };
}
async _analyzeTextSinglePass(text, existingContext) {
const systemPrompt = `你是一个恋爱关系知识图谱构建专家。从用户输入的文本中提取实体和关系,用于后续的恋爱决策建议。
@@ -319,7 +517,13 @@ export class LLMService {
"name": "人物名称",
"summary": "人物描述",
"role": "用户 | 恋爱对象 | 朋友 | 家人 | 同事 | 其他",
"gender": "male | female | unknown"
"gender": "male | female | unknown",
"birth_date_utc": "YYYY-MM-DDT00:00:00.000Z 或 null",
"occupation": "职业(可选)",
"education_background": "教育背景(可选)",
"residential_status": "常住状态/所在城市(可选)",
"consent_status": "granted | denied | revoked | unknown",
"data_source": "ingest | user_input | manual | import | llm_extract | system"
}
],
"organizations": [
@@ -372,6 +576,8 @@ export class LLMService {
8. 长度控制persons.summary 不超过 100 字events.summary 不超过 120 字relations.summary 不超过 80 字
9. 人物性别person.gender 只允许 male/female/unknown无法确认时填 unknown
10. 若文本出现“某人毕业于/就读于/来自某大学”,需创建该大学 organization并补充关系如 STUDIED_AT/ALUMNUS_OF
11. 出现生日/出生年份时person.birth_date_utc 必须写成 UTC ISO如 2001-05-20T00:00:00.000Z);只有年份时用该年 01-01
12. person.consent_status 未明确时填 unknownperson.data_source 默认填 llm_extract
只返回 JSON不要有其他文字。`;
@@ -535,6 +741,60 @@ export class LLMService {
}
}
async classifyIntent(text, options = {}) {
const rawText = typeof text === "string" ? text.trim() : "";
if (!rawText) {
return {
intent: "qa",
should_ingest: false,
should_query: true,
confidence: 0.52,
reason: "empty_text",
model: this.intentModel || this.model
};
}
const model = this.intentModel || this.model;
const maxTokens = Math.min(Math.max(Number(options?.maxTokens || 220), 80), 400);
const result = await this.chatWithModel([
{
role: "system",
content: `你是“意图分类器”,只输出 JSON
{"intent":"qa|ingest|mixed|unknown","confidence":0.0,"reason":"一句话","should_ingest":true|false,"should_query":true|false}
规则:
1) qa主要是提问/求分析
2) ingest主要是陈述新增事实意图写入图谱
3) mixed既有新增事实又要求回答
4) unknown无法判定时保守返回
5) 结果要保守,不要过度推断`
},
{
role: "user",
content: rawText
}
], 0, model, maxTokens, { thinking: "off" });
const content = result?.choices?.[0]?.message?.content || "";
let parsed;
try {
parsed = parseJsonObject(content);
} catch {
parsed = {};
}
const intentRaw = String(parsed?.intent || "").trim().toLowerCase();
const allowed = new Set(["qa", "ingest", "mixed", "unknown"]);
const intent = allowed.has(intentRaw) ? intentRaw : "unknown";
const confidence = Math.min(Math.max(Number(parsed?.confidence ?? 0), 0), 1);
const shouldIngest = parsed?.should_ingest === true || intent === "ingest" || intent === "mixed";
const shouldQuery = parsed?.should_query === false ? false : intent !== "ingest";
return {
intent,
should_ingest: shouldIngest,
should_query: shouldQuery,
confidence,
reason: String(parsed?.reason || "").slice(0, 120),
model
};
}
async adjudicateEventCorrections(text, recentEvents = [], options = {}) {
const rawText = typeof text === "string" ? text.trim() : "";
if (!rawText) return { decisions: [] };

View File

@@ -0,0 +1,272 @@
// Construct an Error carrying an HTTP status code for route-level handling.
const createHttpError = (statusCode, message) => {
  const httpError = new Error(message);
  httpError.statusCode = statusCode;
  return httpError;
};
// Best-effort JSON extraction from LLM output: try the raw text, then the
// contents of a fenced ```json block, then the outermost {...} slice.
// Returns the first candidate that parses, or null when none do.
const tryParseJson = (rawText) => {
  const text = String(rawText || "").trim();
  if (!text) return null;
  const candidates = [text];
  const fenceMatch = text.match(/```(?:json)?\s*([\s\S]*?)```/i);
  if (fenceMatch?.[1]) candidates.push(fenceMatch[1].trim());
  const open = text.indexOf("{");
  const close = text.lastIndexOf("}");
  if (open >= 0 && close > open) candidates.push(text.slice(open, close + 1));
  for (const candidate of candidates) {
    try {
      return JSON.parse(candidate);
    } catch {
      // fall through to the next candidate
    }
  }
  return null;
};
// Map free-form scene labels (English or Chinese) onto the canonical keys
// "moments" / "wechat_chat"; anything unrecognized becomes "auto".
const normalizeScene = (value) => {
  const label = String(value || "").trim().toLowerCase();
  const momentsAliases = new Set(["moments", "wechat_moments", "朋友圈"]);
  const chatAliases = new Set(["wechat_chat", "chat", "聊天", "微信聊天记录"]);
  if (momentsAliases.has(label)) return "moments";
  if (chatAliases.has(label)) return "wechat_chat";
  return "auto";
};
// Accept only base64-encoded image data URLs; anything else yields "".
const normalizeDataUrl = (value) => {
  const candidate = String(value || "").trim();
  const isBase64Image = candidate.startsWith("data:image/") && candidate.includes(";base64,");
  return isBase64Image ? candidate : "";
};
// Keep only absolute http(s) URLs; everything else becomes "".
const normalizeImageUrl = (value) => {
  const candidate = String(value || "").trim();
  return /^https?:\/\//i.test(candidate) ? candidate : "";
};
// Collapse all whitespace runs to single spaces and cap the result length.
// When truncating, the final slot is used for an ellipsis so the output never
// exceeds maxLen characters. (The original wrapped the slice in a template
// literal that added nothing while still reserving `maxLen - 1` characters —
// the truncation marker had evidently been lost.)
//
// @param {*} value - any value; coerced to string, null/undefined become "".
// @param {number} [maxLen=600] - maximum length of the returned string.
// @returns {string} compacted (and possibly truncated) text.
const compactText = (value, maxLen = 600) => {
  const text = String(value || "").replace(/\s+/g, " ").trim();
  if (!text) return "";
  if (text.length <= maxLen) return text;
  return `${text.slice(0, maxLen - 1)}…`;
};
// Split a base64 image data URL into its media type and base64 payload.
// Returns empty strings for both fields when the input does not match.
const parseDataUrl = (dataUrl = "") => {
  const match = String(dataUrl || "").match(/^data:(image\/[a-zA-Z0-9.+-]+);base64,(.+)$/);
  if (!match) {
    return { mediaType: "", base64: "" };
  }
  return { mediaType: match[1] || "", base64: match[2] || "" };
};
export class SocialImageAgentService {
// @param {{ llmService: object }} deps - injected LLM client wrapper used for image chat calls.
constructor({ llmService }) {
this.llmService = llmService;
// Hard cap on the accepted data-URL string length (~2.6M chars) to bound request size.
this.maxImageChars = 2_600_000;
}
isEnabled() {
if (typeof this.llmService?.isImageEnabled === "function") {
return Boolean(this.llmService.isImageEnabled());
}
return Boolean(this.llmService?.isEnabled?.());
}
_readImageInput(payload = {}) {
const image = payload.image_payload && typeof payload.image_payload === "object"
? payload.image_payload
: payload.image && typeof payload.image === "object"
? payload.image
: {};
const dataUrl = normalizeDataUrl(
image.data_url
|| image.dataUrl
|| payload.image_data_url
|| payload.imageDataUrl
);
const imageUrl = normalizeImageUrl(
image.url
|| image.image_url
|| payload.image_url
);
const fileName = String(image.file_name || image.fileName || payload.image_file_name || "").trim();
const scene = normalizeScene(payload.image_scene || image.scene || payload.scene);
return { dataUrl, imageUrl, fileName, scene };
}
// Compose the system prompt for the image-understanding agent.
// `scene` steers the extraction focus ("moments" / "wechat_chat"); "auto"
// asks the model to classify the screenshot first. The prompt pins the exact
// JSON schema the model must emit so downstream parsing stays deterministic.
_buildSystemPrompt(scene = "auto") {
// Scene-specific guidance prepended ahead of the shared instructions.
const sceneHint = scene === "moments"
? "当前图片偏向朋友圈动态,请重点提取发帖人、互动对象、时间线线索、情绪倾向、评论互动与配图语义。"
: scene === "wechat_chat"
? "当前图片偏向微信聊天记录,请重点提取对话双方、关键句、冲突/和好信号、时间点、主动方与边界表达。"
: "请先识别是朋友圈还是微信聊天记录,再按对应维度提取。";
return `你是图谱问答的图片理解Agent擅长解析朋友圈截图/微信聊天截图并转成可检索线索。
${sceneHint}
朋友圈模式下,请优先提取:发帖人身份、被@对象、评论关系、情绪强度、关系状态变化。
微信聊天模式下,请优先提取:说话人轮次、关键原句、冲突信号、修复信号、时间顺序与关系边界。
微信聊天角色识别规则:
1) 绿色聊天气泡默认判定为 self我方
2) 白色聊天气泡默认判定为 other对方
3) 若颜色不清晰,再结合左右位置、头像位置、昵称与时间分布判断;
4) 无法确认时标记 unknown禁止强行猜测。
只返回 JSON不要输出任何额外文字。格式
{
"scene":"moments|wechat_chat|unknown",
"confidence":0-1,
"scene_focus":["本图最关键的3-5个观察点"],
"summary":"图片核心摘要120字内",
"extracted_text":"OCR关键信息可为空",
"chat_turns":[{"speaker":"self|other|unknown","text":"一句话","cue":"green|white|left|right|avatar"}],
"key_entities":["人物/群聊/地点"],
"key_events":["关键事件线索"],
"risk_signals":["冲突/误解/边界风险信号"],
"relation_hints":["关系线索"],
"suggested_question":"可用于图谱问答的建议问题(尽量具体)"
}`;
}
_isMissingImageSignal(raw = "", parsed = {}) {
const text = `${String(raw || "")}\n${String(parsed?.summary || "")}`.toLowerCase();
return /未提供(截图|图片)|无法进行视觉解析|无可见图像|看不到图片|cannot\s+see\s+the\s+image|no\s+image\s+provided|image\s+not\s+provided/.test(text);
}
_buildUserContextParts(scene = "auto", queryText = "") {
return [
{ type: "text", text: `场景偏好: ${scene}` },
{ type: "text", text: `用户补充问题: ${queryText || "未提供文本问题,请基于图片生成可检索问题"}` },
{ type: "text", text: "请输出标准JSON。" }
];
}
_buildImageContentVariants({ dataUrl = "", imageUrl = "", contextParts = [] } = {}) {
if (imageUrl) {
return [
[...contextParts, { type: "image_url", image_url: { url: imageUrl } }],
[...contextParts, { type: "image_url", image_url: imageUrl }]
];
}
if (!dataUrl) return [contextParts];
return [
[...contextParts, { type: "image_url", image_url: { url: dataUrl } }],
[...contextParts, { type: "image_url", image_url: dataUrl }]
];
}
async analyzeForGraphQa(payload = {}) {
if (!this.isEnabled()) {
throw createHttpError(400, "LLM 服务未配置,无法启用图片问答");
}
const { dataUrl, imageUrl, fileName, scene } = this._readImageInput(payload);
if (!dataUrl && !imageUrl) {
throw createHttpError(400, "缺少图片输入,请提供 image_payload.data_url 或 image_url");
}
if (dataUrl && dataUrl.length > this.maxImageChars) {
throw createHttpError(413, "图片体积过大,请压缩后重试");
}
const queryText = String(payload.query_text || payload.query || "").trim();
const contextParts = this._buildUserContextParts(scene, queryText);
const variants = this._buildImageContentVariants({ dataUrl, imageUrl, contextParts });
let raw = "";
let parsed = {};
let lastError = null;
for (let i = 0; i < variants.length; i += 1) {
try {
const response = await this.llmService.chatImage([
{ role: "system", content: this._buildSystemPrompt(scene) },
{ role: "user", content: variants[i] }
], 0.2, { thinking: "off" });
raw = response?.choices?.[0]?.message?.content || "";
parsed = tryParseJson(raw) || {};
const missingSignal = this._isMissingImageSignal(raw, parsed);
if (missingSignal && i < variants.length - 1) {
continue;
}
break;
} catch (error) {
lastError = error;
if (i >= variants.length - 1) {
throw error;
}
}
}
if (!raw && lastError) {
throw lastError;
}
if (this._isMissingImageSignal(raw, parsed)) {
throw createHttpError(422, "图片未被模型正确识别,请尝试 Ctrl+V 粘贴截图或更换支持视觉的模型");
}
const finalScene = ["moments", "wechat_chat", "unknown"].includes(String(parsed.scene || "").trim().toLowerCase())
? String(parsed.scene || "").trim().toLowerCase()
: "unknown";
const confidenceRaw = Number(parsed.confidence);
const confidence = Number.isFinite(confidenceRaw) ? Math.max(0, Math.min(1, confidenceRaw)) : 0.6;
const summary = compactText(parsed.summary, 180);
const extractedText = compactText(parsed.extracted_text, 1200);
const keyEntities = (Array.isArray(parsed.key_entities) ? parsed.key_entities : [])
.map((item) => compactText(item, 40))
.filter(Boolean)
.slice(0, 10);
const chatTurns = (Array.isArray(parsed.chat_turns) ? parsed.chat_turns : [])
.map((item) => {
const speakerRaw = String(item?.speaker || "").trim().toLowerCase();
const speaker = ["self", "other", "unknown"].includes(speakerRaw) ? speakerRaw : "unknown";
return {
speaker,
text: compactText(item?.text, 120),
cue: compactText(item?.cue, 24)
};
})
.filter((item) => item.text)
.slice(0, 20);
const keyEvents = (Array.isArray(parsed.key_events) ? parsed.key_events : [])
.map((item) => compactText(item, 80))
.filter(Boolean)
.slice(0, 8);
const sceneFocus = (Array.isArray(parsed.scene_focus) ? parsed.scene_focus : [])
.map((item) => compactText(item, 80))
.filter(Boolean)
.slice(0, 6);
const riskSignals = (Array.isArray(parsed.risk_signals) ? parsed.risk_signals : [])
.map((item) => compactText(item, 80))
.filter(Boolean)
.slice(0, 6);
const relationHints = (Array.isArray(parsed.relation_hints) ? parsed.relation_hints : [])
.map((item) => compactText(item, 80))
.filter(Boolean)
.slice(0, 8);
const suggestedQuestion = compactText(parsed.suggested_question, 200);
const imageDigest = [
summary ? `图片摘要:${summary}` : "",
chatTurns.length
? `聊天分轨:${chatTurns.slice(0, 8).map((item) => `${item.speaker === "self" ? "我" : item.speaker === "other" ? "对方" : "未知"}:${item.text}`).join(" | ")}`
: "",
sceneFocus.length ? `场景观察:${sceneFocus.join("")}` : "",
keyEntities.length ? `关键实体:${keyEntities.join("、")}` : "",
keyEvents.length ? `事件线索:${keyEvents.join("")}` : "",
riskSignals.length ? `风险信号:${riskSignals.join("")}` : "",
relationHints.length ? `关系线索:${relationHints.join("")}` : "",
extractedText ? `OCR摘录${extractedText.slice(0, 260)}` : ""
].filter(Boolean).join("\n");
const queryTextForQa = [
queryText,
suggestedQuestion,
imageDigest ? `以下是图片提取线索,请结合图谱回答:\n${imageDigest}` : ""
].filter(Boolean).join("\n\n");
return {
ok: true,
scene: finalScene,
confidence,
file_name: fileName || null,
summary,
extracted_text: extractedText,
chat_turns: chatTurns,
scene_focus: sceneFocus,
key_entities: keyEntities,
key_events: keyEvents,
risk_signals: riskSignals,
relation_hints: relationHints,
suggested_question: suggestedQuestion,
image_digest: imageDigest,
query_text_for_qa: queryTextForQa || queryText || suggestedQuestion || "请基于图片线索给出关系判断"
};
}
}

View File

@@ -122,6 +122,64 @@
<span class="detail-label">性别</span>
<span class="detail-value">{{ selectedDetail.data.gender || 'unknown' }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.age_label">
<span class="detail-label">年龄标签</span>
<span class="detail-value">{{ selectedDetail.data.age_label }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.birth_date_utc">
<span class="detail-label">出生日期(UTC)</span>
<span class="detail-value">{{ selectedDetail.data.birth_date_utc }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.aliases?.length">
<span class="detail-label">别名</span>
<span class="detail-value detail-tags">
<span class="detail-tag" v-for="(alias, idx) in selectedDetail.data.aliases.slice(0, 8)" :key="`${selectedDetail.data.id}_alias_${idx}`">
{{ alias }}
</span>
</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.person_role">
<span class="detail-label">人物角色</span>
<span class="detail-value">{{ formatPersonRole(selectedDetail.data.person_role) }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.relation_to_user">
<span class="detail-label">与用户关系</span>
<span class="detail-value">
{{ selectedDetail.data.relation_to_user }}
<template v-if="Number.isFinite(Number(selectedDetail.data.relation_confidence))">
· {{ formatPercent(selectedDetail.data.relation_confidence) }}
</template>
</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.occupation">
<span class="detail-label">职业</span>
<span class="detail-value">{{ selectedDetail.data.occupation }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.education_background">
<span class="detail-label">教育背景</span>
<span class="detail-value">{{ selectedDetail.data.education_background }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person' && selectedDetail.data.residential_status">
<span class="detail-label">常住状态</span>
<span class="detail-value">{{ selectedDetail.data.residential_status }}</span>
</div>
<div class="detail-row" v-if="selectedDetail.data.type === 'person'">
<span class="detail-label">属性标签</span>
<span class="detail-value detail-tags">
<span class="detail-tag" v-if="selectedDetail.data.data_source">
来源: {{ formatDataSource(selectedDetail.data.data_source) }}
</span>
<span class="detail-tag" v-if="selectedDetail.data.consent_status">
同意状态: {{ formatConsentStatus(selectedDetail.data.consent_status) }}
</span>
<span class="detail-tag" v-if="selectedDetail.data.privacy_level">
隐私级别: {{ selectedDetail.data.privacy_level }}
</span>
<span class="detail-tag" v-if="Number.isFinite(Number(selectedDetail.data.data_quality))">
数据质量: {{ formatPercent(selectedDetail.data.data_quality) }}
</span>
</span>
</div>
<div class="detail-row">
<span class="detail-label">ID</span>
<span class="detail-value">{{ selectedDetail.data.id || '-' }}</span>
@@ -1052,6 +1110,51 @@ const formatDateTime = (value) => {
return String(value)
}
// Render a numeric quality/confidence value as a percentage string.
// Values at or below 1 are treated as ratios (0.85 -> "85.0%"), larger
// values as percentages already; output is clamped to [0, 100] with one
// decimal place. Non-numeric input yields '-'.
const formatPercent = (value) => {
  const parsed = Number(value)
  if (!Number.isFinite(parsed)) return '-'
  let percent = parsed
  if (parsed <= 1) percent = parsed * 100
  const clamped = Math.min(Math.max(percent, 0), 100)
  return `${clamped.toFixed(1)}%`
}
// Translate a person-role code into its Chinese display label.
// Unknown codes fall back to the raw value, and empty input to '未知'.
// Fix: guard the lookup with Object.hasOwn so inherited Object.prototype
// keys (e.g. the string 'constructor') cannot leak prototype members into
// the rendered label.
const formatPersonRole = (value) => {
  const raw = String(value || '').trim().toLowerCase()
  const map = {
    self: '本人',
    partner: '伴侣',
    ex: '前任',
    friend: '朋友',
    colleague: '同事',
    other: '其他',
    unknown: '未知'
  }
  return Object.hasOwn(map, raw) ? map[raw] : (value || '未知')
}
// Translate a data-source code into its Chinese display label.
// Unknown codes fall back to the raw value, and empty input to '未知'.
// Fix: guard the lookup with Object.hasOwn so inherited Object.prototype
// keys (e.g. 'constructor') cannot leak prototype members into the label.
const formatDataSource = (value) => {
  const raw = String(value || '').trim().toLowerCase()
  const map = {
    ingest: '导入',
    user_input: '用户输入',
    manual: '手动',
    import: '导入',
    llm_extract: '模型抽取',
    system: '系统'
  }
  return Object.hasOwn(map, raw) ? map[raw] : (value || '未知')
}
// Translate a consent-status code into its Chinese display label.
// Unknown codes fall back to the raw value, and empty input to '未知'.
// Fix: guard the lookup with Object.hasOwn so inherited Object.prototype
// keys (e.g. 'constructor') cannot leak prototype members into the label.
const formatConsentStatus = (value) => {
  const raw = String(value || '').trim().toLowerCase()
  const map = {
    granted: '已同意',
    denied: '拒绝',
    revoked: '已撤回',
    unknown: '未知'
  }
  return Object.hasOwn(map, raw) ? map[raw] : (value || '未知')
}
const clearSelection = () => {
selectedDetail.value = null
resetSelectionStyles?.()
@@ -1071,6 +1174,26 @@ const formatIngestProgress = (event) => {
return `阶段: ${stage}`
}
// Build a one-line log summary from a task's quality-replay payload.
// Priority order: relation-quality stats, then normalization counters,
// then a bare confidence score; returns '' when nothing is reportable.
const formatQualityReplayLine = (qualityReplay) => {
  if (!qualityReplay || typeof qualityReplay !== 'object') return ''
  const output = qualityReplay.output || {}
  const quality = output.relation_quality || null
  if (quality) {
    const averagePercent = (Number(quality.average_score || 0) * 100).toFixed(1)
    const stableCount = Number(quality.stable_count || 0)
    const riskCount = Number(quality.conflict_risk_count || 0)
    return `质量回放: 关系均分 ${averagePercent}% · 稳定 ${stableCount} · 风险 ${riskCount}`
  }
  const normalization = output.normalization || null
  if (normalization) {
    const personMerged = Number(normalization.person_alias_merged || 0)
    const orgMerged = Number(normalization.organization_alias_merged || 0)
    const relationDeduped = Number(normalization.relation_deduped || 0)
    return `质量回放: 人物别名合并 ${personMerged} · 组织别名合并 ${orgMerged} · 关系去重 ${relationDeduped}`
  }
  if (typeof output.confidence === 'number') {
    return `质量回放: 置信度 ${(Number(output.confidence) * 100).toFixed(1)}%`
  }
  return ''
}
const handleIngest = async () => {
if (!ingestText.value) return
ingestLoading.value = true
@@ -1086,6 +1209,7 @@ const handleIngest = async () => {
})
}
try {
let ingestTaskId = ''
logAt('开始提交导入请求...', 'info')
const payload = {
text: ingestText.value,
@@ -1126,6 +1250,11 @@ const handleIngest = async () => {
}
if (eventName === 'progress') {
logAt(formatIngestProgress(payload), 'info')
} else if (eventName === 'meta') {
if (payload?.task_id) {
ingestTaskId = payload.task_id
logAt(`任务已创建: ${ingestTaskId}`, 'info')
}
} else if (eventName === 'done') {
doneResult = payload
} else if (eventName === 'error') {
@@ -1144,12 +1273,29 @@ const handleIngest = async () => {
if (buffer.trim()) parseSseBlock(buffer)
if (streamError) {
if (ingestTaskId) {
logAt(`可通过 /tasks/${ingestTaskId} 查询失败详情`, 'error')
}
throw new Error(streamError)
}
if (!doneResult?.ok) {
throw new Error(doneResult?.error || doneResult?.message || '分析失败')
}
logAt(`分析成功: ${doneResult.stats?.created?.persons || 0} 人, ${doneResult.stats?.created?.organizations || 0} 组织, ${doneResult.stats?.created?.events || 0} 事件, ${doneResult.stats?.created?.topics || 0} 主题`, 'success')
if (ingestTaskId) {
logAt(`任务完成: ${ingestTaskId}`, 'success')
try {
const taskRes = await fetch(`${apiBase}/tasks/${encodeURIComponent(ingestTaskId)}`, {
method: 'GET',
headers: { 'Content-Type': 'application/json' }
})
const taskData = await taskRes.json()
if (taskRes.ok && taskData?.ok && taskData?.task?.quality_replay) {
const replayLine = formatQualityReplayLine(taskData.task.quality_replay)
if (replayLine) logAt(replayLine, 'info')
}
} catch {}
}
ingestText.value = ''
renderGraph()
} catch (e) {
@@ -1747,6 +1893,23 @@ input:checked + .slider:before {
word-break: break-word;
}
/* Wrapping flex row holding the pill tags inside a detail-panel value
   (aliases, data-source / consent / privacy / quality chips). */
.detail-tags {
  display: flex;
  flex-wrap: wrap;
  gap: 6px;
}
/* Single pill-shaped chip within .detail-tags. */
.detail-tag {
  display: inline-flex;
  align-items: center;
  border: 1px solid #cccccc;
  border-radius: 999px;
  padding: 2px 8px;
  font-size: 12px;
  color: #333333;
  background: #ffffff;
}
.summary-value { line-height: 1.55; }
.panel-grid {