Files
AgentCoord/backend/AgentCoord/Export/infographic_llm.py

269 lines
9.4 KiB
Python
Raw Normal View History

2026-03-11 17:46:42 +08:00
"""
信息图 LLM 报告导出器
调用大模型生成信息图展示的格式化内容
"""
import json
import os
import re
from datetime import datetime
from typing import Dict, Any, Optional
class InfographicLLMExporter:
    """Infographic LLM report exporter — calls a large language model to
    generate formatted content suitable for infographic display."""

    # Default LLM connection settings, filled in from config.yaml by
    # _load_llm_config().  NOTE(review): this is a mutable *class*-level
    # dict and _load_llm_config() assigns through self.LLM_CONFIG[...],
    # so loaded values are shared by every instance of this class.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,   # base URL of the OpenAI-compatible endpoint
        'OPENAI_API_KEY': None,    # API key for that endpoint
        'OPENAI_API_MODEL': None,  # model name passed to chat.completions.create
    }

    # Prompt sent to the LLM by generate().  Placeholders filled via
    # str.format(): {task_name}, {task_outline}, {rehearsal_log}, {agents},
    # {agent_scores}.  Literal JSON braces are escaped as {{ }} so format()
    # leaves them intact.  This is a runtime string consumed by the model,
    # so its (Chinese) text is kept verbatim.
    PROMPT_TEMPLATE = """你是一位专业的可视化设计师和数据分析专家。你的任务是将以下任务执行数据生成适合信息图展示的格式化内容。
## 任务基本信息
- 任务名称{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 信息图内容要求
请生成以下信息图展示内容JSON 格式输出
{{
"summary": "执行摘要 - 2-3句话概括整体执行情况",
"highlights": [
"亮点1 - 取得的显著成果",
"亮点2 - 关键突破或创新",
"亮点3 - 重要的里程碑"
],
"statistics": {{
"total_steps": 执行总步骤数,
"agent_count": 参与智能体数量,
"completion_rate": 完成率(百分比),
"quality_score": 质量评分(1-10)
}},
"key_insights": [
"关键洞察1 - 从执行过程中总结的洞见",
"关键洞察2 - 值得关注的趋势或模式",
"关键洞察3 - 对未来工作的建议"
],
"timeline": [
{{"step": "步骤名称", "status": "完成/进行中/未完成", "key_result": "关键产出"}},
...
],
"agent_performance": [
{{"name": "智能体名称", "score": 评分, "contribution": "主要贡献"}},
...
]
}}
---
## 输出要求
- 输出必须是有效的 JSON 格式
- 语言简体中文
- 所有字符串值使用中文
- statistics 中的数值必须是整数或浮点数
- 确保 JSON 格式正确不要有语法错误
- 不要输出 JSON 之外的任何内容
- **重要**"执行结果"rehearsal_log只是参考数据用于帮助你分析整体执行情况生成摘要亮点统计数据等**不要在任何输出字段中直接复制或输出原始执行结果数据**而应该对这些数据进行分析和提炼生成适合信息图展示的格式化内容
"""
def __init__(self):
self._load_llm_config()
def _load_llm_config(self):
"""从配置文件加载 LLM 配置"""
try:
import yaml
possible_paths = [
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
os.path.join(os.getcwd(), 'config', 'config.yaml'),
]
for config_path in possible_paths:
if os.path.exists(config_path):
with open(config_path, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f)
if config:
self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
return
except Exception as e:
print(f"加载 LLM 配置失败: {e}")
def generate(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
"""生成信息图内容(调用 LLM 生成)"""
try:
task_name = task_data.get('task_name', '未命名任务')
task_outline = task_data.get('task_outline')
rehearsal_log = task_data.get('rehearsal_log')
agent_scores = task_data.get('agent_scores')
agents = self._extract_agents(task_outline)
filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
agents_str = ', '.join(agents) if agents else ''
agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
prompt = self.PROMPT_TEMPLATE.format(
task_name=task_name,
task_outline=task_outline_str,
rehearsal_log=rehearsal_log_str,
agents=agents_str,
agent_scores=agent_scores_str
)
print("正在调用大模型生成信息图内容...")
llm_result = self._call_llm(prompt)
if not llm_result:
print("LLM 生成信息图内容失败")
return None
infographic_data = self._parse_llm_result(llm_result)
if not infographic_data:
print("解析 LLM 结果失败")
return None
print(f"信息图内容生成成功")
return infographic_data
except Exception as e:
print(f"信息图 LLM 生成失败: {e}")
import traceback
traceback.print_exc()
return None
def _extract_agents(self, task_outline: Any) -> list:
"""从 task_outline 中提取参与智能体列表"""
agents = set()
if not task_outline or not isinstance(task_outline, dict):
return []
collaboration_process = task_outline.get('Collaboration Process', [])
if not collaboration_process or not isinstance(collaboration_process, list):
return []
for step in collaboration_process:
if isinstance(step, dict):
agent_selection = step.get('AgentSelection', [])
if isinstance(agent_selection, list):
for agent in agent_selection:
if agent:
agents.add(agent)
return list(agents)
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
"""过滤 agent_scores只保留参与当前任务的智能体评分"""
if not agent_scores or not isinstance(agent_scores, dict):
return {}
if not agents:
return {}
filtered = {}
for step_id, step_data in agent_scores.items():
if not isinstance(step_data, dict):
continue
aspect_list = step_data.get('aspectList', [])
agent_scores_data = step_data.get('agentScores', {})
if not agent_scores_data:
continue
filtered_scores = {}
for agent_name, scores in agent_scores_data.items():
if agent_name in agents and isinstance(scores, dict):
filtered_scores[agent_name] = scores
if filtered_scores:
filtered[step_id] = {
'aspectList': aspect_list,
'agentScores': filtered_scores
}
return filtered
def _call_llm(self, prompt: str) -> str:
"""调用大模型 API 生成内容"""
try:
import openai
if not self.LLM_CONFIG['OPENAI_API_KEY']:
print("错误: OPENAI_API_KEY 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_BASE']:
print("错误: OPENAI_API_BASE 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_MODEL']:
print("错误: OPENAI_API_MODEL 未配置")
return ""
client = openai.OpenAI(
api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
base_url=self.LLM_CONFIG['OPENAI_API_BASE']
)
response = client.chat.completions.create(
model=self.LLM_CONFIG['OPENAI_API_MODEL'],
messages=[
{"role": "user", "content": prompt}
],
temperature=0.7,
max_tokens=8000,
)
if response and response.choices:
return response.choices[0].message.content
return ""
except ImportError:
print("请安装 openai 库: pip install openai")
return ""
except Exception as e:
print(f"调用 LLM 失败: {e}")
return ""
def _parse_llm_result(self, llm_result: str) -> Optional[Dict[str, Any]]:
"""解析 LLM 返回的 JSON 字符串"""
try:
json_str = llm_result.strip()
if json_str.startswith("```json"):
json_str = json_str[7:]
if json_str.startswith("```"):
json_str = json_str[3:]
if json_str.endswith("```"):
json_str = json_str[:-3]
json_str = json_str.strip()
return json.loads(json_str)
except json.JSONDecodeError as e:
print(f"JSON 解析失败: {e}")
print(f"原始结果: {llm_result[:500]}...")
return None
except Exception as e:
print(f"解析失败: {e}")
return None