This commit is contained in:
liailing1026
2025-12-07 17:18:10 +08:00
committed by zhaoweijie
parent 6392301833
commit c00c0072b8
53 changed files with 16040 additions and 275 deletions

View File

@@ -0,0 +1,24 @@
{
"permissions": {
"allow": [
"Read(//Users/zhaoweijie/Desktop/agent/AgentCoord/**)",
"Bash(python3:*)",
"Bash(source:*)",
"Bash(pip install:*)",
"Bash(python:*)",
"Bash(tree:*)",
"Bash(export FAST_DESIGN_MODE=True:*)",
"Bash(echo:*)",
"Bash(chmod:*)",
"Bash(lsof:*)",
"Bash(curl:*)",
"Bash(xargs kill:*)",
"Bash(pip:*)",
"WebSearch",
"WebFetch(domain:pypi.org)",
"Bash(cp:*)"
],
"deny": [],
"ask": []
}
}

View File

@@ -42,7 +42,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
return read_LLM_Completion(messages)["AbilityRequirement"]
PROMPT_AGENT_ABILITY_SCORING = """
@@ -83,7 +83,6 @@ class JSON_Agent(BaseModel):
class JSON_AGENT_ABILITY_SCORING(RootModel):
root: Dict[str, JSON_Agent]
def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
scoreTable = {}
for Ability_Requirement in Ability_Requirement_List:
@@ -105,7 +104,7 @@ def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
return scoreTable
def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
async def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
Current_Task = {
"TaskName": stepTask["StepName"],
"InputObject_List": stepTask["InputObject_List"],
@@ -125,10 +124,10 @@ def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
),
},
]
Ability_Requirement_List = read_LLM_Completion(messages)[
Ability_Requirement_List = await read_LLM_Completion(messages)[
"AbilityRequirement"
]
scoreTable = agentAbilityScoring(Agent_Board, Ability_Requirement_List)
scoreTable = await agentAbilityScoring(Agent_Board, Ability_Requirement_List)
return scoreTable

View File

@@ -76,8 +76,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
return read_LLM_Completion(messages)["AbilityRequirement"]
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
# Check if Agent_Board is None or empty
@@ -103,7 +102,7 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
agentboard_set = {agent["Name"] for agent in Agent_Board}
while True:
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
if len(candidate) > MAX_TEAM_SIZE:
teamSize = random.randint(2, MAX_TEAM_SIZE)
candidate = candidate[0:teamSize]

View File

@@ -9,6 +9,14 @@ import AgentCoord.util as util
def generate_basePlan(
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
):
Agent_Board = [
{"Name": (a.get("Name") or "").strip(),"Profile": (a.get("Profile") or "").strip()}
for a in Agent_Board
if a and a.get("Name") is not None
]
if not Agent_Board: # 洗完后还是空 → 直接返回空计划
return {"Plan_Outline": []}
basePlan = {
"Initial Input Object": InitialObject_List,
"Collaboration Process": [],

View File

@@ -95,7 +95,7 @@ def branch_PlanOutline(
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
Remaining_Steps = read_LLM_Completion(messages)[
"Remaining Steps"
]
branch_List.append(Remaining_Steps)

View File

@@ -145,7 +145,7 @@ def branch_TaskProcess(
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
Remaining_Steps = read_LLM_Completion(messages)[
"Remaining Steps"
]

View File

@@ -106,6 +106,9 @@ class TaskProcessPlan(BaseModel):
def generate_TaskProcess(General_Goal, Current_Task_Description):
# 新增参数验证
if not General_Goal or str(General_Goal).strip() == "":
raise ValueError("General_Goal cannot be empty")
messages = [
{
"role": "system",

View File

@@ -90,7 +90,7 @@ class BaseAction():
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = agentProfile, General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
print_colored(text = prompt, text_color="red")
messages = [{"role":"system", "content": prompt}]
ActionResult = LLM_Completion(messages,True,False)
ActionResult = LLM_Completion(messages,stream=False)
ActionInfo_with_Result = copy.deepcopy(self.info)
ActionInfo_with_Result["Action_Result"] = ActionResult

View File

@@ -1,2 +1,23 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 导出常用模块和函数,根据实际需求调整
from .PlanEngine.planOutline_Generator import generate_PlanOutline
from .PlanEngine.basePlan_Generator import generate_basePlan
from .PlanEngine.AgentSelection_Generator import generate_AgentSelection
from .LLMAPI.LLMAPI import LLM_Completion
from .util.converter import read_LLM_Completion
# 定义包元数据
__version__ = "0.1.0"
__all__ = [
"generate_PlanOutline",
"generate_basePlan",
"generate_AgentSelection",
"LLM_Completion",
"read_LLM_Completion"
]

View File

@@ -1,12 +1,12 @@
## config for default LLM
OPENAI_API_BASE: ""
OPENAI_API_KEY: ""
OPENAI_API_MODEL: "gpt-4-turbo-preview"
OPENAI_API_BASE: "https://ai.gitee.com/v1"
OPENAI_API_KEY: "<REDACTED-ROTATE-THIS-KEY>"  # SECURITY: a live API key was committed here — revoke it immediately and load secrets from the environment, never from version control
OPENAI_API_MODEL: "DeepSeek-V3"
## config for fast mode
FAST_DESIGN_MODE: True
GROQ_API_KEY: ""
MISTRAL_API_KEY: ""
## options under experimentation, leave them as Fasle unless you know what it is for
USE_CACHE: False
## options under experimentation, leave them as False unless you know what it is for
USE_CACHE: False

View File

@@ -3,5 +3,5 @@ openai==2.8.1
PyYAML==6.0.1
termcolor==2.4.0
groq==0.4.2
mistralai==0.1.6
mistralai==1.5.2
socksio==1.0.0

13
backend/restart.ps1 Normal file
View File

@@ -0,0 +1,13 @@
# restart.ps1
$port=8000
$env:PYTHONUNBUFFERED="1"
python server.py --port 8000
Write-Host "Killing PID on port $port..." -ForegroundColor Red
Get-NetTCPConnection -LocalPort $port -ErrorAction SilentlyContinue | ForEach-Object {
Stop-Process -Id $_.OwningProcess -Force
}
Write-Host "Starting Flask..." -ForegroundColor Green
python server.py --port $port

View File

@@ -0,0 +1,159 @@
# AgentCoord Backend 代码逻辑架构分析
## 🏗️ Backend 代码逻辑架构
### 📁 核心目录结构
```
backend/
├── server.py # Flask主服务器入口
├── config/config.yaml # LLM API配置
├── AgentRepo/agentBoard_v1.json # 智能体定义库
├── DataProcess/ # 数据处理层
├── RequestCache/ # 缓存机制
└── AgentCoord/ # 核心业务逻辑
├── LLMAPI/ # LLM接口封装
├── PlanEngine/ # 计划生成引擎
├── RehearsalEngine_V2/ # 计划执行引擎
└── util/ # 工具模块
```
### 🔄 主要工作流程
#### 1️⃣ **计划生成流程** (PlanEngine)
```
用户目标 → 生成计划大纲 → 选择智能体 → 生成任务流程 → 输出完整计划
```
**核心模块:**
- `basePlan_Generator.py` - 整合所有计划生成组件
- `planOutline_Generator.py` - 生成高级计划大纲
- `taskProcess_Generator.py` - 生成详细任务执行流程
- `AgentSelection_Generator.py` - 选择最适合的智能体
#### 2️⃣ **计划执行流程** (RehearsalEngine_V2)
```
协作计划 → 初始化执行环境 → 按步骤执行 → 智能体协作 → 记录执行日志
```
**动作类型:**
- **Propose** - 提出建议和方案
- **Critique** - 提供反馈和批评
- **Improve** - 基于反馈改进结果
- **Finalize** - 最终确定输出
#### 3️⃣ **LLM接口层** (LLMAPI)
支持的模型:
- OpenAI GPT-4/GPT-3.5
- Groq Mixtral-8x7b (快速模式)
- Mistral Open-Mixtral-8x7b
### 🌐 API端点架构
#### **计划生成APIs**
- `POST /generate_basePlan` - 生成基础协作计划
- `POST /fill_stepTask` - 填充步骤任务详情
- `POST /branch_PlanOutline` - 处理计划分支
#### **计划执行APIs**
- `POST /executePlan` - 执行协作计划
- `POST /agentSelectModify_init` - 初始化智能体选择
#### **系统管理APIs**
- `POST /setAgents` - 设置智能体板
- `POST /_saveRequestCache` - 保存请求缓存
### 💾 数据流设计
#### **输入层**
- HTTP请求验证
- 参数提取和格式化
- 缓存检查
#### **业务逻辑层**
- LLM API调用
- 多智能体协调
- 任务状态管理
#### **输出层**
- 结果格式化
- 前端渲染模板生成
- JSON响应
### ⚙️ 配置与优化
#### **配置文件 (config.yaml)**
```yaml
OPENAI_API_BASE: "https://api.openai.com"
OPENAI_API_KEY: "your-key"
OPENAI_API_MODEL: "gpt-4-turbo-preview"
FAST_DESIGN_MODE: True # 启用快速模式
USE_CACHE: False # 缓存开关
```
#### **性能优化**
- 请求缓存机制
- 快速模式支持Groq
- 异步LLM调用
- 重试和错误处理
### 🔧 关键技术特点
1. **模块化架构** - 清晰的职责分离
2. **多LLM支持** - 灵活的模型切换
3. **智能体协作** - 复杂的多智能体工作流
4. **前端适配** - 自动生成渲染模板
5. **可扩展性** - 支持自定义智能体和动作
## 📋 详细模块说明
### 1. 服务器入口 (server.py)
- **功能**: Flask应用主入口,提供RESTful API
- **特点**: 支持请求缓存、全局状态管理、参数化端口配置
### 2. 计划引擎 (PlanEngine)
**核心功能**: 生成多智能体协作计划
- **basePlan_Generator.py**: 整合所有生成器,生成完整协作计划
- **planOutline_Generator.py**: 基于目标生成计划大纲
- **taskProcess_Generator.py**: 为每个任务步骤生成执行流程
- **AgentSelection_Generator.py**: 选择合适的智能体执行任务
### 3. 排练引擎 (RehearsalEngine_V2)
**核心功能**: 执行生成的协作计划
- **ExecutePlan.py**: 计划执行控制器
- **Action模块**: 实现各种协作动作(Propose、Critique、Improve、Finalize)
### 4. LLM API接口 (LLMAPI)
**核心功能**: 封装多种大语言模型API
- 支持流式响应
- 异步处理
- 快速模式切换
### 5. 数据处理 (DataProcess)
**核心功能**: 格式转换和前端适配
- 颜色映射:不同元素类型的视觉区分
- 模板生成:为前端生成渲染模板
- 格式化:处理驼峰命名和自然语言转换
## 🚀 启动和调试
### 开发环境启动
```bash
cd backend
source venv/bin/activate
python server.py --port 8017
```
### 调试模式
Flask已内置 `debug=True` 支持:
- 交互式调试器
- 自动重载
- 详细错误页面
### Docker部署
```bash
docker-compose up
```
这个backend实现了一个完整的多智能体协作平台,通过精心设计的模块化架构,支持复杂任务的规划和执行。