Compare commits
37 Commits
v2026.01.0
...
v2026.01.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ef34cc326c | ||
|
|
5fa56ba88d | ||
|
|
b71df8ef43 | ||
|
|
8c6fe6784e | ||
|
|
29fa5bae29 | ||
|
|
dab465d924 | ||
|
|
77c0defe93 | ||
|
|
80cf2b5a52 | ||
|
|
96638d8092 | ||
|
|
21ad55ae55 | ||
|
|
530a6cd463 | ||
|
|
8615773b67 | ||
|
|
16eaec64b7 | ||
|
|
8558077dfe | ||
|
|
a15353ea52 | ||
|
|
5b44e3e688 | ||
|
|
a4b3628e01 | ||
|
|
bbb7db3878 | ||
|
|
dec2bbb4bf | ||
|
|
6a241b0ae0 | ||
|
|
51c53e0ed0 | ||
|
|
8cb6382e72 | ||
|
|
5889471e82 | ||
|
|
ca2e0b4fba | ||
|
|
10d24fbfa2 | ||
|
|
322bd6e167 | ||
|
|
3cc4478dd9 | ||
|
|
59f6f2ba97 | ||
|
|
172d9e0b41 | ||
|
|
de7086c9e1 | ||
|
|
5f63e8d1e2 | ||
|
|
3da0b894fd | ||
|
|
ad2d26aa16 | ||
|
|
a09f3e0bdb | ||
|
|
3a0faf27df | ||
|
|
cd3e7309a8 | ||
|
|
54cc10bb41 |
41
.github/copilot-instructions.md
vendored
@@ -260,7 +260,46 @@ async def _emit_notification(
|
||||
|
||||
## 📋 日志规范 (Logging Standard)
|
||||
|
||||
- **禁止使用** `print()` 语句
|
||||
### 1. 前端控制台调试 (Frontend Console Debugging) - **优先推荐 (Preferred)**
|
||||
|
||||
对于需要实时查看数据流、排查 UI 交互或内容变更的场景,**优先使用**前端控制台日志。这种方式可以直接在浏览器 DevTools (F12) 中查看,无需访问服务端日志。
|
||||
|
||||
**实现方式**: 通过 `__event_emitter__` 发送 `type: "execute"` 事件执行 JS 代码。
|
||||
|
||||
```python
|
||||
import json
|
||||
|
||||
async def _emit_debug_log(self, __event_emitter__, title: str, data: dict):
|
||||
"""在浏览器控制台打印结构化调试日志"""
|
||||
if not self.valves.show_debug_log or not __event_emitter__:
|
||||
return
|
||||
|
||||
try:
|
||||
js_code = f"""
|
||||
(async function() {{
|
||||
console.group("🛠️ {title}");
|
||||
console.log({json.dumps(data, ensure_ascii=False)});
|
||||
console.groupEnd();
|
||||
}})();
|
||||
"""
|
||||
|
||||
await __event_emitter__({
|
||||
"type": "execute",
|
||||
"data": {"code": js_code}
|
||||
})
|
||||
except Exception as e:
|
||||
print(f"Error emitting debug log: {e}")
|
||||
```
|
||||
|
||||
**配置要求**:
|
||||
- 在 `Valves` 中添加 `show_debug_log: bool` 开关,默认关闭。
|
||||
- 仅在开关开启时发送日志。
|
||||
|
||||
### 2. 服务端日志 (Server-side Logging)
|
||||
|
||||
用于记录系统级错误、异常堆栈或无需前端感知的后台任务。
|
||||
|
||||
- **禁止使用** `print()` 语句 (除非用于简单的脚本调试)
|
||||
- 必须使用 Python 标准库 `logging`
|
||||
|
||||
```python
|
||||
|
||||
32
.github/workflows/community-stats.yml
vendored
@@ -1,5 +1,5 @@
|
||||
# OpenWebUI 社区统计报告自动生成
|
||||
# 每小时自动获取并更新社区统计数据
|
||||
# 只在统计数据变化时 commit,避免频繁提交
|
||||
|
||||
name: Community Stats
|
||||
|
||||
@@ -32,6 +32,17 @@ jobs:
|
||||
run: |
|
||||
pip install requests python-dotenv
|
||||
|
||||
- name: Get previous stats
|
||||
id: prev_stats
|
||||
run: |
|
||||
# 获取当前的 points 用于比较
|
||||
if [ -f docs/community-stats.json ]; then
|
||||
OLD_POINTS=$(jq -r '.user.total_points' docs/community-stats.json 2>/dev/null || echo "0")
|
||||
echo "old_points=$OLD_POINTS" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "old_points=0" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Generate stats report
|
||||
env:
|
||||
OPENWEBUI_API_KEY: ${{ secrets.OPENWEBUI_API_KEY }}
|
||||
@@ -39,10 +50,23 @@ jobs:
|
||||
run: |
|
||||
python scripts/openwebui_stats.py
|
||||
|
||||
- name: Check for changes
|
||||
- name: Check for significant changes
|
||||
id: check_changes
|
||||
run: |
|
||||
git diff --quiet docs/community-stats.zh.md docs/community-stats.md README.md README_CN.md || echo "changed=true" >> $GITHUB_OUTPUT
|
||||
# 获取新的 points
|
||||
NEW_POINTS=$(jq -r '.user.total_points' docs/community-stats.json 2>/dev/null || echo "0")
|
||||
|
||||
echo "📊 Previous points: ${{ steps.prev_stats.outputs.old_points }}"
|
||||
echo "📊 Current points: $NEW_POINTS"
|
||||
|
||||
# 只在 points 变化时才 commit
|
||||
if [ "$NEW_POINTS" != "${{ steps.prev_stats.outputs.old_points }}" ]; then
|
||||
echo "changed=true" >> $GITHUB_OUTPUT
|
||||
echo "✅ Points changed (${{ steps.prev_stats.outputs.old_points }} → $NEW_POINTS), will commit"
|
||||
else
|
||||
echo "changed=false" >> $GITHUB_OUTPUT
|
||||
echo "⏭️ Points unchanged, skipping commit"
|
||||
fi
|
||||
|
||||
- name: Commit and push changes
|
||||
if: steps.check_changes.outputs.changed == 'true'
|
||||
@@ -50,5 +74,5 @@ jobs:
|
||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config --local user.name "github-actions[bot]"
|
||||
git add docs/community-stats.zh.md docs/community-stats.md docs/community-stats.json README.md README_CN.md
|
||||
git commit -m "chore: update community stats $(date +'%Y-%m-%d')"
|
||||
git diff --staged --quiet || git commit -m "chore: update community stats $(date +'%Y-%m-%d')"
|
||||
git push
|
||||
|
||||
68
.github/workflows/publish_new_plugin.yml
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
name: Publish New Plugin
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
plugin_dir:
|
||||
description: 'Plugin directory (e.g., plugins/actions/deep-dive)'
|
||||
required: true
|
||||
type: string
|
||||
dry_run:
|
||||
description: 'Dry run mode (preview only)'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.x'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install requests
|
||||
|
||||
- name: Validate plugin directory
|
||||
run: |
|
||||
if [ ! -d "${{ github.event.inputs.plugin_dir }}" ]; then
|
||||
echo "❌ Error: Directory '${{ github.event.inputs.plugin_dir }}' does not exist"
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Found plugin directory: ${{ github.event.inputs.plugin_dir }}"
|
||||
ls -la "${{ github.event.inputs.plugin_dir }}"
|
||||
|
||||
- name: Publish Plugin
|
||||
env:
|
||||
OPENWEBUI_API_KEY: ${{ secrets.OPENWEBUI_API_KEY }}
|
||||
run: |
|
||||
if [ "${{ github.event.inputs.dry_run }}" = "true" ]; then
|
||||
echo "🔍 Dry run mode - previewing..."
|
||||
python scripts/publish_plugin.py --new "${{ github.event.inputs.plugin_dir }}" --dry-run
|
||||
else
|
||||
echo "🚀 Publishing plugin..."
|
||||
python scripts/publish_plugin.py --new "${{ github.event.inputs.plugin_dir }}"
|
||||
fi
|
||||
|
||||
- name: Commit changes (if ID was added)
|
||||
if: ${{ github.event.inputs.dry_run != 'true' }}
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
# Check if there are changes to commit
|
||||
if git diff --quiet; then
|
||||
echo "No changes to commit"
|
||||
else
|
||||
git add "${{ github.event.inputs.plugin_dir }}"
|
||||
git commit -m "feat: add openwebui_id to ${{ github.event.inputs.plugin_dir }}"
|
||||
git push
|
||||
echo "✅ Committed and pushed openwebui_id changes"
|
||||
fi
|
||||
18
README.md
@@ -7,26 +7,26 @@ A collection of enhancements, plugins, and prompts for [OpenWebUI](https://githu
|
||||
<!-- STATS_START -->
|
||||
## 📊 Community Stats
|
||||
|
||||
> 🕐 Auto-updated: 2026-01-08 00:11
|
||||
> 🕐 Auto-updated: 2026-01-09 20:14
|
||||
|
||||
| 👤 Author | 👥 Followers | ⭐ Points | 🏆 Contributions |
|
||||
|:---:|:---:|:---:|:---:|
|
||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **49** | **63** | **18** |
|
||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **59** | **70** | **20** |
|
||||
|
||||
| 📝 Posts | ⬇️ Downloads | 👁️ Views | 👍 Upvotes | 💾 Saves |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| **11** | **889** | **9358** | **55** | **48** |
|
||||
| **13** | **1016** | **10831** | **62** | **56** |
|
||||
|
||||
### 🔥 Top 6 Popular Plugins
|
||||
|
||||
| Rank | Plugin | Downloads | Views |
|
||||
|:---:|------|:---:|:---:|
|
||||
| 🥇 | [Turn Any Text into Beautiful Mind Maps](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 283 | 2441 |
|
||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 175 | 486 |
|
||||
| 🥉 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 118 | 1287 |
|
||||
| 4️⃣ | [Flash Card ](https://openwebui.com/posts/flash_card_65a2ea8f) | 82 | 1528 |
|
||||
| 5️⃣ | [Smart Infographic](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 80 | 1081 |
|
||||
| 6️⃣ | [Export to Word (Enhanced Formatting)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 67 | 605 |
|
||||
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 323 | 2878 |
|
||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 180 | 532 |
|
||||
| 🥉 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 121 | 1355 |
|
||||
| 4️⃣ | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 106 | 1265 |
|
||||
| 5️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 91 | 1665 |
|
||||
| 6️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 80 | 751 |
|
||||
|
||||
*See full stats in [Community Stats Report](./docs/community-stats.md)*
|
||||
<!-- STATS_END -->
|
||||
|
||||
18
README_CN.md
@@ -7,26 +7,26 @@ OpenWebUI 增强功能集合。包含个人开发与收集的插件、提示词
|
||||
<!-- STATS_START -->
|
||||
## 📊 社区统计
|
||||
|
||||
> 🕐 自动更新于 2026-01-08 00:11
|
||||
> 🕐 自动更新于 2026-01-09 20:14
|
||||
|
||||
| 👤 作者 | 👥 粉丝 | ⭐ 积分 | 🏆 贡献 |
|
||||
|:---:|:---:|:---:|:---:|
|
||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **49** | **63** | **18** |
|
||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **59** | **70** | **20** |
|
||||
|
||||
| 📝 发布 | ⬇️ 下载 | 👁️ 浏览 | 👍 点赞 | 💾 收藏 |
|
||||
|:---:|:---:|:---:|:---:|:---:|
|
||||
| **11** | **889** | **9358** | **55** | **48** |
|
||||
| **13** | **1016** | **10831** | **62** | **56** |
|
||||
|
||||
### 🔥 热门插件 Top 6
|
||||
|
||||
| 排名 | 插件 | 下载 | 浏览 |
|
||||
|:---:|------|:---:|:---:|
|
||||
| 🥇 | [Turn Any Text into Beautiful Mind Maps](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 283 | 2441 |
|
||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 175 | 486 |
|
||||
| 🥉 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 118 | 1287 |
|
||||
| 4️⃣ | [Flash Card ](https://openwebui.com/posts/flash_card_65a2ea8f) | 82 | 1528 |
|
||||
| 5️⃣ | [Smart Infographic](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 80 | 1081 |
|
||||
| 6️⃣ | [Export to Word (Enhanced Formatting)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 67 | 605 |
|
||||
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 323 | 2878 |
|
||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 180 | 532 |
|
||||
| 🥉 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 121 | 1355 |
|
||||
| 4️⃣ | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 106 | 1265 |
|
||||
| 5️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 91 | 1665 |
|
||||
| 6️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 80 | 751 |
|
||||
|
||||
*完整统计请查看 [社区统计报告](./docs/community-stats.zh.md)*
|
||||
<!-- STATS_END -->
|
||||
|
||||
@@ -1,46 +1,46 @@
|
||||
{
|
||||
"total_posts": 11,
|
||||
"total_downloads": 889,
|
||||
"total_views": 9358,
|
||||
"total_upvotes": 55,
|
||||
"total_downvotes": 1,
|
||||
"total_saves": 48,
|
||||
"total_posts": 13,
|
||||
"total_downloads": 1016,
|
||||
"total_views": 10831,
|
||||
"total_upvotes": 62,
|
||||
"total_downvotes": 2,
|
||||
"total_saves": 56,
|
||||
"total_comments": 15,
|
||||
"by_type": {
|
||||
"action": 9,
|
||||
"action": 11,
|
||||
"filter": 2
|
||||
},
|
||||
"posts": [
|
||||
{
|
||||
"title": "Turn Any Text into Beautiful Mind Maps",
|
||||
"title": "Smart Mind Map",
|
||||
"slug": "turn_any_text_into_beautiful_mind_maps_3094c59a",
|
||||
"type": "action",
|
||||
"version": "0.9.1",
|
||||
"author": "Fu-Jie",
|
||||
"description": "Intelligently analyzes text content and generates interactive mind maps to help users structure and visualize knowledge.",
|
||||
"downloads": 283,
|
||||
"views": 2441,
|
||||
"downloads": 323,
|
||||
"views": 2878,
|
||||
"upvotes": 10,
|
||||
"saves": 15,
|
||||
"saves": 17,
|
||||
"comments": 10,
|
||||
"created_at": "2025-12-30",
|
||||
"updated_at": "2026-01-06",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a"
|
||||
},
|
||||
{
|
||||
"title": "Export to Excel",
|
||||
"slug": "export_mulit_table_to_excel_244b8f9d",
|
||||
"type": "action",
|
||||
"version": "0.3.6",
|
||||
"version": "0.3.7",
|
||||
"author": "Fu-Jie",
|
||||
"description": "Extracts tables from chat messages and exports them to Excel (.xlsx) files with smart formatting.",
|
||||
"downloads": 175,
|
||||
"views": 486,
|
||||
"downloads": 180,
|
||||
"views": 532,
|
||||
"upvotes": 3,
|
||||
"saves": 3,
|
||||
"comments": 0,
|
||||
"created_at": "2025-05-30",
|
||||
"updated_at": "2026-01-03",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d"
|
||||
},
|
||||
{
|
||||
@@ -49,126 +49,142 @@
|
||||
"type": "filter",
|
||||
"version": "1.1.0",
|
||||
"author": "Fu-Jie",
|
||||
"description": "This filter automatically compresses long conversation contexts by intelligently summarizing and removing intermediate messages while preserving critical information, thereby significantly reducing token consumption.",
|
||||
"downloads": 118,
|
||||
"views": 1287,
|
||||
"description": "Reduces token consumption in long conversations while maintaining coherence through intelligent summarization and message compression.",
|
||||
"downloads": 121,
|
||||
"views": 1355,
|
||||
"upvotes": 5,
|
||||
"saves": 9,
|
||||
"comments": 0,
|
||||
"created_at": "2025-11-08",
|
||||
"updated_at": "2025-12-31",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/async_context_compression_b1655bc8"
|
||||
},
|
||||
{
|
||||
"title": "Flash Card ",
|
||||
"slug": "flash_card_65a2ea8f",
|
||||
"type": "action",
|
||||
"version": "0.2.4",
|
||||
"author": "Fu-Jie",
|
||||
"description": "Quickly generates beautiful flashcards from text, extracting key points and categories.",
|
||||
"downloads": 82,
|
||||
"views": 1528,
|
||||
"upvotes": 8,
|
||||
"saves": 5,
|
||||
"comments": 2,
|
||||
"created_at": "2025-12-30",
|
||||
"updated_at": "2026-01-03",
|
||||
"url": "https://openwebui.com/posts/flash_card_65a2ea8f"
|
||||
},
|
||||
{
|
||||
"title": "Smart Infographic",
|
||||
"title": "📊 Smart Infographic (AntV)",
|
||||
"slug": "smart_infographic_ad6f0c7f",
|
||||
"type": "action",
|
||||
"version": "1.4.0",
|
||||
"version": "1.4.1",
|
||||
"author": "jeff",
|
||||
"description": "AI-powered infographic generator based on AntV Infographic. Supports professional templates, auto-icon matching, and SVG/PNG downloads.",
|
||||
"downloads": 80,
|
||||
"views": 1081,
|
||||
"downloads": 106,
|
||||
"views": 1265,
|
||||
"upvotes": 7,
|
||||
"saves": 8,
|
||||
"saves": 9,
|
||||
"comments": 2,
|
||||
"created_at": "2025-12-28",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/smart_infographic_ad6f0c7f"
|
||||
},
|
||||
{
|
||||
"title": "Export to Word (Enhanced Formatting)",
|
||||
"title": "Flash Card",
|
||||
"slug": "flash_card_65a2ea8f",
|
||||
"type": "action",
|
||||
"version": "0.2.4",
|
||||
"author": "Fu-Jie",
|
||||
"description": "Quickly generates beautiful flashcards from text, extracting key points and categories.",
|
||||
"downloads": 91,
|
||||
"views": 1665,
|
||||
"upvotes": 8,
|
||||
"saves": 5,
|
||||
"comments": 2,
|
||||
"created_at": "2025-12-30",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/flash_card_65a2ea8f"
|
||||
},
|
||||
{
|
||||
"title": "Export to Word (Enhanced)",
|
||||
"slug": "export_to_word_enhanced_formatting_fca6a315",
|
||||
"type": "action",
|
||||
"version": "0.4.2",
|
||||
"version": "0.4.3",
|
||||
"author": "Fu-Jie",
|
||||
"description": "Export the current conversation to a formatted Word doc with syntax highlighting, AI-generated titles, and perfect Markdown rendering (tables, quotes, lists).",
|
||||
"downloads": 67,
|
||||
"views": 605,
|
||||
"description": "Export current conversation from Markdown to Word (.docx) with Mermaid diagrams rendered client-side (Mermaid.js, SVG+PNG), LaTeX math, real hyperlinks, improved tables, syntax highlighting, and blockquote support.",
|
||||
"downloads": 80,
|
||||
"views": 751,
|
||||
"upvotes": 5,
|
||||
"saves": 4,
|
||||
"saves": 6,
|
||||
"comments": 0,
|
||||
"created_at": "2026-01-03",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315"
|
||||
},
|
||||
{
|
||||
"title": "智能信息图",
|
||||
"title": "📊 智能信息图 (AntV Infographic)",
|
||||
"slug": "智能信息图_e04a48ff",
|
||||
"type": "action",
|
||||
"version": "1.3.1",
|
||||
"version": "1.4.1",
|
||||
"author": "jeff",
|
||||
"description": "基于 AntV Infographic 的智能信息图生成插件。支持多种专业模板,自动图标匹配,并提供 SVG/PNG 下载功能。",
|
||||
"downloads": 33,
|
||||
"views": 426,
|
||||
"downloads": 35,
|
||||
"views": 473,
|
||||
"upvotes": 3,
|
||||
"saves": 0,
|
||||
"comments": 0,
|
||||
"created_at": "2025-12-28",
|
||||
"updated_at": "2025-12-29",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/智能信息图_e04a48ff"
|
||||
},
|
||||
{
|
||||
"title": "导出为 Word-支持公式、流程图、表格和代码块",
|
||||
"title": "导出为 Word (增强版)",
|
||||
"slug": "导出为_word_支持公式流程图表格和代码块_8a6306c0",
|
||||
"type": "action",
|
||||
"version": "0.4.1",
|
||||
"version": "0.4.3",
|
||||
"author": "Fu-Jie",
|
||||
"description": "将当前对话内容从 Markdown 转换并导出为 Word (.docx) 文件,支持中英文无乱码。",
|
||||
"downloads": 20,
|
||||
"views": 799,
|
||||
"upvotes": 7,
|
||||
"saves": 1,
|
||||
"description": "将对话导出为 Word (.docx),支持 Mermaid 图表 (客户端渲染 SVG+PNG)、LaTeX 数学公式、真实超链接、增强表格格式、代码高亮和引用块。",
|
||||
"downloads": 30,
|
||||
"views": 902,
|
||||
"upvotes": 8,
|
||||
"saves": 2,
|
||||
"comments": 1,
|
||||
"created_at": "2026-01-04",
|
||||
"updated_at": "2026-01-05",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0"
|
||||
},
|
||||
{
|
||||
"title": "智能生成交互式思维导图,帮助用户可视化知识",
|
||||
"title": "思维导图",
|
||||
"slug": "智能生成交互式思维导图帮助用户可视化知识_8d4b097b",
|
||||
"type": "action",
|
||||
"version": "0.8.0",
|
||||
"author": "",
|
||||
"version": "0.9.1",
|
||||
"author": "Fu-Jie",
|
||||
"description": "智能分析文本内容,生成交互式思维导图,帮助用户结构化和可视化知识。",
|
||||
"downloads": 14,
|
||||
"views": 263,
|
||||
"downloads": 17,
|
||||
"views": 295,
|
||||
"upvotes": 2,
|
||||
"saves": 1,
|
||||
"comments": 0,
|
||||
"created_at": "2025-12-31",
|
||||
"updated_at": "2025-12-31",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b"
|
||||
},
|
||||
{
|
||||
"title": "闪记卡生成插件",
|
||||
"slug": "闪记卡生成插件_4a31eac3",
|
||||
"title": "Deep Dive",
|
||||
"slug": "deep_dive_c0b846e4",
|
||||
"type": "action",
|
||||
"version": "0.2.2",
|
||||
"version": "1.0.0",
|
||||
"author": "Fu-Jie",
|
||||
"description": "快速将文本提炼为精美的学习记忆卡片,支持核心要点提取与分类。",
|
||||
"downloads": 12,
|
||||
"views": 320,
|
||||
"description": "A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.",
|
||||
"downloads": 14,
|
||||
"views": 167,
|
||||
"upvotes": 3,
|
||||
"saves": 1,
|
||||
"comments": 0,
|
||||
"created_at": "2026-01-08",
|
||||
"updated_at": "2026-01-08",
|
||||
"url": "https://openwebui.com/posts/deep_dive_c0b846e4"
|
||||
},
|
||||
{
|
||||
"title": "闪记卡 (Flash Card)",
|
||||
"slug": "闪记卡生成插件_4a31eac3",
|
||||
"type": "action",
|
||||
"version": "0.2.4",
|
||||
"author": "Fu-Jie",
|
||||
"description": "快速将文本提炼为精美的学习记忆卡片,支持核心要点提取与分类。",
|
||||
"downloads": 12,
|
||||
"views": 339,
|
||||
"upvotes": 4,
|
||||
"saves": 1,
|
||||
"comments": 0,
|
||||
"created_at": "2025-12-30",
|
||||
"updated_at": "2025-12-31",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/闪记卡生成插件_4a31eac3"
|
||||
},
|
||||
{
|
||||
@@ -177,15 +193,31 @@
|
||||
"type": "filter",
|
||||
"version": "1.1.0",
|
||||
"author": "Fu-Jie",
|
||||
"description": "在 LLM 响应完成后进行上下文摘要和压缩",
|
||||
"downloads": 5,
|
||||
"views": 122,
|
||||
"description": "通过智能摘要和消息压缩,降低长对话的 token 消耗,同时保持对话连贯性。",
|
||||
"downloads": 6,
|
||||
"views": 148,
|
||||
"upvotes": 2,
|
||||
"saves": 1,
|
||||
"comments": 0,
|
||||
"created_at": "2025-11-08",
|
||||
"updated_at": "2025-12-31",
|
||||
"updated_at": "2026-01-07",
|
||||
"url": "https://openwebui.com/posts/异步上下文压缩_5c0617cb"
|
||||
},
|
||||
{
|
||||
"title": "精读",
|
||||
"slug": "精读_99830b0f",
|
||||
"type": "action",
|
||||
"version": "1.0.0",
|
||||
"author": "Fu-Jie",
|
||||
"description": "全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。",
|
||||
"downloads": 1,
|
||||
"views": 61,
|
||||
"upvotes": 2,
|
||||
"saves": 1,
|
||||
"comments": 0,
|
||||
"created_at": "2026-01-08",
|
||||
"updated_at": "2026-01-08",
|
||||
"url": "https://openwebui.com/posts/精读_99830b0f"
|
||||
}
|
||||
],
|
||||
"user": {
|
||||
@@ -193,11 +225,11 @@
|
||||
"name": "Fu-Jie",
|
||||
"profile_url": "https://openwebui.com/u/Fu-Jie",
|
||||
"profile_image": "https://community.s3.openwebui.com/uploads/users/b15d1348-4347-42b4-b815-e053342d6cb0/profile_d9510745-4bd4-4f8f-a997-4a21847d9300.webp",
|
||||
"followers": 49,
|
||||
"followers": 59,
|
||||
"following": 2,
|
||||
"total_points": 63,
|
||||
"post_points": 54,
|
||||
"comment_points": 9,
|
||||
"contributions": 18
|
||||
"total_points": 70,
|
||||
"post_points": 60,
|
||||
"comment_points": 10,
|
||||
"contributions": 20
|
||||
}
|
||||
}
|
||||
@@ -1,35 +1,37 @@
|
||||
# 📊 OpenWebUI Community Stats Report
|
||||
|
||||
> 📅 Updated: 2026-01-08 00:11
|
||||
> 📅 Updated: 2026-01-09 20:14
|
||||
|
||||
## 📈 Overview
|
||||
|
||||
| Metric | Value |
|
||||
|------|------|
|
||||
| 📝 Total Posts | 11 |
|
||||
| ⬇️ Total Downloads | 889 |
|
||||
| 👁️ Total Views | 9358 |
|
||||
| 👍 Total Upvotes | 55 |
|
||||
| 💾 Total Saves | 48 |
|
||||
| 📝 Total Posts | 13 |
|
||||
| ⬇️ Total Downloads | 1016 |
|
||||
| 👁️ Total Views | 10831 |
|
||||
| 👍 Total Upvotes | 62 |
|
||||
| 💾 Total Saves | 56 |
|
||||
| 💬 Total Comments | 15 |
|
||||
|
||||
## 📂 By Type
|
||||
|
||||
- **action**: 9
|
||||
- **action**: 11
|
||||
- **filter**: 2
|
||||
|
||||
## 📋 Posts List
|
||||
|
||||
| Rank | Title | Type | Version | Downloads | Views | Upvotes | Saves | Updated |
|
||||
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| 1 | [Turn Any Text into Beautiful Mind Maps](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 283 | 2441 | 10 | 15 | 2026-01-06 |
|
||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.6 | 175 | 486 | 3 | 3 | 2026-01-03 |
|
||||
| 3 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | filter | 1.1.0 | 118 | 1287 | 5 | 9 | 2025-12-31 |
|
||||
| 4 | [Flash Card ](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 82 | 1528 | 8 | 5 | 2026-01-03 |
|
||||
| 5 | [Smart Infographic](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.0 | 80 | 1081 | 7 | 8 | 2026-01-07 |
|
||||
| 6 | [Export to Word (Enhanced Formatting)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.2 | 67 | 605 | 5 | 4 | 2026-01-07 |
|
||||
| 7 | [智能信息图](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.3.1 | 33 | 426 | 3 | 0 | 2025-12-29 |
|
||||
| 8 | [导出为 Word-支持公式、流程图、表格和代码块](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.1 | 20 | 799 | 7 | 1 | 2026-01-05 |
|
||||
| 9 | [智能生成交互式思维导图,帮助用户可视化知识](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.8.0 | 14 | 263 | 2 | 1 | 2025-12-31 |
|
||||
| 10 | [闪记卡生成插件](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.2 | 12 | 320 | 3 | 1 | 2025-12-31 |
|
||||
| 11 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | filter | 1.1.0 | 5 | 122 | 2 | 1 | 2025-12-31 |
|
||||
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 323 | 2878 | 10 | 17 | 2026-01-07 |
|
||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 180 | 532 | 3 | 3 | 2026-01-07 |
|
||||
| 3 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | filter | 1.1.0 | 121 | 1355 | 5 | 9 | 2026-01-07 |
|
||||
| 4 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.1 | 106 | 1265 | 7 | 9 | 2026-01-07 |
|
||||
| 5 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 91 | 1665 | 8 | 5 | 2026-01-07 |
|
||||
| 6 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 80 | 751 | 5 | 6 | 2026-01-07 |
|
||||
| 7 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.1 | 35 | 473 | 3 | 0 | 2026-01-07 |
|
||||
| 8 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 30 | 902 | 8 | 2 | 2026-01-07 |
|
||||
| 9 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 17 | 295 | 2 | 1 | 2026-01-07 |
|
||||
| 10 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 14 | 167 | 3 | 1 | 2026-01-08 |
|
||||
| 11 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 12 | 339 | 4 | 1 | 2026-01-07 |
|
||||
| 12 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | filter | 1.1.0 | 6 | 148 | 2 | 1 | 2026-01-07 |
|
||||
| 13 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 1 | 61 | 2 | 1 | 2026-01-08 |
|
||||
|
||||
@@ -1,35 +1,37 @@
|
||||
# 📊 OpenWebUI 社区统计报告
|
||||
|
||||
> 📅 更新时间: 2026-01-08 00:11
|
||||
> 📅 更新时间: 2026-01-09 20:14
|
||||
|
||||
## 📈 总览
|
||||
|
||||
| 指标 | 数值 |
|
||||
|------|------|
|
||||
| 📝 发布数量 | 11 |
|
||||
| ⬇️ 总下载量 | 889 |
|
||||
| 👁️ 总浏览量 | 9358 |
|
||||
| 👍 总点赞数 | 55 |
|
||||
| 💾 总收藏数 | 48 |
|
||||
| 📝 发布数量 | 13 |
|
||||
| ⬇️ 总下载量 | 1016 |
|
||||
| 👁️ 总浏览量 | 10831 |
|
||||
| 👍 总点赞数 | 62 |
|
||||
| 💾 总收藏数 | 56 |
|
||||
| 💬 总评论数 | 15 |
|
||||
|
||||
## 📂 按类型分类
|
||||
|
||||
- **action**: 9
|
||||
- **action**: 11
|
||||
- **filter**: 2
|
||||
|
||||
## 📋 发布列表
|
||||
|
||||
| 排名 | 标题 | 类型 | 版本 | 下载 | 浏览 | 点赞 | 收藏 | 更新日期 |
|
||||
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| 1 | [Turn Any Text into Beautiful Mind Maps](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 283 | 2441 | 10 | 15 | 2026-01-06 |
|
||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.6 | 175 | 486 | 3 | 3 | 2026-01-03 |
|
||||
| 3 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | filter | 1.1.0 | 118 | 1287 | 5 | 9 | 2025-12-31 |
|
||||
| 4 | [Flash Card ](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 82 | 1528 | 8 | 5 | 2026-01-03 |
|
||||
| 5 | [Smart Infographic](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.0 | 80 | 1081 | 7 | 8 | 2026-01-07 |
|
||||
| 6 | [Export to Word (Enhanced Formatting)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.2 | 67 | 605 | 5 | 4 | 2026-01-07 |
|
||||
| 7 | [智能信息图](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.3.1 | 33 | 426 | 3 | 0 | 2025-12-29 |
|
||||
| 8 | [导出为 Word-支持公式、流程图、表格和代码块](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.1 | 20 | 799 | 7 | 1 | 2026-01-05 |
|
||||
| 9 | [智能生成交互式思维导图,帮助用户可视化知识](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.8.0 | 14 | 263 | 2 | 1 | 2025-12-31 |
|
||||
| 10 | [闪记卡生成插件](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.2 | 12 | 320 | 3 | 1 | 2025-12-31 |
|
||||
| 11 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | filter | 1.1.0 | 5 | 122 | 2 | 1 | 2025-12-31 |
|
||||
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 323 | 2878 | 10 | 17 | 2026-01-07 |
|
||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 180 | 532 | 3 | 3 | 2026-01-07 |
|
||||
| 3 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | filter | 1.1.0 | 121 | 1355 | 5 | 9 | 2026-01-07 |
|
||||
| 4 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.1 | 106 | 1265 | 7 | 9 | 2026-01-07 |
|
||||
| 5 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 91 | 1665 | 8 | 5 | 2026-01-07 |
|
||||
| 6 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 80 | 751 | 5 | 6 | 2026-01-07 |
|
||||
| 7 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.1 | 35 | 473 | 3 | 0 | 2026-01-07 |
|
||||
| 8 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 30 | 902 | 8 | 2 | 2026-01-07 |
|
||||
| 9 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 17 | 295 | 2 | 1 | 2026-01-07 |
|
||||
| 10 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 14 | 167 | 3 | 1 | 2026-01-08 |
|
||||
| 11 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 12 | 339 | 4 | 1 | 2026-01-07 |
|
||||
| 12 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | filter | 1.1.0 | 6 | 148 | 2 | 1 | 2026-01-07 |
|
||||
| 13 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 1 | 61 | 2 | 1 | 2026-01-08 |
|
||||
|
||||
150
docs/development/frontend-console-debugging.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# 🛠️ Debugging Python Plugins with Frontend Console
|
||||
|
||||
When developing plugins for Open WebUI, debugging can be challenging. Standard `print()` statements or server-side logging might not always be accessible, especially in hosted environments or when you want to see the data flow in real-time alongside the UI interactions.
|
||||
|
||||
This guide introduces a powerful technique: **Frontend Console Debugging**. By injecting JavaScript from your Python plugin, you can print structured logs directly to the browser's Developer Tools console (F12).
|
||||
|
||||
## Why Frontend Debugging?
|
||||
|
||||
* **Real-time Feedback**: See logs immediately as actions happen in the browser.
|
||||
* **Rich Objects**: Inspect complex JSON objects (like `body` or `messages`) interactively, rather than reading massive text dumps.
|
||||
* **No Server Access Needed**: Debug issues even if you don't have SSH/Console access to the backend server.
|
||||
* **Clean Output**: Group logs using `console.group()` to keep your console organized.
|
||||
|
||||
## The Core Mechanism
|
||||
|
||||
Open WebUI plugins (both Actions and Filters) support an event system. We can leverage the `__event_call__` (or sometimes `__event_emitter__`) to send a special event of type `execute`. This tells the frontend to run the provided JavaScript code.
|
||||
|
||||
### The Helper Method
|
||||
|
||||
To make this easy to use, we recommend adding a helper method `_emit_debug_log` to your plugin class.
|
||||
|
||||
```python
|
||||
import json
|
||||
from typing import List
|
||||
|
||||
async def _emit_debug_log(
|
||||
self,
|
||||
__event_call__,
|
||||
title: str,
|
||||
data: dict
|
||||
):
|
||||
"""
|
||||
Emit debug log to browser console via JS execution.
|
||||
|
||||
Args:
|
||||
__event_call__: The event callable passed to action/outlet.
|
||||
title: A title for the log group.
|
||||
data: A dictionary of data to log.
|
||||
"""
|
||||
# 1. Check if debugging is enabled (recommended)
|
||||
        if not getattr(self.valves, "show_debug_log", False) or not __event_call__:
|
||||
return
|
||||
|
||||
try:
|
||||
# 2. Construct the JavaScript code
|
||||
# We use an async IIFE (Immediately Invoked Function Expression)
|
||||
# to ensure a clean scope and support await if needed.
|
||||
js_code = f"""
|
||||
(async function() {{
|
||||
console.group("🛠️ Plugin Debug: {title}");
|
||||
console.log({json.dumps(data, ensure_ascii=False)});
|
||||
console.groupEnd();
|
||||
}})();
|
||||
"""
|
||||
|
||||
# 3. Send the execute event
|
||||
await __event_call__(
|
||||
{
|
||||
"type": "execute",
|
||||
"data": {"code": js_code},
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error emitting debug log: {e}")
|
||||
```
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### 1. Add a Valve for Control
|
||||
|
||||
It's best practice to make debugging optional so it doesn't clutter the console for normal users.
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class Filter:
|
||||
class Valves(BaseModel):
|
||||
show_debug_log: bool = Field(
|
||||
default=False,
|
||||
description="Print debug logs to browser console (F12)"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
self.valves = self.Valves()
|
||||
```
|
||||
|
||||
### 2. Inject `__event_call__`
|
||||
|
||||
Ensure your `action` (for Actions) or `outlet` (for Filters) method accepts `__event_call__`.
|
||||
|
||||
**For Filters (`outlet`):**
|
||||
|
||||
```python
|
||||
async def outlet(
|
||||
self,
|
||||
body: dict,
|
||||
__user__: Optional[dict] = None,
|
||||
__event_call__=None, # <--- Add this
|
||||
__metadata__: Optional[dict] = None,
|
||||
) -> dict:
|
||||
```
|
||||
|
||||
**For Actions (`action`):**
|
||||
|
||||
```python
|
||||
async def action(
|
||||
self,
|
||||
body: dict,
|
||||
__user__=None,
|
||||
__event_call__=None, # <--- Add this
|
||||
__request__=None,
|
||||
):
|
||||
```
|
||||
|
||||
### 3. Call the Helper
|
||||
|
||||
Now you can log anything, anywhere in your logic!
|
||||
|
||||
```python
|
||||
# Inside your logic...
|
||||
new_content = self.process_content(content)
|
||||
|
||||
# Log the before and after
|
||||
await self._emit_debug_log(
|
||||
__event_call__,
|
||||
"Content Normalization",
|
||||
{
|
||||
"original": content,
|
||||
"processed": new_content,
|
||||
"changes": diff_list
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use `json.dumps`**: Always serialize your Python dictionaries to JSON strings before embedding them in the f-string. This handles escaping quotes and special characters correctly.
|
||||
2. **Async IIFE**: Wrapping your JS in `(async function() { ... })();` is safer than raw code. It prevents variable collisions with other scripts and allows using `await` inside your debug script if you ever need to check DOM elements.
|
||||
3. **Check for None**: Always check if `__event_call__` is not None before using it, as it might not be available in all contexts (e.g., when running tests or in older Open WebUI versions).
|
||||
|
||||
## Example Output
|
||||
|
||||
When enabled, your browser console will show:
|
||||
|
||||
```text
|
||||
> 🛠️ Plugin Debug: Content Normalization
|
||||
> {original: "...", processed: "...", changes: [...]}
|
||||
```
|
||||
|
||||
You can expand the object to inspect every detail of your data. Happy debugging!
|
||||
64
docs/development/mermaid-syntax-standards.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# Mermaid Syntax Standards & Best Practices
|
||||
|
||||
This document summarizes the official syntax standards for Mermaid flowcharts, focusing on node labels, quoting rules, and special character handling. It serves as a reference for the `markdown_normalizer` plugin logic.
|
||||
|
||||
## 1. Node Shapes & Syntax
|
||||
|
||||
Mermaid supports various node shapes defined by specific wrapping characters.
|
||||
|
||||
| Shape | Syntax | Example |
|
||||
| :--- | :--- | :--- |
|
||||
| **Rectangle** (Default) | `id[Label]` | `A[Start]` |
|
||||
| **Rounded** | `id(Label)` | `B(Process)` |
|
||||
| **Stadium** (Pill) | `id([Label])` | `C([End])` |
|
||||
| **Subroutine** | `id[[Label]]` | `D[[Subroutine]]` |
|
||||
| **Cylinder** (Database) | `id[(Label)]` | `E[(Database)]` |
|
||||
| **Circle** | `id((Label))` | `F((Point))` |
|
||||
| **Double Circle** | `id(((Label)))` | `G(((Endpoint)))` |
|
||||
| **Asymmetric** | `id>Label]` | `H>Flag]` |
|
||||
| **Rhombus** (Decision) | `id{Label}` | `I{Decision}` |
|
||||
| **Hexagon** | `id{{Label}}` | `J{{Prepare}}` |
|
||||
| **Parallelogram** | `id[/Label/]` | `K[/Input/]` |
|
||||
| **Parallelogram Alt** | `id[\Label\]` | `L[\Output\]` |
|
||||
| **Trapezoid** | `id[/Label\]` | `M[/Trap/]` |
|
||||
| **Trapezoid Alt** | `id[\Label/]` | `N[\TrapAlt/]` |
|
||||
|
||||
## 2. Quoting Rules (Critical)
|
||||
|
||||
### Why Quote?
|
||||
Quoting node labels is **highly recommended** and sometimes **mandatory** to prevent syntax errors.
|
||||
|
||||
### Mandatory Quoting Scenarios
|
||||
You **MUST** enclose labels in double quotes `"` if they contain:
|
||||
1. **Special Characters**: `()`, `[]`, `{}`, `;`, `"`, etc.
|
||||
2. **Keywords**: Words like `end`, `subgraph`, etc., if used in specific contexts.
|
||||
3. **Unicode/Emoji**: While often supported without quotes, quoting ensures consistent rendering across different environments.
|
||||
4. **Markdown**: If you want to use Markdown formatting (bold, italic) inside a label.
|
||||
|
||||
### Best Practice: Always Quote
|
||||
To ensure robustness, especially when processing LLM-generated content which may contain unpredictable characters, **always enclosing labels in double quotes is the safest strategy**.
|
||||
|
||||
**Examples:**
|
||||
* ❌ Risky: `id(Start: 15:00)` (Colon might be interpreted as style separator)
|
||||
* ✅ Safe: `id("Start: 15:00")`
|
||||
* ❌ Broken: `id(Func(x))` (Nested parentheses break parsing)
|
||||
* ✅ Safe: `id("Func(x)")`
|
||||
|
||||
## 3. Escape Characters
|
||||
|
||||
Inside a quoted string:
|
||||
* Double quotes `"` should be escaped with the `#quot;` entity (Mermaid does not reliably support backslash escapes such as `\"`).
|
||||
* HTML entities (e.g., `#35;` for `#`) can be used.
|
||||
|
||||
## 4. Plugin Logic Verification
|
||||
|
||||
The `markdown_normalizer` plugin implements the following logic:
|
||||
|
||||
1. **Detection**: Identifies Mermaid node definitions using a comprehensive regex covering all shapes above.
|
||||
2. **Normalization**:
|
||||
* Checks if the label is already quoted.
|
||||
* If **NOT quoted**, it wraps the label in double quotes `""`.
|
||||
* Escapes any existing double quotes inside the label (`"` -> `\"`).
|
||||
3. **Shape Preservation**: The regex captures the specific opening and closing delimiters (e.g., `((` and `))`) to ensure the node shape is strictly preserved during normalization.
|
||||
|
||||
**Conclusion**: The plugin's behavior of automatically adding quotes to unquoted labels is **fully aligned with Mermaid's official best practices** for robustness and error prevention.
|
||||
111
docs/plugins/actions/deep-dive.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# Deep Dive
|
||||
|
||||
<span class="category-badge action">Action</span>
|
||||
<span class="version-badge">v1.0.0</span>
|
||||
|
||||
A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The Deep Dive plugin transforms how you understand complex content by guiding you through a structured thinking process. Rather than just summarizing, it deconstructs content across four phases:
|
||||
|
||||
- **🔍 The Context (What?)**: Panoramic view of the situation and background
|
||||
- **🧠 The Logic (Why?)**: Deconstruction of reasoning and mental models
|
||||
- **💎 The Insight (So What?)**: Non-obvious value and hidden implications
|
||||
- **🚀 The Path (Now What?)**: Specific, prioritized strategic actions
|
||||
|
||||
## Features
|
||||
|
||||
- :material-brain: **Thinking Chain**: Complete structured analysis process
|
||||
- :material-eye: **Deep Understanding**: Reveals hidden assumptions and blind spots
|
||||
- :material-lightbulb-on: **Insight Extraction**: Finds the "Aha!" moments
|
||||
- :material-rocket-launch: **Action Oriented**: Translates understanding into actionable steps
|
||||
- :material-theme-light-dark: **Theme Adaptive**: Auto-adapts to OpenWebUI light/dark theme
|
||||
- :material-translate: **Multi-language**: Outputs in user's preferred language
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
1. Download the plugin file: [`deep_dive.py`](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/deep-dive)
|
||||
2. Upload to OpenWebUI: **Admin Panel** → **Settings** → **Functions**
|
||||
3. Enable the plugin
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
|
||||
1. Provide any long text, article, or meeting notes in the chat
|
||||
2. Click the **Deep Dive** button in the message action bar
|
||||
3. Follow the visual timeline from Context → Logic → Insight → Path
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
| Option | Type | Default | Description |
|
||||
|--------|------|---------|-------------|
|
||||
| `SHOW_STATUS` | boolean | `true` | Show status updates during processing |
|
||||
| `MODEL_ID` | string | `""` | LLM model for analysis (empty = current model) |
|
||||
| `MIN_TEXT_LENGTH` | integer | `200` | Minimum text length for analysis |
|
||||
| `CLEAR_PREVIOUS_HTML` | boolean | `true` | Clear previous plugin results |
|
||||
| `MESSAGE_COUNT` | integer | `1` | Number of recent messages to analyze |
|
||||
|
||||
---
|
||||
|
||||
## Theme Support
|
||||
|
||||
Deep Dive automatically adapts to OpenWebUI's light/dark theme:
|
||||
|
||||
- Detects theme from parent document `<meta name="theme-color">` tag
|
||||
- Falls back to `html/body` class or `data-theme` attribute
|
||||
- Uses system preference `prefers-color-scheme: dark` as last resort
|
||||
|
||||
!!! tip "For Best Results"
|
||||
Enable **iframe Sandbox Allow Same Origin** in OpenWebUI:
|
||||
**Settings** → **Interface** → **Artifacts** → Check **iframe Sandbox Allow Same Origin**
|
||||
|
||||
---
|
||||
|
||||
## Example Output
|
||||
|
||||
The plugin generates a beautiful structured timeline:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ 🌊 Deep Dive Analysis │
|
||||
│ 👤 User 📅 Date 📊 Word count │
|
||||
├─────────────────────────────────────┤
|
||||
│ 🔍 Phase 01: The Context │
|
||||
│ [High-level panoramic view] │
|
||||
│ │
|
||||
│ 🧠 Phase 02: The Logic │
|
||||
│ • Reasoning structure... │
|
||||
│ • Hidden assumptions... │
|
||||
│ │
|
||||
│ 💎 Phase 03: The Insight │
|
||||
│ • Non-obvious value... │
|
||||
│ • Blind spots revealed... │
|
||||
│ │
|
||||
│ 🚀 Phase 04: The Path │
|
||||
│ ▸ Priority Action 1... │
|
||||
│ ▸ Priority Action 2... │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
!!! note "Prerequisites"
|
||||
- OpenWebUI v0.3.0 or later
|
||||
- Uses the active LLM model for analysis
|
||||
- Requires `markdown` Python package
|
||||
|
||||
---
|
||||
|
||||
## Source Code
|
||||
|
||||
[:fontawesome-brands-github: View on GitHub](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/deep-dive){ .md-button }
|
||||
111
docs/plugins/actions/deep-dive.zh.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# 精读 (Deep Dive)
|
||||
|
||||
<span class="category-badge action">Action</span>
|
||||
<span class="version-badge">v1.0.0</span>
|
||||
|
||||
全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。
|
||||
|
||||
---
|
||||
|
||||
## 概述
|
||||
|
||||
精读插件改变了您理解复杂内容的方式,通过结构化的思维过程引导您进行深度分析。它不仅仅是摘要,而是从四个阶段解构内容:
|
||||
|
||||
- **🔍 全景 (The Context)**: 情境与背景的高层级全景视图
|
||||
- **🧠 脉络 (The Logic)**: 解构底层推理逻辑与思维模型
|
||||
- **💎 洞察 (The Insight)**: 提取非显性价值与隐藏含义
|
||||
- **🚀 路径 (The Path)**: 具体的、按优先级排列的战略行动
|
||||
|
||||
## 功能特性
|
||||
|
||||
- :material-brain: **思维链**: 完整的结构化分析过程
|
||||
- :material-eye: **深度理解**: 揭示隐藏的假设和思维盲点
|
||||
- :material-lightbulb-on: **洞察提取**: 发现"原来如此"的时刻
|
||||
- :material-rocket-launch: **行动导向**: 将深度理解转化为可执行步骤
|
||||
- :material-theme-light-dark: **主题自适应**: 自动适配 OpenWebUI 深色/浅色主题
|
||||
- :material-translate: **多语言**: 以用户偏好语言输出
|
||||
|
||||
---
|
||||
|
||||
## 安装
|
||||
|
||||
1. 下载插件文件: [`deep_dive_cn.py`](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/deep-dive)
|
||||
2. 上传到 OpenWebUI: **管理面板** → **设置** → **Functions**
|
||||
3. 启用插件
|
||||
|
||||
---
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 在聊天中提供任何长文本、文章或会议记录
|
||||
2. 点击消息操作栏中的 **精读** 按钮
|
||||
3. 沿着视觉时间轴从"全景"探索到"路径"
|
||||
|
||||
---
|
||||
|
||||
## 配置参数
|
||||
|
||||
| 选项 | 类型 | 默认值 | 描述 |
|
||||
|------|------|--------|------|
|
||||
| `SHOW_STATUS` | boolean | `true` | 处理过程中是否显示状态更新 |
|
||||
| `MODEL_ID` | string | `""` | 用于分析的 LLM 模型(空 = 当前模型) |
|
||||
| `MIN_TEXT_LENGTH` | integer | `200` | 分析所需的最小文本长度 |
|
||||
| `CLEAR_PREVIOUS_HTML` | boolean | `true` | 是否清除之前的插件结果 |
|
||||
| `MESSAGE_COUNT` | integer | `1` | 要分析的最近消息数量 |
|
||||
|
||||
---
|
||||
|
||||
## 主题支持
|
||||
|
||||
精读插件自动适配 OpenWebUI 的深色/浅色主题:
|
||||
|
||||
- 从父文档 `<meta name="theme-color">` 标签检测主题
|
||||
- 回退到 `html/body` 的 class 或 `data-theme` 属性
|
||||
- 最后使用系统偏好 `prefers-color-scheme: dark`
|
||||
|
||||
!!! tip "最佳效果"
|
||||
请在 OpenWebUI 中启用 **iframe Sandbox Allow Same Origin**:
|
||||
**设置** → **界面** → **Artifacts** → 勾选 **iframe Sandbox Allow Same Origin**
|
||||
|
||||
---
|
||||
|
||||
## 输出示例
|
||||
|
||||
插件生成精美的结构化时间轴:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ 📖 精读分析报告 │
|
||||
│ 👤 用户 📅 日期 📊 字数 │
|
||||
├─────────────────────────────────────┤
|
||||
│ 🔍 阶段 01: 全景 (The Context) │
|
||||
│ [高层级全景视图内容] │
|
||||
│ │
|
||||
│ 🧠 阶段 02: 脉络 (The Logic) │
|
||||
│ • 推理结构分析... │
|
||||
│ • 隐藏假设识别... │
|
||||
│ │
|
||||
│ 💎 阶段 03: 洞察 (The Insight) │
|
||||
│ • 非显性价值提取... │
|
||||
│ • 思维盲点揭示... │
|
||||
│ │
|
||||
│ 🚀 阶段 04: 路径 (The Path) │
|
||||
│ ▸ 优先级行动 1... │
|
||||
│ ▸ 优先级行动 2... │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 系统要求
|
||||
|
||||
!!! note "前提条件"
|
||||
- OpenWebUI v0.3.0 或更高版本
|
||||
- 使用当前活跃的 LLM 模型进行分析
|
||||
- 需要 `markdown` Python 包
|
||||
|
||||
---
|
||||
|
||||
## 源代码
|
||||
|
||||
[:fontawesome-brands-github: 在 GitHub 上查看](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/deep-dive){ .md-button }
|
||||
@@ -67,15 +67,15 @@ Actions are interactive plugins that:
|
||||
|
||||
[:octicons-arrow-right-24: Documentation](export-to-word.md)
|
||||
|
||||
- :material-text-box-search:{ .lg .middle } **Summary**
|
||||
- :material-brain:{ .lg .middle } **Deep Dive**
|
||||
|
||||
---
|
||||
|
||||
Generate concise summaries of long text content with key points extraction.
|
||||
A comprehensive thinking lens that dives deep into any content - Context → Logic → Insight → Path. Supports theme auto-adaptation.
|
||||
|
||||
**Version:** 0.1.0
|
||||
**Version:** 1.0.0
|
||||
|
||||
[:octicons-arrow-right-24: Documentation](summary.md)
|
||||
[:octicons-arrow-right-24: Documentation](deep-dive.md)
|
||||
|
||||
- :material-image-text:{ .lg .middle } **Infographic to Markdown**
|
||||
|
||||
|
||||
@@ -67,15 +67,15 @@ Actions 是交互式插件,能够:
|
||||
|
||||
[:octicons-arrow-right-24: 查看文档](export-to-word.md)
|
||||
|
||||
- :material-text-box-search:{ .lg .middle } **Summary**
|
||||
- :material-brain:{ .lg .middle } **精读 (Deep Dive)**
|
||||
|
||||
---
|
||||
|
||||
对长文本进行精简总结,提取要点。
|
||||
全方位的思维透镜 —— 全景 → 脉络 → 洞察 → 路径。支持主题自适应。
|
||||
|
||||
**版本:** 0.1.0
|
||||
**版本:** 1.0.0
|
||||
|
||||
[:octicons-arrow-right-24: 查看文档](summary.md)
|
||||
[:octicons-arrow-right-24: 查看文档](deep-dive.zh.md)
|
||||
|
||||
- :material-image-text:{ .lg .middle } **信息图转 Markdown**
|
||||
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
# Summary
|
||||
|
||||
<span class="category-badge action">Action</span>
|
||||
<span class="version-badge">v0.1.0</span>
|
||||
|
||||
Generate concise summaries of long text content with key points extraction.
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The Summary plugin helps you quickly understand long pieces of text by generating concise summaries with extracted key points. It's perfect for:
|
||||
|
||||
- Summarizing long articles or documents
|
||||
- Extracting key points from conversations
|
||||
- Creating quick overviews of complex topics
|
||||
|
||||
## Features
|
||||
|
||||
- :material-text-box-search: **Smart Summarization**: AI-powered content analysis
|
||||
- :material-format-list-bulleted: **Key Points**: Extracted important highlights
|
||||
- :material-content-copy: **Easy Copy**: One-click copying of summaries
|
||||
- :material-tune: **Adjustable Length**: Control summary detail level
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
1. Download the plugin file: [`summary.py`](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/summary)
|
||||
2. Upload to OpenWebUI: **Admin Panel** → **Settings** → **Functions**
|
||||
3. Enable the plugin
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
|
||||
1. Get a long response from the AI or paste long text
|
||||
2. Click the **Summary** button in the message action bar
|
||||
3. View the generated summary with key points
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
| Option | Type | Default | Description |
|
||||
|--------|------|---------|-------------|
|
||||
| `summary_length` | string | `"medium"` | Length of summary (short/medium/long) |
|
||||
| `include_key_points` | boolean | `true` | Extract and list key points |
|
||||
| `language` | string | `"auto"` | Output language |
|
||||
|
||||
---
|
||||
|
||||
## Example Output
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
|
||||
This document discusses the implementation of a new feature
|
||||
for the application, focusing on user experience improvements
|
||||
and performance optimizations.
|
||||
|
||||
### Key Points
|
||||
|
||||
- ✅ New user interface design improves accessibility
|
||||
- ✅ Backend optimizations reduce load times by 40%
|
||||
- ✅ Mobile responsiveness enhanced
|
||||
- ✅ Integration with third-party services simplified
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
!!! note "Prerequisites"
|
||||
- OpenWebUI v0.3.0 or later
|
||||
- Uses the active LLM model for summarization
|
||||
|
||||
---
|
||||
|
||||
## Source Code
|
||||
|
||||
[:fontawesome-brands-github: View on GitHub](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/summary){ .md-button }
|
||||
@@ -1,82 +0,0 @@
|
||||
# Summary(摘要)
|
||||
|
||||
<span class="category-badge action">Action</span>
|
||||
<span class="version-badge">v0.1.0</span>
|
||||
|
||||
为长文本生成简洁摘要,并提取关键要点。
|
||||
|
||||
---
|
||||
|
||||
## 概览
|
||||
|
||||
Summary 插件可以快速理解长文本,生成精炼摘要并列出关键点,适合:
|
||||
|
||||
- 总结长文章或文档
|
||||
- 从对话中提炼要点
|
||||
- 为复杂主题制作快速概览
|
||||
|
||||
## 功能特性
|
||||
|
||||
- :material-text-box-search: **智能摘要**:AI 驱动的内容分析
|
||||
- :material-format-list-bulleted: **关键点**:提取重要信息
|
||||
- :material-content-copy: **便捷复制**:一键复制摘要
|
||||
- :material-tune: **长度可调**:可选择摘要详略程度
|
||||
|
||||
---
|
||||
|
||||
## 安装
|
||||
|
||||
1. 下载插件文件:[`summary.py`](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/summary)
|
||||
2. 上传到 OpenWebUI:**Admin Panel** → **Settings** → **Functions**
|
||||
3. 启用插件
|
||||
|
||||
---
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 获取一段较长的 AI 回复或粘贴长文本
|
||||
2. 点击消息操作栏的 **Summary** 按钮
|
||||
3. 查看生成的摘要与关键点
|
||||
|
||||
---
|
||||
|
||||
## 配置项
|
||||
|
||||
| 选项 | 类型 | 默认值 | 说明 |
|
||||
|--------|------|---------|-------------|
|
||||
| `summary_length` | string | `"medium"` | 摘要长度(short/medium/long) |
|
||||
| `include_key_points` | boolean | `true` | 是否提取并列出关键点 |
|
||||
| `language` | string | `"auto"` | 输出语言 |
|
||||
|
||||
---
|
||||
|
||||
## 输出示例
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
|
||||
This document discusses the implementation of a new feature
|
||||
for the application, focusing on user experience improvements
|
||||
and performance optimizations.
|
||||
|
||||
### Key Points
|
||||
|
||||
- ✅ New user interface design improves accessibility
|
||||
- ✅ Backend optimizations reduce load times by 40%
|
||||
- ✅ Mobile responsiveness enhanced
|
||||
- ✅ Integration with third-party services simplified
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 运行要求
|
||||
|
||||
!!! note "前置条件"
|
||||
- OpenWebUI v0.3.0 及以上
|
||||
- 使用当前会话的 LLM 模型进行摘要
|
||||
|
||||
---
|
||||
|
||||
## 源码
|
||||
|
||||
[:fontawesome-brands-github: 在 GitHub 查看](https://github.com/Fu-Jie/awesome-openwebui/tree/main/plugins/actions/summary){ .md-button }
|
||||
83
plugins/actions/deep-dive/README.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# 🌊 Deep Dive
|
||||
|
||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie) | **Version:** 1.0.0 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
||||
|
||||
A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.
|
||||
|
||||
## 🔥 What's New in v1.0.0
|
||||
|
||||
- ✨ **Thinking Chain Structure**: Moves from surface understanding to deep strategic action.
|
||||
- 🔍 **Phase 01: The Context**: Panoramic view of the situation and background.
|
||||
- 🧠 **Phase 02: The Logic**: Deconstruction of the underlying reasoning and mental models.
|
||||
- 💎 **Phase 03: The Insight**: Extraction of non-obvious value and hidden implications.
|
||||
- 🚀 **Phase 04: The Path**: Definition of specific, prioritized strategic directions.
|
||||
- 🎨 **Premium UI**: Modern, process-oriented design with a "Thinking Line" timeline.
|
||||
- 🌗 **Theme Adaptive**: Automatically adapts to OpenWebUI's light/dark theme.
|
||||
|
||||
## ✨ Key Features
|
||||
|
||||
- 🌊 **Deep Thinking**: Not just a summary, but a full deconstruction of content.
|
||||
- 🧠 **Logical Analysis**: Reveals how arguments are built and identifies hidden assumptions.
|
||||
- 💎 **Value Extraction**: Finds the "Aha!" moments and blind spots.
|
||||
- 🚀 **Action Oriented**: Translates deep understanding into immediate, actionable steps.
|
||||
- 🌍 **Multi-language**: Automatically adapts to the user's preferred language.
|
||||
- 🌗 **Theme Support**: Seamlessly switches between light and dark themes based on OpenWebUI settings.
|
||||
|
||||
## 🚀 How to Use
|
||||
|
||||
1. **Input Content**: Provide any text, article, or meeting notes in the chat.
|
||||
2. **Trigger Deep Dive**: Click the **Deep Dive** action button.
|
||||
3. **Explore the Chain**: Follow the visual timeline from Context to Path.
|
||||
|
||||
## ⚙️ Configuration (Valves)
|
||||
|
||||
| Parameter | Default | Description |
|
||||
| :--- | :--- | :--- |
|
||||
| **Show Status (SHOW_STATUS)** | `True` | Whether to show status updates during the thinking process. |
|
||||
| **Model ID (MODEL_ID)** | `Empty` | LLM model for analysis. Empty = use current model. |
|
||||
| **Min Text Length (MIN_TEXT_LENGTH)** | `200` | Minimum characters required for a meaningful deep dive. |
|
||||
| **Clear Previous HTML (CLEAR_PREVIOUS_HTML)** | `True` | Whether to clear previous plugin results. |
|
||||
| **Message Count (MESSAGE_COUNT)** | `1` | Number of recent messages to analyze. |
|
||||
|
||||
## 🌗 Theme Support
|
||||
|
||||
The plugin automatically detects and adapts to OpenWebUI's theme settings:
|
||||
|
||||
- **Detection Priority**:
|
||||
1. Parent document `<meta name="theme-color">` tag
|
||||
2. Parent document `html/body` class or `data-theme` attribute
|
||||
3. System preference via `prefers-color-scheme: dark`
|
||||
|
||||
- **Requirements**: For best results, enable **iframe Sandbox Allow Same Origin** in OpenWebUI:
|
||||
- Go to **Settings** → **Interface** → **Artifacts** → Check **iframe Sandbox Allow Same Origin**
|
||||
|
||||
## 🎨 Visual Preview
|
||||
|
||||
The plugin generates a structured thinking timeline:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ 🌊 Deep Dive Analysis │
|
||||
│ 👤 User 📅 Date 📊 Word count │
|
||||
├─────────────────────────────────────┤
|
||||
│ 🔍 Phase 01: The Context │
|
||||
│ [High-level panoramic view] │
|
||||
│ │
|
||||
│ 🧠 Phase 02: The Logic │
|
||||
│ • Reasoning structure... │
|
||||
│ • Hidden assumptions... │
|
||||
│ │
|
||||
│ 💎 Phase 03: The Insight │
|
||||
│ • Non-obvious value... │
|
||||
│ • Blind spots revealed... │
|
||||
│ │
|
||||
│ 🚀 Phase 04: The Path │
|
||||
│ ▸ Priority Action 1... │
|
||||
│ ▸ Priority Action 2... │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## 📂 Files
|
||||
|
||||
- `deep_dive.py` - English version
|
||||
- `deep_dive_cn.py` - Chinese version (精读)
|
||||
83
plugins/actions/deep-dive/README_CN.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# 📖 精读
|
||||
|
||||
**作者:** [Fu-Jie](https://github.com/Fu-Jie) | **版本:** 1.0.0 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
||||
|
||||
全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。
|
||||
|
||||
## 🔥 v1.0.0 更新内容
|
||||
|
||||
- ✨ **思维链结构**: 从表面理解一步步深入到战略行动。
|
||||
- 🔍 **阶段 01: 全景 (The Context)**: 提供情境与背景的高层级全景视图。
|
||||
- 🧠 **阶段 02: 脉络 (The Logic)**: 解构底层推理逻辑与思维模型。
|
||||
- 💎 **阶段 03: 洞察 (The Insight)**: 提取非显性价值与隐藏的深层含义。
|
||||
- 🚀 **阶段 04: 路径 (The Path)**: 定义具体的、按优先级排列的战略方向。
|
||||
- 🎨 **高端 UI**: 现代化的过程导向设计,带有"思维导火索"时间轴。
|
||||
- 🌗 **主题自适应**: 自动适配 OpenWebUI 的深色/浅色主题。
|
||||
|
||||
## ✨ 核心特性
|
||||
|
||||
- 📖 **深度思考**: 不仅仅是摘要,而是对内容的全面解构。
|
||||
- 🧠 **逻辑分析**: 揭示论点是如何构建的,识别隐藏的假设。
|
||||
- 💎 **价值提取**: 发现"原来如此"的时刻与思维盲点。
|
||||
- 🚀 **行动导向**: 将深度理解转化为立即、可执行的步骤。
|
||||
- 🌍 **多语言支持**: 自动适配用户的偏好语言。
|
||||
- 🌗 **主题支持**: 根据 OpenWebUI 设置自动切换深色/浅色主题。
|
||||
|
||||
## 🚀 如何使用
|
||||
|
||||
1. **输入内容**: 在聊天中提供任何文本、文章或会议记录。
|
||||
2. **触发精读**: 点击 **精读** 操作按钮。
|
||||
3. **探索思维链**: 沿着视觉时间轴从"全景"探索到"路径"。
|
||||
|
||||
## ⚙️ 配置参数 (Valves)
|
||||
|
||||
| 参数 | 默认值 | 描述 |
|
||||
| :--- | :--- | :--- |
|
||||
| **显示状态 (SHOW_STATUS)** | `True` | 是否在思维过程中显示状态更新。 |
|
||||
| **模型 ID (MODEL_ID)** | `空` | 用于分析的 LLM 模型。留空 = 使用当前模型。 |
|
||||
| **最小文本长度 (MIN_TEXT_LENGTH)** | `200` | 进行有意义的精读所需的最小字符数。 |
|
||||
| **清除旧 HTML (CLEAR_PREVIOUS_HTML)** | `True` | 是否清除之前的插件结果。 |
|
||||
| **消息数量 (MESSAGE_COUNT)** | `1` | 要分析的最近消息数量。 |
|
||||
|
||||
## 🌗 主题支持
|
||||
|
||||
插件会自动检测并适配 OpenWebUI 的主题设置:
|
||||
|
||||
- **检测优先级**:
|
||||
1. 父文档 `<meta name="theme-color">` 标签
|
||||
2. 父文档 `html/body` 的 class 或 `data-theme` 属性
|
||||
3. 系统偏好 `prefers-color-scheme: dark`
|
||||
|
||||
- **环境要求**: 为获得最佳效果,请在 OpenWebUI 中启用 **iframe Sandbox Allow Same Origin**:
|
||||
- 进入 **设置** → **界面** → **Artifacts** → 勾选 **iframe Sandbox Allow Same Origin**
|
||||
|
||||
## 🎨 视觉预览
|
||||
|
||||
插件生成结构化的思维时间轴:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ 📖 精读分析报告 │
|
||||
│ 👤 用户 📅 日期 📊 字数 │
|
||||
├─────────────────────────────────────┤
|
||||
│ 🔍 阶段 01: 全景 (The Context) │
|
||||
│ [高层级全景视图内容] │
|
||||
│ │
|
||||
│ 🧠 阶段 02: 脉络 (The Logic) │
|
||||
│ • 推理结构分析... │
|
||||
│ • 隐藏假设识别... │
|
||||
│ │
|
||||
│ 💎 阶段 03: 洞察 (The Insight) │
|
||||
│ • 非显性价值提取... │
|
||||
│ • 思维盲点揭示... │
|
||||
│ │
|
||||
│ 🚀 阶段 04: 路径 (The Path) │
|
||||
│ ▸ 优先级行动 1... │
|
||||
│ ▸ 优先级行动 2... │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## 📂 文件说明
|
||||
|
||||
- `deep_dive.py` - 英文版 (Deep Dive)
|
||||
- `deep_dive_cn.py` - 中文版 (精读)
|
||||
BIN
plugins/actions/deep-dive/deep_dive.png
Normal file
|
After Width: | Height: | Size: 783 KiB |
884
plugins/actions/deep-dive/deep_dive.py
Normal file
@@ -0,0 +1,884 @@
|
||||
"""
|
||||
title: Deep Dive
|
||||
author: Fu-Jie
|
||||
author_url: https://github.com/Fu-Jie
|
||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||
version: 1.0.0
|
||||
icon_url: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9ImN1cnJlbnRDb2xvciIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiPjxwYXRoIGQ9Ik0xMiA3djE0Ii8+PHBhdGggZD0iTTMgMThhMSAxIDAgMCAxLTEtMVY0YTEgMSAwIDAgMSAxLTFoNWE0IDQgMCAwIDEgNCA0IDQgNCAwIDAgMSA0LTRoNWExIDEgMCAwIDEgMSAxdjEzYTEgMSAwIDAgMS0xIDFoLTZhMyAzIDAgMCAwLTMgMyAzIDMgMCAwIDAtMy0zeiIvPjxwYXRoIGQ9Ik02IDEyaDIiLz48cGF0aCBkPSJNMTYgMTJoMiIvPjwvc3ZnPg==
|
||||
requirements: markdown
|
||||
description: A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.
|
||||
"""
|
||||
|
||||
# Standard library imports
|
||||
import re
|
||||
import logging
|
||||
from typing import Optional, Dict, Any, Callable, Awaitable
|
||||
from datetime import datetime
|
||||
|
||||
# Third-party imports
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import Request
|
||||
import markdown
|
||||
|
||||
# OpenWebUI imports
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
# Logging setup
|
||||
logging.basicConfig(
|
||||
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# HTML Template - Process-Oriented Design with Theme Support
|
||||
# =================================================================
|
||||
HTML_WRAPPER_TEMPLATE = """
|
||||
<!-- OPENWEBUI_PLUGIN_OUTPUT -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="{user_language}">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
:root {
|
||||
--dd-bg-primary: #ffffff;
|
||||
--dd-bg-secondary: #f8fafc;
|
||||
--dd-bg-tertiary: #f1f5f9;
|
||||
--dd-text-primary: #0f172a;
|
||||
--dd-text-secondary: #334155;
|
||||
--dd-text-dim: #64748b;
|
||||
--dd-border: #e2e8f0;
|
||||
--dd-accent: #3b82f6;
|
||||
--dd-accent-soft: #eff6ff;
|
||||
--dd-header-gradient: linear-gradient(135deg, #1e293b 0%, #0f172a 100%);
|
||||
--dd-shadow: 0 10px 40px rgba(0,0,0,0.06);
|
||||
--dd-code-bg: #f1f5f9;
|
||||
}
|
||||
.theme-dark {
|
||||
--dd-bg-primary: #1e293b;
|
||||
--dd-bg-secondary: #0f172a;
|
||||
--dd-bg-tertiary: #334155;
|
||||
--dd-text-primary: #f1f5f9;
|
||||
--dd-text-secondary: #e2e8f0;
|
||||
--dd-text-dim: #94a3b8;
|
||||
--dd-border: #475569;
|
||||
--dd-accent: #60a5fa;
|
||||
--dd-accent-soft: rgba(59, 130, 246, 0.15);
|
||||
--dd-header-gradient: linear-gradient(135deg, #0f172a 0%, #1e1e2e 100%);
|
||||
--dd-shadow: 0 10px 40px rgba(0,0,0,0.3);
|
||||
--dd-code-bg: #334155;
|
||||
}
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background-color: transparent;
|
||||
}
|
||||
#main-container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 24px;
|
||||
width: 100%;
|
||||
max-width: 900px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
.plugin-item {
|
||||
background: var(--dd-bg-primary);
|
||||
border-radius: 24px;
|
||||
box-shadow: var(--dd-shadow);
|
||||
overflow: hidden;
|
||||
border: 1px solid var(--dd-border);
|
||||
}
|
||||
/* STYLES_INSERTION_POINT */
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="main-container">
|
||||
<!-- CONTENT_INSERTION_POINT -->
|
||||
</div>
|
||||
<!-- SCRIPTS_INSERTION_POINT -->
|
||||
<script>
|
||||
(function() {
|
||||
const parseColorLuma = (colorStr) => {
|
||||
if (!colorStr) return null;
|
||||
let m = colorStr.match(/^#?([0-9a-f]{6})$/i);
|
||||
if (m) {
|
||||
const hex = m[1];
|
||||
const r = parseInt(hex.slice(0, 2), 16);
|
||||
const g = parseInt(hex.slice(2, 4), 16);
|
||||
const b = parseInt(hex.slice(4, 6), 16);
|
||||
return (0.2126 * r + 0.7152 * g + 0.0722 * b) / 255;
|
||||
}
|
||||
m = colorStr.match(/rgba?\\s*\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*,\\s*(\\d+)/i);
|
||||
if (m) {
|
||||
const r = parseInt(m[1], 10);
|
||||
const g = parseInt(m[2], 10);
|
||||
const b = parseInt(m[3], 10);
|
||||
return (0.2126 * r + 0.7152 * g + 0.0722 * b) / 255;
|
||||
}
|
||||
return null;
|
||||
};
|
||||
const getThemeFromMeta = (doc) => {
|
||||
const metas = Array.from((doc || document).querySelectorAll('meta[name="theme-color"]'));
|
||||
if (!metas.length) return null;
|
||||
const color = metas[metas.length - 1].content.trim();
|
||||
const luma = parseColorLuma(color);
|
||||
if (luma === null) return null;
|
||||
return luma < 0.5 ? 'dark' : 'light';
|
||||
};
|
||||
const getParentDocumentSafe = () => {
|
||||
try {
|
||||
if (!window.parent || window.parent === window) return null;
|
||||
const pDoc = window.parent.document;
|
||||
void pDoc.title;
|
||||
return pDoc;
|
||||
} catch (err) { return null; }
|
||||
};
|
||||
const getThemeFromParentClass = () => {
|
||||
try {
|
||||
if (!window.parent || window.parent === window) return null;
|
||||
const pDoc = window.parent.document;
|
||||
const html = pDoc.documentElement;
|
||||
const body = pDoc.body;
|
||||
const htmlClass = html ? html.className : '';
|
||||
const bodyClass = body ? body.className : '';
|
||||
const htmlDataTheme = html ? html.getAttribute('data-theme') : '';
|
||||
if (htmlDataTheme === 'dark' || bodyClass.includes('dark') || htmlClass.includes('dark')) return 'dark';
|
||||
if (htmlDataTheme === 'light' || bodyClass.includes('light') || htmlClass.includes('light')) return 'light';
|
||||
return null;
|
||||
} catch (err) { return null; }
|
||||
};
|
||||
const setTheme = () => {
|
||||
const parentDoc = getParentDocumentSafe();
|
||||
const metaTheme = parentDoc ? getThemeFromMeta(parentDoc) : null;
|
||||
const parentClassTheme = getThemeFromParentClass();
|
||||
const prefersDark = window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
|
||||
const chosen = metaTheme || parentClassTheme || (prefersDark ? 'dark' : 'light');
|
||||
document.documentElement.classList.toggle('theme-dark', chosen === 'dark');
|
||||
};
|
||||
setTheme();
|
||||
if (window.matchMedia) {
|
||||
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', setTheme);
|
||||
}
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# LLM Prompts - Deep Dive Thinking Chain
|
||||
# =================================================================
|
||||
|
||||
SYSTEM_PROMPT = """
|
||||
You are a Deep Dive Analyst. Your goal is to guide the user through a comprehensive thinking process, moving from surface understanding to deep strategic action.
|
||||
|
||||
## Thinking Structure (STRICT)
|
||||
|
||||
You MUST analyze the input across these four specific dimensions:
|
||||
|
||||
### 1. 🔍 The Context (What?)
|
||||
Provide a high-level panoramic view. What is this content about? What is the core situation, background, or problem being addressed? (2-3 paragraphs)
|
||||
|
||||
### 2. 🧠 The Logic (Why?)
|
||||
Deconstruct the underlying structure. How is the argument built? What is the reasoning, the hidden assumptions, or the mental models at play? (Bullet points)
|
||||
|
||||
### 3. 💎 The Insight (So What?)
|
||||
Extract the non-obvious value. What are the "Aha!" moments? What are the implications, the blind spots, or the unique perspectives revealed? (Bullet points)
|
||||
|
||||
### 4. 🚀 The Path (Now What?)
|
||||
Define the strategic direction. What are the specific, prioritized next steps? How can this knowledge be applied immediately? (Actionable steps)
|
||||
|
||||
## Rules
|
||||
- Output in the user's specified language.
|
||||
- Maintain a professional, analytical, yet inspiring tone.
|
||||
- Focus on the *process* of understanding, not just the result.
|
||||
- No greetings or meta-commentary.
|
||||
"""
|
||||
|
||||
USER_PROMPT = """
|
||||
Initiate a Deep Dive into the following content:
|
||||
|
||||
**User Context:**
|
||||
- User: {user_name}
|
||||
- Time: {current_date_time_str}
|
||||
- Language: {user_language}
|
||||
|
||||
**Content to Analyze:**
|
||||
```
|
||||
{long_text_content}
|
||||
```
|
||||
|
||||
Please execute the full thinking chain: Context → Logic → Insight → Path.
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# Premium CSS Design - Deep Dive Theme
|
||||
# =================================================================
|
||||
|
||||
CSS_TEMPLATE = """
|
||||
.deep-dive {
|
||||
font-family: 'Inter', -apple-system, system-ui, sans-serif;
|
||||
color: var(--dd-text-secondary);
|
||||
}
|
||||
|
||||
.dd-header {
|
||||
background: var(--dd-header-gradient);
|
||||
padding: 40px 32px;
|
||||
color: white;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.dd-header-badge {
|
||||
display: inline-block;
|
||||
padding: 4px 12px;
|
||||
background: rgba(255,255,255,0.1);
|
||||
border: 1px solid rgba(255,255,255,0.2);
|
||||
border-radius: 100px;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 600;
|
||||
letter-spacing: 0.05em;
|
||||
text-transform: uppercase;
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
|
||||
.dd-title {
|
||||
font-size: 2rem;
|
||||
font-weight: 800;
|
||||
margin: 0 0 12px 0;
|
||||
letter-spacing: -0.02em;
|
||||
}
|
||||
|
||||
.dd-meta {
|
||||
display: flex;
|
||||
gap: 20px;
|
||||
font-size: 0.85rem;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.dd-body {
|
||||
padding: 32px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 40px;
|
||||
position: relative;
|
||||
background: var(--dd-bg-primary);
|
||||
}
|
||||
|
||||
/* The Thinking Line */
|
||||
.dd-body::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
left: 52px;
|
||||
top: 40px;
|
||||
bottom: 40px;
|
||||
width: 2px;
|
||||
background: var(--dd-border);
|
||||
z-index: 0;
|
||||
}
|
||||
|
||||
.dd-step {
|
||||
position: relative;
|
||||
z-index: 1;
|
||||
display: flex;
|
||||
gap: 24px;
|
||||
}
|
||||
|
||||
.dd-step-icon {
|
||||
flex-shrink: 0;
|
||||
width: 40px;
|
||||
height: 40px;
|
||||
background: var(--dd-bg-primary);
|
||||
border: 2px solid var(--dd-border);
|
||||
border-radius: 12px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
font-size: 1.25rem;
|
||||
box-shadow: 0 4px 12px rgba(0,0,0,0.03);
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
|
||||
.dd-step:hover .dd-step-icon {
|
||||
border-color: var(--dd-accent);
|
||||
transform: scale(1.1);
|
||||
}
|
||||
|
||||
.dd-step-content {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.dd-step-label {
|
||||
font-size: 0.75rem;
|
||||
font-weight: 700;
|
||||
color: var(--dd-accent);
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.1em;
|
||||
margin-bottom: 4px;
|
||||
}
|
||||
|
||||
.dd-step-title {
|
||||
font-size: 1.25rem;
|
||||
font-weight: 700;
|
||||
color: var(--dd-text-primary);
|
||||
margin: 0 0 16px 0;
|
||||
}
|
||||
|
||||
.dd-text {
|
||||
line-height: 1.7;
|
||||
font-size: 1rem;
|
||||
}
|
||||
|
||||
.dd-text p { margin-bottom: 16px; }
|
||||
.dd-text p:last-child { margin-bottom: 0; }
|
||||
|
||||
.dd-list {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
display: grid;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.dd-list-item {
|
||||
background: var(--dd-bg-secondary);
|
||||
padding: 16px 20px;
|
||||
border-radius: 12px;
|
||||
border-left: 4px solid var(--dd-border);
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.dd-list-item:hover {
|
||||
background: var(--dd-bg-tertiary);
|
||||
border-left-color: var(--dd-accent);
|
||||
transform: translateX(4px);
|
||||
}
|
||||
|
||||
.dd-list-item strong {
|
||||
color: var(--dd-text-primary);
|
||||
display: block;
|
||||
margin-bottom: 4px;
|
||||
}
|
||||
|
||||
.dd-path-item {
|
||||
background: var(--dd-accent-soft);
|
||||
border-left-color: var(--dd-accent);
|
||||
}
|
||||
|
||||
.dd-footer {
|
||||
padding: 24px 32px;
|
||||
background: var(--dd-bg-secondary);
|
||||
border-top: 1px solid var(--dd-border);
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
font-size: 0.8rem;
|
||||
color: var(--dd-text-dim);
|
||||
}
|
||||
|
||||
.dd-tag {
|
||||
padding: 2px 8px;
|
||||
background: var(--dd-bg-tertiary);
|
||||
border-radius: 4px;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.dd-text code,
|
||||
.dd-list-item code {
|
||||
background: var(--dd-code-bg);
|
||||
color: var(--dd-text-primary);
|
||||
padding: 2px 6px;
|
||||
border-radius: 4px;
|
||||
font-family: 'SF Mono', 'Consolas', 'Monaco', monospace;
|
||||
font-size: 0.85em;
|
||||
}
|
||||
|
||||
.dd-list-item em {
|
||||
font-style: italic;
|
||||
color: var(--dd-text-dim);
|
||||
}
|
||||
"""
|
||||
|
||||
CONTENT_TEMPLATE = """
|
||||
<div class="deep-dive">
|
||||
<div class="dd-header">
|
||||
<div class="dd-header-badge">Thinking Process</div>
|
||||
<h1 class="dd-title">Deep Dive Analysis</h1>
|
||||
<div class="dd-meta">
|
||||
<span>👤 {user_name}</span>
|
||||
<span>📅 {current_date_time_str}</span>
|
||||
<span>📊 {word_count} words</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="dd-body">
|
||||
<!-- Step 1: Context -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🔍</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 01</div>
|
||||
<h2 class="dd-step-title">The Context</h2>
|
||||
<div class="dd-text">{context_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Step 2: Logic -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🧠</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 02</div>
|
||||
<h2 class="dd-step-title">The Logic</h2>
|
||||
<div class="dd-text">{logic_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Step 3: Insight -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">💎</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 03</div>
|
||||
<h2 class="dd-step-title">The Insight</h2>
|
||||
<div class="dd-text">{insight_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Step 4: Path -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🚀</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 04</div>
|
||||
<h2 class="dd-step-title">The Path</h2>
|
||||
<div class="dd-text">{path_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="dd-footer">
|
||||
<span>Deep Dive Engine v1.0</span>
|
||||
<span><span class="dd-tag">AI-Powered</span></span>
|
||||
</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
class Action:
    """Open WebUI action plugin: "Deep Dive" analysis.

    Aggregates the text of the most recent chat message(s), sends it to an
    LLM with a four-phase prompt (Context / Logic / Insight / Path), parses
    the sections out of the response, and appends the result to the last
    message as a themed, self-contained HTML artifact inside a ```html block.
    """

    class Valves(BaseModel):
        # User-tunable settings surfaced in the Open WebUI admin panel.
        SHOW_STATUS: bool = Field(
            default=True,
            description="Whether to show operation status updates.",
        )
        MODEL_ID: str = Field(
            default="",
            description="LLM Model ID for analysis. Empty = use current model.",
        )
        MIN_TEXT_LENGTH: int = Field(
            default=200,
            description="Minimum text length for deep dive (chars).",
        )
        CLEAR_PREVIOUS_HTML: bool = Field(
            default=True,
            description="Whether to clear previous plugin results.",
        )
        MESSAGE_COUNT: int = Field(
            default=1,
            description="Number of recent messages to analyze.",
        )

    def __init__(self):
        self.valves = self.Valves()

    def _get_user_context(self, __user__: Optional[Dict[str, Any]]) -> Dict[str, str]:
        """Safely extract user id/name/language from the __user__ argument.

        Open WebUI may pass the user as a dict or (defensively handled here)
        a list/tuple wrapping one; anything else falls back to defaults.
        """
        if isinstance(__user__, (list, tuple)):
            user_data = __user__[0] if __user__ else {}
        elif isinstance(__user__, dict):
            user_data = __user__
        else:
            user_data = {}

        return {
            "user_id": user_data.get("id", "unknown_user"),
            "user_name": user_data.get("name", "User"),
            "user_language": user_data.get("language", "en-US"),
        }

    def _process_llm_output(self, llm_output: str) -> Dict[str, str]:
        """Parse the LLM's four "###" sections and convert each to HTML.

        Returns a dict with context_html / logic_html / insight_html /
        path_html keys, each containing rendered HTML (or a placeholder
        paragraph when a section could not be extracted).
        """
        # Primary patterns: numbered headings with a parenthetical, e.g.
        # "### 1. 🔍 The Context (What?)".  Group 1 = parenthetical,
        # group 2 = section body (up to the next "###" heading or EOF).
        context_match = re.search(
            r"###\s*1\.\s*🔍?\s*The Context\s*\((.*?)\)\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        logic_match = re.search(
            r"###\s*2\.\s*🧠?\s*The Logic\s*\((.*?)\)\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        insight_match = re.search(
            r"###\s*3\.\s*💎?\s*The Insight\s*\((.*?)\)\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        path_match = re.search(
            r"###\s*4\.\s*🚀?\s*The Path\s*\((.*?)\)\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )

        # Fallback patterns for un-numbered / reformatted headings; these
        # have a single capture group (the body only).
        if not context_match:
            context_match = re.search(
                r"###\s*🔍?\s*The Context.*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not logic_match:
            logic_match = re.search(
                r"###\s*🧠?\s*The Logic.*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not insight_match:
            insight_match = re.search(
                r"###\s*💎?\s*The Insight.*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not path_match:
            path_match = re.search(
                r"###\s*🚀?\s*The Path.*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )

        # lastindex == 1 means a fallback (single-group) pattern matched;
        # otherwise the body is in group 2 of the primary pattern.
        context_md = (
            context_match.group(1 if context_match.lastindex == 1 else 2).strip()
            if context_match
            else ""
        )
        logic_md = (
            logic_match.group(1 if logic_match.lastindex == 1 else 2).strip()
            if logic_match
            else ""
        )
        insight_md = (
            insight_match.group(1 if insight_match.lastindex == 1 else 2).strip()
            if insight_match
            else ""
        )
        path_md = (
            path_match.group(1 if path_match.lastindex == 1 else 2).strip()
            if path_match
            else ""
        )

        # If the model ignored the format entirely, show the whole response
        # in the Context panel rather than dropping it.
        if not any([context_md, logic_md, insight_md, path_md]):
            context_md = llm_output.strip()
            logger.warning("LLM output did not follow format. Using as context.")

        md_extensions = ["nl2br"]

        context_html = (
            markdown.markdown(context_md, extensions=md_extensions)
            if context_md
            else '<p class="dd-no-content">No context extracted.</p>'
        )
        logic_html = (
            self._process_list_items(logic_md, "logic")
            if logic_md
            else '<p class="dd-no-content">No logic deconstructed.</p>'
        )
        insight_html = (
            self._process_list_items(insight_md, "insight")
            if insight_md
            else '<p class="dd-no-content">No insights found.</p>'
        )
        path_html = (
            self._process_list_items(path_md, "path")
            if path_md
            else '<p class="dd-no-content">No path defined.</p>'
        )

        return {
            "context_html": context_html,
            "logic_html": logic_html,
            "insight_html": insight_html,
            "path_html": path_html,
        }

    def _process_list_items(self, md_content: str, section_type: str) -> str:
        """Convert a markdown bullet/numbered list into styled HTML cards.

        Non-list lines are accumulated into <p> paragraphs; "path" sections
        get the accent dd-path-item class on each card.
        """
        lines = md_content.strip().split("\n")
        items = []
        current_paragraph = []

        for line in lines:
            line = line.strip()

            # Check for list item (bullet or numbered)
            bullet_match = re.match(r"^[-*]\s+(.+)$", line)
            numbered_match = re.match(r"^\d+\.\s+(.+)$", line)

            if bullet_match or numbered_match:
                # Flush any accumulated paragraph
                if current_paragraph:
                    para_text = " ".join(current_paragraph)
                    para_html = self._convert_inline_markdown(para_text)
                    items.append(f"<p>{para_html}</p>")
                    current_paragraph = []

                # Extract the list item content
                text = (
                    bullet_match.group(1) if bullet_match else numbered_match.group(1)
                )

                # Handle bold title pattern: **Title:** Description or **Title**: Description
                title_match = re.match(r"\*\*(.+?)\*\*[:\s]*(.*)$", text)
                if title_match:
                    title = self._convert_inline_markdown(title_match.group(1))
                    desc = self._convert_inline_markdown(title_match.group(2).strip())
                    path_class = "dd-path-item" if section_type == "path" else ""
                    item_html = f'<div class="dd-list-item {path_class}"><strong>{title}</strong>{desc}</div>'
                else:
                    text_html = self._convert_inline_markdown(text)
                    path_class = "dd-path-item" if section_type == "path" else ""
                    item_html = (
                        f'<div class="dd-list-item {path_class}">{text_html}</div>'
                    )
                items.append(item_html)
            elif line and not line.startswith("#"):
                # Accumulate paragraph text
                current_paragraph.append(line)
            elif not line and current_paragraph:
                # Empty line ends paragraph
                para_text = " ".join(current_paragraph)
                para_html = self._convert_inline_markdown(para_text)
                items.append(f"<p>{para_html}</p>")
                current_paragraph = []

        # Flush remaining paragraph
        if current_paragraph:
            para_text = " ".join(current_paragraph)
            para_html = self._convert_inline_markdown(para_text)
            items.append(f"<p>{para_html}</p>")

        if items:
            return f'<div class="dd-list">{" ".join(items)}</div>'
        # Fixed: was an f-string with no placeholders.
        return '<p class="dd-no-content">No items found.</p>'

    def _convert_inline_markdown(self, text: str) -> str:
        """Convert inline markdown (code, bold, italic) to HTML.

        Order matters: code first so its contents are not re-processed as
        emphasis; italic last with lookarounds so ** is not half-consumed.
        """
        # Convert inline code: `code` -> <code>code</code>
        text = re.sub(r"`([^`]+)`", r"<code>\1</code>", text)
        # Convert bold: **text** -> <strong>text</strong>
        text = re.sub(r"\*\*(.+?)\*\*", r"<strong>\1</strong>", text)
        # Convert italic: *text* -> <em>text</em> (but not inside **)
        text = re.sub(r"(?<!\*)\*([^*]+)\*(?!\*)", r"<em>\1</em>", text)
        return text

    async def _emit_status(
        self,
        emitter: Optional[Callable[[Any], Awaitable[None]]],
        description: str,
        done: bool = False,
    ):
        """Emit a status update event (honors the SHOW_STATUS valve)."""
        if self.valves.SHOW_STATUS and emitter:
            await emitter(
                {"type": "status", "data": {"description": description, "done": done}}
            )

    async def _emit_notification(
        self,
        emitter: Optional[Callable[[Any], Awaitable[None]]],
        content: str,
        ntype: str = "info",
    ):
        """Emit a toast notification event (always shown when an emitter exists)."""
        if emitter:
            await emitter(
                {"type": "notification", "data": {"type": ntype, "content": content}}
            )

    def _remove_existing_html(self, content: str) -> str:
        """Strip any previously generated plugin ```html block from *content*."""
        pattern = r"```html\s*<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?```"
        return re.sub(pattern, "", content).strip()

    def _extract_text_content(self, content) -> str:
        """Extract plain text from a message content value.

        Handles plain strings and the multimodal list form
        (items like {"type": "text", "text": ...} or bare strings).
        """
        if isinstance(content, str):
            return content
        elif isinstance(content, list):
            text_parts = []
            for item in content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_parts.append(item.get("text", ""))
                elif isinstance(item, str):
                    text_parts.append(item)
            return "\n".join(text_parts)
        return str(content) if content else ""

    def _merge_html(
        self,
        existing_html: str,
        new_content: str,
        new_styles: str = "",
        user_language: str = "en-US",
    ) -> str:
        """Merge a new content card (and its CSS) into the HTML container.

        Reuses *existing_html* when it is a previous plugin artifact;
        otherwise starts from a fresh HTML_WRAPPER_TEMPLATE.  Insertion
        markers are re-appended so later merges keep working.
        """
        if "<!-- OPENWEBUI_PLUGIN_OUTPUT -->" in existing_html:
            # Unwrap the fenced ```html block around the previous artifact.
            base_html = re.sub(r"^```html\s*", "", existing_html)
            base_html = re.sub(r"\s*```$", "", base_html)
        else:
            # str.replace (not format) — the template is full of CSS/JS braces.
            base_html = HTML_WRAPPER_TEMPLATE.replace("{user_language}", user_language)

        wrapped = f'<div class="plugin-item">\n{new_content}\n</div>'

        if new_styles:
            base_html = base_html.replace(
                "/* STYLES_INSERTION_POINT */",
                f"{new_styles}\n/* STYLES_INSERTION_POINT */",
            )

        base_html = base_html.replace(
            "<!-- CONTENT_INSERTION_POINT -->",
            f"{wrapped}\n<!-- CONTENT_INSERTION_POINT -->",
        )

        return base_html.strip()

    def _build_content_html(self, context: dict) -> str:
        """Fill CONTENT_TEMPLATE placeholders via str.replace (brace-safe)."""
        html = CONTENT_TEMPLATE
        for key, value in context.items():
            html = html.replace(f"{{{key}}}", str(value))
        return html

    async def action(
        self,
        body: dict,
        __user__: Optional[Dict[str, Any]] = None,
        __event_emitter__: Optional[Callable[[Any], Awaitable[None]]] = None,
        __request__: Optional[Request] = None,
    ) -> Optional[dict]:
        """Entry point invoked by Open WebUI when the action button is clicked.

        Mutates body["messages"][-1]["content"] in place (appending either
        the generated HTML artifact or an error note) and returns *body*.
        """
        logger.info("Action: Deep Dive v1.0.0 started")

        user_ctx = self._get_user_context(__user__)
        user_id = user_ctx["user_id"]
        user_name = user_ctx["user_name"]
        user_language = user_ctx["user_language"]

        now = datetime.now()
        current_date_time_str = now.strftime("%b %d, %Y %H:%M")

        original_content = ""
        try:
            messages = body.get("messages", [])
            if not messages:
                raise ValueError("No messages found.")

            # Clamp to [1, len(messages)].  Fixed: with MESSAGE_COUNT <= 0 the
            # old code produced messages[-0:] which selects ALL messages.
            message_count = min(max(1, self.valves.MESSAGE_COUNT), len(messages))
            recent_messages = messages[-message_count:]

            aggregated_parts = []
            for msg in recent_messages:
                text = self._extract_text_content(msg.get("content"))
                if text:
                    aggregated_parts.append(text)

            if not aggregated_parts:
                raise ValueError("No text content found.")

            original_content = "\n\n---\n\n".join(aggregated_parts)
            word_count = len(original_content.split())

            if len(original_content) < self.valves.MIN_TEXT_LENGTH:
                msg = f"Content too brief ({len(original_content)} chars). Deep Dive requires at least {self.valves.MIN_TEXT_LENGTH} chars for meaningful analysis."
                await self._emit_notification(__event_emitter__, msg, "warning")
                return {"messages": [{"role": "assistant", "content": f"⚠️ {msg}"}]}

            await self._emit_notification(
                __event_emitter__, "🌊 Initiating Deep Dive thinking process...", "info"
            )
            await self._emit_status(
                __event_emitter__, "🌊 Deep Dive: Analyzing Context & Logic...", False
            )

            prompt = USER_PROMPT.format(
                user_name=user_name,
                current_date_time_str=current_date_time_str,
                user_language=user_language,
                long_text_content=original_content,
            )

            model = self.valves.MODEL_ID or body.get("model")
            payload = {
                "model": model,
                "messages": [
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt},
                ],
                "stream": False,
            }

            user_obj = Users.get_user_by_id(user_id)
            if not user_obj:
                raise ValueError(f"User not found: {user_id}")

            response = await generate_chat_completion(__request__, payload, user_obj)
            llm_output = response["choices"][0]["message"]["content"]

            processed = self._process_llm_output(llm_output)

            context = {
                "user_name": user_name,
                "current_date_time_str": current_date_time_str,
                "word_count": word_count,
                **processed,
            }

            content_html = self._build_content_html(context)

            # Reuse a previous plugin artifact (if any) unless configured
            # to start fresh each time.
            existing = ""
            match = re.search(
                r"```html\s*(<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?)```",
                original_content,
            )
            if match:
                existing = match.group(1)

            if self.valves.CLEAR_PREVIOUS_HTML or not existing:
                original_content = self._remove_existing_html(original_content)
                final_html = self._merge_html(
                    "", content_html, CSS_TEMPLATE, user_language
                )
            else:
                original_content = self._remove_existing_html(original_content)
                final_html = self._merge_html(
                    existing, content_html, CSS_TEMPLATE, user_language
                )

            body["messages"][-1][
                "content"
            ] = f"{original_content}\n\n```html\n{final_html}\n```"

            await self._emit_status(__event_emitter__, "🌊 Deep Dive complete!", True)
            await self._emit_notification(
                __event_emitter__,
                f"🌊 Deep Dive complete, {user_name}! Thinking chain generated.",
                "success",
            )

        except Exception as e:
            # Best-effort error surfacing: keep whatever content we had and
            # append the error so the user sees what went wrong.
            logger.error(f"Deep Dive Error: {e}", exc_info=True)
            body["messages"][-1][
                "content"
            ] = f"{original_content}\n\n❌ **Error:** {str(e)}"
            await self._emit_status(__event_emitter__, "Deep Dive failed.", True)
            await self._emit_notification(
                __event_emitter__, f"Error: {str(e)}", "error"
            )

        return body
|
||||
BIN
plugins/actions/deep-dive/deep_dive_cn.png
Normal file
|
After Width: | Height: | Size: 997 KiB |
876
plugins/actions/deep-dive/deep_dive_cn.py
Normal file
@@ -0,0 +1,876 @@
|
||||
"""
|
||||
title: 精读
|
||||
author: Fu-Jie
|
||||
author_url: https://github.com/Fu-Jie
|
||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||
version: 1.0.0
|
||||
icon_url: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9ImN1cnJlbnRDb2xvciIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiPjxwYXRoIGQ9Ik0xMiA3djE0Ii8+PHBhdGggZD0iTTMgMThhMSAxIDAgMCAxLTEtMVY0YTEgMSAwIDAgMSAxLTFoNWE0IDQgMCAwIDEgNCA0IDQgNCAwIDAgMSA0LTRoNWExIDEgMCAwIDEgMSAxdjEzYTEgMSAwIDAgMS0xIDFoLTZhMyAzIDAgMCAwLTMgMyAzIDMgMCAwIDAtMy0zeiIvPjxwYXRoIGQ9Ik02IDEyaDIiLz48cGF0aCBkPSJNMTYgMTJoMiIvPjwvc3ZnPg==
|
||||
requirements: markdown
|
||||
description: 全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。
|
||||
"""
|
||||
|
||||
# Standard library imports
|
||||
import re
|
||||
import logging
|
||||
from typing import Optional, Dict, Any, Callable, Awaitable
|
||||
from datetime import datetime
|
||||
|
||||
# Third-party imports
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import Request
|
||||
import markdown
|
||||
|
||||
# OpenWebUI imports
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
# Logging setup
|
||||
# Module-level logger for this plugin.
# NOTE(review): basicConfig at import time configures the host process's
# root logger — presumably acceptable inside Open WebUI; confirm it does
# not override the server's own logging configuration.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# HTML 模板 - 过程导向设计,支持主题自适应
|
||||
# =================================================================
|
||||
HTML_WRAPPER_TEMPLATE = """
|
||||
<!-- OPENWEBUI_PLUGIN_OUTPUT -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="{user_language}">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
:root {
|
||||
--dd-bg-primary: #ffffff;
|
||||
--dd-bg-secondary: #f8fafc;
|
||||
--dd-bg-tertiary: #f1f5f9;
|
||||
--dd-text-primary: #0f172a;
|
||||
--dd-text-secondary: #334155;
|
||||
--dd-text-dim: #64748b;
|
||||
--dd-border: #e2e8f0;
|
||||
--dd-accent: #3b82f6;
|
||||
--dd-accent-soft: #eff6ff;
|
||||
--dd-header-gradient: linear-gradient(135deg, #1e293b 0%, #0f172a 100%);
|
||||
--dd-shadow: 0 10px 40px rgba(0,0,0,0.06);
|
||||
--dd-code-bg: #f1f5f9;
|
||||
}
|
||||
.theme-dark {
|
||||
--dd-bg-primary: #1e293b;
|
||||
--dd-bg-secondary: #0f172a;
|
||||
--dd-bg-tertiary: #334155;
|
||||
--dd-text-primary: #f1f5f9;
|
||||
--dd-text-secondary: #e2e8f0;
|
||||
--dd-text-dim: #94a3b8;
|
||||
--dd-border: #475569;
|
||||
--dd-accent: #60a5fa;
|
||||
--dd-accent-soft: rgba(59, 130, 246, 0.15);
|
||||
--dd-header-gradient: linear-gradient(135deg, #0f172a 0%, #1e1e2e 100%);
|
||||
--dd-shadow: 0 10px 40px rgba(0,0,0,0.3);
|
||||
--dd-code-bg: #334155;
|
||||
}
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background-color: transparent;
|
||||
}
|
||||
#main-container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 24px;
|
||||
width: 100%;
|
||||
max-width: 900px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
.plugin-item {
|
||||
background: var(--dd-bg-primary);
|
||||
border-radius: 24px;
|
||||
box-shadow: var(--dd-shadow);
|
||||
overflow: hidden;
|
||||
border: 1px solid var(--dd-border);
|
||||
}
|
||||
/* STYLES_INSERTION_POINT */
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="main-container">
|
||||
<!-- CONTENT_INSERTION_POINT -->
|
||||
</div>
|
||||
<!-- SCRIPTS_INSERTION_POINT -->
|
||||
<script>
|
||||
(function() {
|
||||
const parseColorLuma = (colorStr) => {
|
||||
if (!colorStr) return null;
|
||||
let m = colorStr.match(/^#?([0-9a-f]{6})$/i);
|
||||
if (m) {
|
||||
const hex = m[1];
|
||||
const r = parseInt(hex.slice(0, 2), 16);
|
||||
const g = parseInt(hex.slice(2, 4), 16);
|
||||
const b = parseInt(hex.slice(4, 6), 16);
|
||||
return (0.2126 * r + 0.7152 * g + 0.0722 * b) / 255;
|
||||
}
|
||||
m = colorStr.match(/rgba?\\s*\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*,\\s*(\\d+)/i);
|
||||
if (m) {
|
||||
const r = parseInt(m[1], 10);
|
||||
const g = parseInt(m[2], 10);
|
||||
const b = parseInt(m[3], 10);
|
||||
return (0.2126 * r + 0.7152 * g + 0.0722 * b) / 255;
|
||||
}
|
||||
return null;
|
||||
};
|
||||
const getThemeFromMeta = (doc) => {
|
||||
const metas = Array.from((doc || document).querySelectorAll('meta[name="theme-color"]'));
|
||||
if (!metas.length) return null;
|
||||
const color = metas[metas.length - 1].content.trim();
|
||||
const luma = parseColorLuma(color);
|
||||
if (luma === null) return null;
|
||||
return luma < 0.5 ? 'dark' : 'light';
|
||||
};
|
||||
const getParentDocumentSafe = () => {
|
||||
try {
|
||||
if (!window.parent || window.parent === window) return null;
|
||||
const pDoc = window.parent.document;
|
||||
void pDoc.title;
|
||||
return pDoc;
|
||||
} catch (err) { return null; }
|
||||
};
|
||||
const getThemeFromParentClass = () => {
|
||||
try {
|
||||
if (!window.parent || window.parent === window) return null;
|
||||
const pDoc = window.parent.document;
|
||||
const html = pDoc.documentElement;
|
||||
const body = pDoc.body;
|
||||
const htmlClass = html ? html.className : '';
|
||||
const bodyClass = body ? body.className : '';
|
||||
const htmlDataTheme = html ? html.getAttribute('data-theme') : '';
|
||||
if (htmlDataTheme === 'dark' || bodyClass.includes('dark') || htmlClass.includes('dark')) return 'dark';
|
||||
if (htmlDataTheme === 'light' || bodyClass.includes('light') || htmlClass.includes('light')) return 'light';
|
||||
return null;
|
||||
} catch (err) { return null; }
|
||||
};
|
||||
const setTheme = () => {
|
||||
const parentDoc = getParentDocumentSafe();
|
||||
const metaTheme = parentDoc ? getThemeFromMeta(parentDoc) : null;
|
||||
const parentClassTheme = getThemeFromParentClass();
|
||||
const prefersDark = window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
|
||||
const chosen = metaTheme || parentClassTheme || (prefersDark ? 'dark' : 'light');
|
||||
document.documentElement.classList.toggle('theme-dark', chosen === 'dark');
|
||||
};
|
||||
setTheme();
|
||||
if (window.matchMedia) {
|
||||
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', setTheme);
|
||||
}
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
# =================================================================
# LLM prompts - "Deep Dive" chain of thought
# =================================================================

# System prompt handed to the analysis model.  The text is model-facing
# runtime input and is deliberately kept in Chinese; _process_llm_output()
# parses the model's reply against the four "### n. <emoji> <name>"
# section headings defined here, so heading wording must stay in sync
# with the regexes in that method.
SYSTEM_PROMPT = """
你是一位“深度下潜 (Deep Dive)”分析专家。你的目标是引导用户完成一个全面的思维过程,从表面理解深入到战略行动。

## 思维结构 (严格遵守)

你必须从以下四个维度剖析输入内容:

### 1. 🔍 The Context (全景)
提供一个高层级的全景视图。内容是关于什么的?核心情境、背景或正在解决的问题是什么?(2-3 段话)

### 2. 🧠 The Logic (脉络)
解构底层结构。论点是如何构建的?其中的推理逻辑、隐藏假设或起作用的思维模型是什么?(列表形式)

### 3. 💎 The Insight (洞察)
提取非显性的价值。有哪些“原来如此”的时刻?揭示了哪些深层含义、盲点或独特视角?(列表形式)

### 4. 🚀 The Path (路径)
定义战略方向。具体的、按优先级排列的下一步行动是什么?如何立即应用这些知识?(可执行步骤)

## 规则
- 使用用户指定的语言输出。
- 保持专业、分析性且富有启发性的语调。
- 聚焦于“理解的过程”,而不仅仅是结果。
- 不要包含寒暄或元对话。
"""
|
||||
|
||||
USER_PROMPT = """
|
||||
对以下内容发起“深度下潜”:
|
||||
|
||||
**用户上下文:**
|
||||
- 用户:{user_name}
|
||||
- 时间:{current_date_time_str}
|
||||
- 语言:{user_language}
|
||||
|
||||
**待分析内容:**
|
||||
```
|
||||
{long_text_content}
|
||||
```
|
||||
|
||||
请执行完整的思维链:全景 (Context) → 脉络 (Logic) → 洞察 (Insight) → 路径 (Path)。
|
||||
"""
|
||||
|
||||
# =================================================================
# Modern CSS design - "Deep Dive" theme
# =================================================================

# Stylesheet injected into the plugin's HTML wrapper by _merge_html()
# (spliced in at the /* STYLES_INSERTION_POINT */ marker).  All colors go
# through --dd-* CSS custom properties so the light/dark theme switcher in
# HTML_WRAPPER_TEMPLATE can restyle the report without touching these
# rules.  The string content (including the Chinese CSS comment) is
# runtime data rendered in the browser.
CSS_TEMPLATE = """
.deep-dive {
    font-family: 'Inter', -apple-system, system-ui, sans-serif;
    color: var(--dd-text-secondary);
}

.dd-header {
    background: var(--dd-header-gradient);
    padding: 40px 32px;
    color: white;
    position: relative;
}

.dd-header-badge {
    display: inline-block;
    padding: 4px 12px;
    background: rgba(255,255,255,0.1);
    border: 1px solid rgba(255,255,255,0.2);
    border-radius: 100px;
    font-size: 0.75rem;
    font-weight: 600;
    letter-spacing: 0.05em;
    text-transform: uppercase;
    margin-bottom: 16px;
}

.dd-title {
    font-size: 2rem;
    font-weight: 800;
    margin: 0 0 12px 0;
    letter-spacing: -0.02em;
}

.dd-meta {
    display: flex;
    gap: 20px;
    font-size: 0.85rem;
    opacity: 0.7;
}

.dd-body {
    padding: 32px;
    display: flex;
    flex-direction: column;
    gap: 40px;
    position: relative;
    background: var(--dd-bg-primary);
}

/* 思维导火索 */
.dd-body::before {
    content: '';
    position: absolute;
    left: 52px;
    top: 40px;
    bottom: 40px;
    width: 2px;
    background: var(--dd-border);
    z-index: 0;
}

.dd-step {
    position: relative;
    z-index: 1;
    display: flex;
    gap: 24px;
}

.dd-step-icon {
    flex-shrink: 0;
    width: 40px;
    height: 40px;
    background: var(--dd-bg-primary);
    border: 2px solid var(--dd-border);
    border-radius: 12px;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 1.25rem;
    box-shadow: 0 4px 12px rgba(0,0,0,0.03);
    transition: all 0.3s ease;
}

.dd-step:hover .dd-step-icon {
    border-color: var(--dd-accent);
    transform: scale(1.1);
}

.dd-step-content {
    flex: 1;
}

.dd-step-label {
    font-size: 0.75rem;
    font-weight: 700;
    color: var(--dd-accent);
    text-transform: uppercase;
    letter-spacing: 0.1em;
    margin-bottom: 4px;
}

.dd-step-title {
    font-size: 1.25rem;
    font-weight: 700;
    color: var(--dd-text-primary);
    margin: 0 0 16px 0;
}

.dd-text {
    line-height: 1.7;
    font-size: 1rem;
}

.dd-text p { margin-bottom: 16px; }
.dd-text p:last-child { margin-bottom: 0; }

.dd-list {
    list-style: none;
    padding: 0;
    margin: 0;
    display: grid;
    gap: 12px;
}

.dd-list-item {
    background: var(--dd-bg-secondary);
    padding: 16px 20px;
    border-radius: 12px;
    border-left: 4px solid var(--dd-border);
    transition: all 0.2s ease;
}

.dd-list-item:hover {
    background: var(--dd-bg-tertiary);
    border-left-color: var(--dd-accent);
    transform: translateX(4px);
}

.dd-list-item strong {
    color: var(--dd-text-primary);
    display: block;
    margin-bottom: 4px;
}

.dd-path-item {
    background: var(--dd-accent-soft);
    border-left-color: var(--dd-accent);
}

.dd-footer {
    padding: 24px 32px;
    background: var(--dd-bg-secondary);
    border-top: 1px solid var(--dd-border);
    display: flex;
    justify-content: space-between;
    align-items: center;
    font-size: 0.8rem;
    color: var(--dd-text-dim);
}

.dd-tag {
    padding: 2px 8px;
    background: var(--dd-bg-tertiary);
    border-radius: 4px;
    font-weight: 600;
}

.dd-text code,
.dd-list-item code {
    background: var(--dd-code-bg);
    color: var(--dd-text-primary);
    padding: 2px 6px;
    border-radius: 4px;
    font-family: 'SF Mono', 'Consolas', 'Monaco', monospace;
    font-size: 0.85em;
}

.dd-list-item em {
    font-style: italic;
    color: var(--dd-text-dim);
}
"""
|
||||
|
||||
CONTENT_TEMPLATE = """
|
||||
<div class="deep-dive">
|
||||
<div class="dd-header">
|
||||
<div class="dd-header-badge">思维过程</div>
|
||||
<h1 class="dd-title">精读分析报告</h1>
|
||||
<div class="dd-meta">
|
||||
<span>👤 {user_name}</span>
|
||||
<span>📅 {current_date_time_str}</span>
|
||||
<span>📊 {word_count} 字</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="dd-body">
|
||||
<!-- 第一步:全景 -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🔍</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 01</div>
|
||||
<h2 class="dd-step-title">全景 (The Context)</h2>
|
||||
<div class="dd-text">{context_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- 第二步:脉络 -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🧠</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 02</div>
|
||||
<h2 class="dd-step-title">脉络 (The Logic)</h2>
|
||||
<div class="dd-text">{logic_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- 第三步:洞察 -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">💎</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 03</div>
|
||||
<h2 class="dd-step-title">洞察 (The Insight)</h2>
|
||||
<div class="dd-text">{insight_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- 第四步:路径 -->
|
||||
<div class="dd-step">
|
||||
<div class="dd-step-icon">🚀</div>
|
||||
<div class="dd-step-content">
|
||||
<div class="dd-step-label">Phase 04</div>
|
||||
<h2 class="dd-step-title">路径 (The Path)</h2>
|
||||
<div class="dd-text">{path_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="dd-footer">
|
||||
<span>Deep Dive Engine v1.0</span>
|
||||
<span><span class="dd-tag">AI 驱动分析</span></span>
|
||||
</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
class Action:
    """Open WebUI action plugin: "Deep Dive" (精读) analysis.

    On invocation it aggregates the most recent chat message(s), asks an
    LLM to analyze them along a fixed four-phase chain of thought
    (Context -> Logic -> Insight -> Path), converts the model's markdown
    reply into a styled HTML report, and appends that report to the last
    message as a fenced ```html block which the WebUI renders.
    """

    class Valves(BaseModel):
        # Admin-configurable settings.  Field descriptions are shown in the
        # WebUI admin panel and are therefore kept in their original language.
        SHOW_STATUS: bool = Field(
            default=True,
            description="是否显示操作状态更新。",
        )
        MODEL_ID: str = Field(
            default="",
            description="用于分析的 LLM 模型 ID。留空则使用当前模型。",
        )
        MIN_TEXT_LENGTH: int = Field(
            default=200,
            description="深度下潜所需的最小文本长度(字符)。",
        )
        CLEAR_PREVIOUS_HTML: bool = Field(
            default=True,
            description="是否清除之前的插件结果。",
        )
        MESSAGE_COUNT: int = Field(
            default=1,
            description="要分析的最近消息数量。",
        )

    def __init__(self):
        # Instantiated with defaults; Open WebUI overwrites the values with
        # the admin's saved configuration after construction.
        self.valves = self.Valves()

    def _get_user_context(self, __user__: Optional[Dict[str, Any]]) -> Dict[str, str]:
        """Safely extract user context information.

        Open WebUI may pass ``__user__`` as a dict, a list/tuple of dicts,
        or something else entirely; normalize every shape into a dict with
        the three keys the plugin needs, falling back to defaults.
        """
        if isinstance(__user__, (list, tuple)):
            user_data = __user__[0] if __user__ else {}
        elif isinstance(__user__, dict):
            user_data = __user__
        else:
            user_data = {}

        return {
            "user_id": user_data.get("id", "unknown_user"),
            "user_name": user_data.get("name", "用户"),
            "user_language": user_data.get("language", "zh-CN"),
        }

    def _process_llm_output(self, llm_output: str) -> Dict[str, str]:
        """Parse the LLM output and convert it into styled HTML.

        Each of the four sections is located with a tolerant regex; a
        looser fallback pattern (without the leading "n.") is tried when
        the strict one fails.  Sections that still cannot be found render
        a "dd-no-content" placeholder instead of failing.
        """
        # Extract each section with flexible regexes.  The strict patterns
        # have two groups (optional parenthesized subtitle + content), the
        # fallbacks below have one; group(match.lastindex) therefore always
        # selects the content group regardless of which pattern matched.
        context_match = re.search(
            r"###\s*1\.\s*🔍?\s*(?:全景|The Context)\s*(?:\((.*?)\))?\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        logic_match = re.search(
            r"###\s*2\.\s*🧠?\s*(?:脉络|The Logic)\s*(?:\((.*?)\))?\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        insight_match = re.search(
            r"###\s*3\.\s*💎?\s*(?:洞察|The Insight)\s*(?:\((.*?)\))?\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )
        path_match = re.search(
            r"###\s*4\.\s*🚀?\s*(?:路径|The Path)\s*(?:\((.*?)\))?\s*\n(.*?)(?=\n###|$)",
            llm_output,
            re.DOTALL | re.IGNORECASE,
        )

        # Fallback regexes: accept headings without the "n." numbering.
        if not context_match:
            context_match = re.search(
                r"###\s*🔍?\s*(?:全景|The Context).*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not logic_match:
            logic_match = re.search(
                r"###\s*🧠?\s*(?:脉络|The Logic).*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not insight_match:
            insight_match = re.search(
                r"###\s*💎?\s*(?:洞察|The Insight).*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
        if not path_match:
            path_match = re.search(
                r"###\s*🚀?\s*(?:路径|The Path).*?\n(.*?)(?=\n###|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )

        # lastindex is the highest-numbered group that participated in the
        # match — i.e. always the content group (2 for strict, 1 for fallback).
        context_md = (
            context_match.group(context_match.lastindex).strip()
            if context_match
            else ""
        )
        logic_md = (
            logic_match.group(logic_match.lastindex).strip() if logic_match else ""
        )
        insight_md = (
            insight_match.group(insight_match.lastindex).strip()
            if insight_match
            else ""
        )
        path_md = path_match.group(path_match.lastindex).strip() if path_match else ""

        # Last resort: if no section matched at all, show the raw output
        # as the Context section rather than dropping it.
        if not any([context_md, logic_md, insight_md, path_md]):
            context_md = llm_output.strip()
            logger.warning("LLM 输出未遵循格式,将作为全景处理。")

        md_extensions = ["nl2br"]

        # Context is free-form prose -> full markdown render; the other
        # three sections are expected to be lists -> card renderer.
        context_html = (
            markdown.markdown(context_md, extensions=md_extensions)
            if context_md
            else '<p class="dd-no-content">未能提取全景信息。</p>'
        )
        logic_html = (
            self._process_list_items(logic_md, "logic")
            if logic_md
            else '<p class="dd-no-content">未能解构脉络。</p>'
        )
        insight_html = (
            self._process_list_items(insight_md, "insight")
            if insight_md
            else '<p class="dd-no-content">未能发现洞察。</p>'
        )
        path_html = (
            self._process_list_items(path_md, "path")
            if path_md
            else '<p class="dd-no-content">未能定义路径。</p>'
        )

        return {
            "context_html": context_html,
            "logic_html": logic_html,
            "insight_html": insight_html,
            "path_html": path_html,
        }

    def _process_list_items(self, md_content: str, section_type: str) -> str:
        """Convert a markdown list into styled cards, with inline-markdown support.

        Bullet ("- " / "* ") and numbered ("1. ") items each become a
        .dd-list-item card; non-list lines are accumulated into <p>
        paragraphs.  ``section_type == "path"`` items get the accent
        .dd-path-item class.
        """
        lines = md_content.strip().split("\n")
        items = []
        current_paragraph = []  # non-list lines pending flush into a <p>

        for line in lines:
            line = line.strip()

            # Detect list items (unordered or ordered).
            bullet_match = re.match(r"^[-*]\s+(.+)$", line)
            numbered_match = re.match(r"^\d+\.\s+(.+)$", line)

            if bullet_match or numbered_match:
                # Flush any accumulated paragraph before the list item.
                if current_paragraph:
                    para_text = " ".join(current_paragraph)
                    para_html = self._convert_inline_markdown(para_text)
                    items.append(f"<p>{para_html}</p>")
                    current_paragraph = []

                # Extract the item text.
                text = (
                    bullet_match.group(1) if bullet_match else numbered_match.group(1)
                )

                # Bold-title pattern: "**title:** desc" or "**title**: desc"
                # renders the title as a block <strong> above the description.
                title_match = re.match(r"\*\*(.+?)\*\*[:\s:]*(.*)$", text)
                if title_match:
                    title = self._convert_inline_markdown(title_match.group(1))
                    desc = self._convert_inline_markdown(title_match.group(2).strip())
                    path_class = "dd-path-item" if section_type == "path" else ""
                    item_html = f'<div class="dd-list-item {path_class}"><strong>{title}</strong>{desc}</div>'
                else:
                    text_html = self._convert_inline_markdown(text)
                    path_class = "dd-path-item" if section_type == "path" else ""
                    item_html = (
                        f'<div class="dd-list-item {path_class}">{text_html}</div>'
                    )
                items.append(item_html)
            elif line and not line.startswith("#"):
                # Accumulate paragraph text (headings are dropped).
                current_paragraph.append(line)
            elif not line and current_paragraph:
                # A blank line terminates the current paragraph.
                para_text = " ".join(current_paragraph)
                para_html = self._convert_inline_markdown(para_text)
                items.append(f"<p>{para_html}</p>")
                current_paragraph = []

        # Flush any trailing paragraph.
        if current_paragraph:
            para_text = " ".join(current_paragraph)
            para_html = self._convert_inline_markdown(para_text)
            items.append(f"<p>{para_html}</p>")

        if items:
            return f'<div class="dd-list">{" ".join(items)}</div>'
        return f'<p class="dd-no-content">未找到条目。</p>'

    def _convert_inline_markdown(self, text: str) -> str:
        """Convert inline markdown (bold, italic, code) to HTML."""
        # Inline code: `code` -> <code>code</code>.  Done first so markers
        # inside code spans are not re-processed as emphasis... NOTE(review):
        # * / ** inside an already-emitted <code> can still match below —
        # acceptable for LLM output, but worth confirming if inputs vary.
        text = re.sub(r"`([^`]+)`", r"<code>\1</code>", text)
        # Bold: **text** -> <strong>text</strong>
        text = re.sub(r"\*\*(.+?)\*\*", r"<strong>\1</strong>", text)
        # Italic: *text* -> <em>text</em> (but not inside a ** pair)
        text = re.sub(r"(?<!\*)\*([^*]+)\*(?!\*)", r"<em>\1</em>", text)
        return text

    async def _emit_status(
        self,
        emitter: Optional[Callable[[Any], Awaitable[None]]],
        description: str,
        done: bool = False,
    ):
        """Emit a status-update event (no-op when disabled or no emitter)."""
        if self.valves.SHOW_STATUS and emitter:
            await emitter(
                {"type": "status", "data": {"description": description, "done": done}}
            )

    async def _emit_notification(
        self,
        emitter: Optional[Callable[[Any], Awaitable[None]]],
        content: str,
        ntype: str = "info",
    ):
        """Emit a toast-notification event; ntype is e.g. info/warning/error/success."""
        if emitter:
            await emitter(
                {"type": "notification", "data": {"type": ntype, "content": content}}
            )

    def _remove_existing_html(self, content: str) -> str:
        """Strip any previously generated plugin HTML block from *content*.

        The block is identified by the OPENWEBUI_PLUGIN_OUTPUT marker
        inside a fenced ```html section.
        """
        pattern = r"```html\s*<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?```"
        return re.sub(pattern, "", content).strip()

    def _extract_text_content(self, content) -> str:
        """Extract plain text from a message ``content`` field.

        Handles the plain-string form as well as the multimodal list form
        (list of {"type": "text", ...} parts and/or bare strings).
        """
        if isinstance(content, str):
            return content
        elif isinstance(content, list):
            text_parts = []
            for item in content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_parts.append(item.get("text", ""))
                elif isinstance(item, str):
                    text_parts.append(item)
            return "\n".join(text_parts)
        return str(content) if content else ""

    def _merge_html(
        self,
        existing_html: str,
        new_content: str,
        new_styles: str = "",
        user_language: str = "zh-CN",
    ) -> str:
        """Merge new content (and styles) into the plugin HTML container.

        Reuses *existing_html* when it carries the plugin marker (stripping
        any surrounding ```html fence); otherwise starts from a fresh
        HTML_WRAPPER_TEMPLATE.  Content and styles are spliced in at the
        wrapper's insertion-point markers, which are re-appended so later
        merges can keep extending the document.
        """
        if "<!-- OPENWEBUI_PLUGIN_OUTPUT -->" in existing_html:
            base_html = re.sub(r"^```html\s*", "", existing_html)
            base_html = re.sub(r"\s*```$", "", base_html)
        else:
            base_html = HTML_WRAPPER_TEMPLATE.replace("{user_language}", user_language)

        wrapped = f'<div class="plugin-item">\n{new_content}\n</div>'

        if new_styles:
            base_html = base_html.replace(
                "/* STYLES_INSERTION_POINT */",
                f"{new_styles}\n/* STYLES_INSERTION_POINT */",
            )

        base_html = base_html.replace(
            "<!-- CONTENT_INSERTION_POINT -->",
            f"{wrapped}\n<!-- CONTENT_INSERTION_POINT -->",
        )

        return base_html.strip()

    def _build_content_html(self, context: dict) -> str:
        """Build the report HTML by literal placeholder replacement.

        Uses str.replace() on "{key}" tokens instead of str.format() so
        literal braces elsewhere in CONTENT_TEMPLATE need no escaping.
        """
        html = CONTENT_TEMPLATE
        for key, value in context.items():
            html = html.replace(f"{{{key}}}", str(value))
        return html

    async def action(
        self,
        body: dict,
        __user__: Optional[Dict[str, Any]] = None,
        __event_emitter__: Optional[Callable[[Any], Awaitable[None]]] = None,
        __request__: Optional[Request] = None,
    ) -> Optional[dict]:
        """Entry point invoked by Open WebUI.

        Aggregates the last MESSAGE_COUNT messages, runs the Deep Dive LLM
        analysis, and rewrites the last message's content in place to
        ``<original text>\\n\\n```html\\n<report>\\n````.  On failure the
        error is appended to the message instead.  Always returns *body*
        (except for the too-short early return, which substitutes a
        warning message).
        """
        logger.info("Action: 精读 v1.0.0 启动")

        user_ctx = self._get_user_context(__user__)
        user_id = user_ctx["user_id"]
        user_name = user_ctx["user_name"]
        user_language = user_ctx["user_language"]

        now = datetime.now()
        current_date_time_str = now.strftime("%Y年%m月%d日 %H:%M")

        original_content = ""
        try:
            messages = body.get("messages", [])
            if not messages:
                raise ValueError("未找到消息内容。")

            # Aggregate the most recent MESSAGE_COUNT messages' text.
            message_count = min(self.valves.MESSAGE_COUNT, len(messages))
            recent_messages = messages[-message_count:]

            aggregated_parts = []
            for msg in recent_messages:
                text = self._extract_text_content(msg.get("content"))
                if text:
                    aggregated_parts.append(text)

            if not aggregated_parts:
                raise ValueError("未找到文本内容。")

            original_content = "\n\n---\n\n".join(aggregated_parts)
            # NOTE: character count (includes the joined separators), shown
            # as "字" in the report header.
            word_count = len(original_content)

            # Guard: analysis below MIN_TEXT_LENGTH characters is refused.
            if len(original_content) < self.valves.MIN_TEXT_LENGTH:
                msg = f"内容过短({len(original_content)} 字符)。精读至少需要 {self.valves.MIN_TEXT_LENGTH} 字符才能进行有意义的分析。"
                await self._emit_notification(__event_emitter__, msg, "warning")
                return {"messages": [{"role": "assistant", "content": f"⚠️ {msg}"}]}

            await self._emit_notification(
                __event_emitter__, "📖 正在发起精读分析...", "info"
            )
            await self._emit_status(
                __event_emitter__, "📖 精读:正在分析全景与脉络...", False
            )

            prompt = USER_PROMPT.format(
                user_name=user_name,
                current_date_time_str=current_date_time_str,
                user_language=user_language,
                long_text_content=original_content,
            )

            # Empty MODEL_ID valve falls back to the conversation's model.
            model = self.valves.MODEL_ID or body.get("model")
            payload = {
                "model": model,
                "messages": [
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt},
                ],
                "stream": False,
            }

            user_obj = Users.get_user_by_id(user_id)
            if not user_obj:
                raise ValueError(f"未找到用户:{user_id}")

            response = await generate_chat_completion(__request__, payload, user_obj)
            llm_output = response["choices"][0]["message"]["content"]

            processed = self._process_llm_output(llm_output)

            context = {
                "user_name": user_name,
                "current_date_time_str": current_date_time_str,
                "word_count": word_count,
                **processed,
            }

            content_html = self._build_content_html(context)

            # Handle any pre-existing plugin HTML in the aggregated text.
            existing = ""
            match = re.search(
                r"```html\s*(<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?)```",
                original_content,
            )
            if match:
                existing = match.group(1)

            # Either start a fresh container (CLEAR_PREVIOUS_HTML, or none
            # found) or append to the previously generated one.
            if self.valves.CLEAR_PREVIOUS_HTML or not existing:
                original_content = self._remove_existing_html(original_content)
                final_html = self._merge_html(
                    "", content_html, CSS_TEMPLATE, user_language
                )
            else:
                original_content = self._remove_existing_html(original_content)
                final_html = self._merge_html(
                    existing, content_html, CSS_TEMPLATE, user_language
                )

            # Rewrite the LAST message in place; when MESSAGE_COUNT > 1 its
            # content becomes the "---"-joined aggregate plus the report.
            body["messages"][-1][
                "content"
            ] = f"{original_content}\n\n```html\n{final_html}\n```"

            await self._emit_status(__event_emitter__, "📖 精读完成!", True)
            await self._emit_notification(
                __event_emitter__,
                f"📖 精读完成,{user_name}!思维链已生成。",
                "success",
            )

        except Exception as e:
            # Top-level plugin boundary: log, surface the error in the
            # message, and still return body so the UI stays consistent.
            logger.error(f"Deep Dive 错误:{e}", exc_info=True)
            body["messages"][-1][
                "content"
            ] = f"{original_content}\n\n❌ **错误:** {str(e)}"
            await self._emit_status(__event_emitter__, "精读失败。", True)
            await self._emit_notification(__event_emitter__, f"错误:{str(e)}", "error")

        return body
|
||||
BIN
plugins/actions/export_to_docx/export_to_word.png
Normal file
|
After Width: | Height: | Size: 78 KiB |
BIN
plugins/actions/export_to_docx/export_to_word_cn.png
Normal file
|
After Width: | Height: | Size: 86 KiB |
BIN
plugins/actions/infographic/infographic.png
Normal file
|
After Width: | Height: | Size: 162 KiB |
BIN
plugins/actions/infographic/infographic_cn.png
Normal file
|
After Width: | Height: | Size: 169 KiB |
@@ -1,170 +0,0 @@
|
||||
# Infographic to Markdown
|
||||
|
||||
> **Version:** 1.0.0
|
||||
|
||||
AI-powered infographic generator that renders SVG on the frontend and embeds it directly into Markdown as a Data URL image.
|
||||
|
||||
## Overview
|
||||
|
||||
This plugin combines the power of AI text analysis with AntV Infographic visualization to create beautiful infographics that are embedded directly into chat messages as Markdown images.
|
||||
|
||||
### How It Works
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Open WebUI Plugin │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 1. Python Action │
|
||||
│ ├── Receive message content │
|
||||
│ ├── Call LLM to generate Infographic syntax │
|
||||
│ └── Send __event_call__ to execute frontend JS │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 2. Browser JS (via __event_call__) │
|
||||
│ ├── Dynamically load AntV Infographic library │
|
||||
│ ├── Render SVG offscreen │
|
||||
│ ├── Export to Data URL via toDataURL() │
|
||||
│ └── Update message content via REST API │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 3. Markdown Rendering │
|
||||
│ └── Display  │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- 🤖 **AI-Powered**: Automatically analyzes text and selects the best infographic template
|
||||
- 📊 **Multiple Templates**: Supports 18+ infographic templates (lists, charts, comparisons, etc.)
|
||||
- 🖼️ **Self-Contained**: SVG/PNG embedded as Data URL, no external dependencies
|
||||
- 📝 **Markdown Native**: Results are pure Markdown images, compatible everywhere
|
||||
- 🔄 **API Writeback**: Updates message content via REST API for persistence
|
||||
|
||||
## Plugins in This Directory
|
||||
|
||||
### 1. `infographic_markdown.py` - Main Plugin ⭐
|
||||
- **Purpose**: Production use
|
||||
- **Features**: Full AI + AntV Infographic + Data URL embedding
|
||||
|
||||
### 2. `js_render_poc.py` - Proof of Concept
|
||||
- **Purpose**: Learning and testing
|
||||
- **Features**: Simple SVG creation demo, `__event_call__` pattern
|
||||
|
||||
## Configuration (Valves)
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------|------|---------|-------------|
|
||||
| `SHOW_STATUS` | bool | `true` | Show operation status updates |
|
||||
| `MODEL_ID` | string | `""` | LLM model ID (empty = use current model) |
|
||||
| `MIN_TEXT_LENGTH` | int | `50` | Minimum text length required |
|
||||
| `MESSAGE_COUNT` | int | `1` | Number of recent messages to use |
|
||||
| `SVG_WIDTH` | int | `800` | Width of generated SVG (pixels) |
|
||||
| `EXPORT_FORMAT` | string | `"svg"` | Export format: `svg` or `png` |
|
||||
|
||||
## Supported Templates
|
||||
|
||||
| Category | Template | Description |
|
||||
|----------|----------|-------------|
|
||||
| List | `list-grid` | Grid cards |
|
||||
| List | `list-vertical` | Vertical list |
|
||||
| Tree | `tree-vertical` | Vertical tree |
|
||||
| Tree | `tree-horizontal` | Horizontal tree |
|
||||
| Mind Map | `mindmap` | Mind map |
|
||||
| Process | `sequence-roadmap` | Roadmap |
|
||||
| Process | `sequence-zigzag` | Zigzag process |
|
||||
| Relation | `relation-sankey` | Sankey diagram |
|
||||
| Relation | `relation-circle` | Circular relation |
|
||||
| Compare | `compare-binary` | Binary comparison |
|
||||
| Analysis | `compare-swot` | SWOT analysis |
|
||||
| Quadrant | `quadrant-quarter` | Quadrant chart |
|
||||
| Chart | `chart-bar` | Bar chart |
|
||||
| Chart | `chart-column` | Column chart |
|
||||
| Chart | `chart-line` | Line chart |
|
||||
| Chart | `chart-pie` | Pie chart |
|
||||
| Chart | `chart-doughnut` | Doughnut chart |
|
||||
| Chart | `chart-area` | Area chart |
|
||||
|
||||
## Syntax Examples
|
||||
|
||||
### Grid List
|
||||
```infographic
|
||||
infographic list-grid
|
||||
data
|
||||
title Project Overview
|
||||
items
|
||||
- label Module A
|
||||
desc Description of module A
|
||||
- label Module B
|
||||
desc Description of module B
|
||||
```
|
||||
|
||||
### Binary Comparison
|
||||
```infographic
|
||||
infographic compare-binary
|
||||
data
|
||||
title Pros vs Cons
|
||||
items
|
||||
- label Pros
|
||||
children
|
||||
- label Strong R&D
|
||||
desc Technology leadership
|
||||
- label Cons
|
||||
children
|
||||
- label Weak brand
|
||||
desc Insufficient marketing
|
||||
```
|
||||
|
||||
### Bar Chart
|
||||
```infographic
|
||||
infographic chart-bar
|
||||
data
|
||||
title Quarterly Revenue
|
||||
items
|
||||
- label Q1
|
||||
value 120
|
||||
- label Q2
|
||||
value 150
|
||||
```
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Data URL Embedding
|
||||
```javascript
|
||||
// SVG to Base64 Data URL
|
||||
const svgData = new XMLSerializer().serializeToString(svg);
|
||||
const base64 = btoa(unescape(encodeURIComponent(svgData)));
|
||||
const dataUri = "data:image/svg+xml;base64," + base64;
|
||||
|
||||
// Markdown image syntax
|
||||
const markdownImage = ``;
|
||||
```
|
||||
|
||||
### AntV toDataURL API
|
||||
```javascript
|
||||
// Export as SVG (recommended, supports embedded resources)
|
||||
const svgUrl = await instance.toDataURL({
|
||||
type: 'svg',
|
||||
embedResources: true
|
||||
});
|
||||
|
||||
// Export as PNG (more compatible but larger)
|
||||
const pngUrl = await instance.toDataURL({
|
||||
type: 'png',
|
||||
dpr: 2
|
||||
});
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
1. **Browser Compatibility**: Requires modern browsers with ES6+ and Fetch API support
|
||||
2. **Network Dependency**: First use requires loading AntV library from CDN
|
||||
3. **Data URL Size**: Base64 encoding increases size by ~33%
|
||||
4. **Chinese Fonts**: SVG export embeds fonts for correct display
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [AntV Infographic Documentation](https://infographic.antv.vision/)
|
||||
- [Infographic API Reference](https://infographic.antv.vision/reference/infographic-api)
|
||||
- [Infographic Syntax Guide](https://infographic.antv.vision/learn/infographic-syntax)
|
||||
|
||||
## License
|
||||
|
||||
MIT License
|
||||
@@ -1,174 +0,0 @@
|
||||
# 信息图转 Markdown
|
||||
|
||||
> **版本:** 1.0.0
|
||||
|
||||
AI 驱动的信息图生成器,在前端渲染 SVG 并以 Data URL 图片格式直接嵌入到 Markdown 中。
|
||||
|
||||
## 概述
|
||||
|
||||
这个插件结合了 AI 文本分析能力和 AntV Infographic 可视化引擎,生成精美的信息图并以 Markdown 图片格式直接嵌入到聊天消息中。
|
||||
|
||||
### 工作原理
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Open WebUI 插件 │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 1. Python Action │
|
||||
│ ├── 接收消息内容 │
|
||||
│ ├── 调用 LLM 生成 Infographic 语法 │
|
||||
│ └── 发送 __event_call__ 执行前端 JS │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 2. 浏览器 JS (通过 __event_call__) │
|
||||
│ ├── 动态加载 AntV Infographic 库 │
|
||||
│ ├── 离屏渲染 SVG │
|
||||
│ ├── 使用 toDataURL() 导出 Data URL │
|
||||
│ └── 通过 REST API 更新消息内容 │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 3. Markdown 渲染 │
|
||||
│ └── 显示  │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## 功能特点
|
||||
|
||||
- 🤖 **AI 驱动**: 自动分析文本并选择最佳的信息图模板
|
||||
- 📊 **多种模板**: 支持 18+ 种信息图模板(列表、图表、对比等)
|
||||
- 🖼️ **自包含**: SVG/PNG 以 Data URL 嵌入,无外部依赖
|
||||
- 📝 **Markdown 原生**: 结果是纯 Markdown 图片,兼容任何平台
|
||||
- 🔄 **API 回写**: 通过 REST API 更新消息内容实现持久化
|
||||
|
||||
## 目录中的插件
|
||||
|
||||
### 1. `infographic_markdown.py` - 主插件 ⭐
|
||||
- **用途**: 生产使用
|
||||
- **功能**: 完整的 AI + AntV Infographic + Data URL 嵌入
|
||||
|
||||
### 2. `infographic_markdown_cn.py` - 主插件(中文版)
|
||||
- **用途**: 生产使用
|
||||
- **功能**: 与英文版相同,界面文字为中文
|
||||
|
||||
### 3. `js_render_poc.py` - 概念验证
|
||||
- **用途**: 学习和测试
|
||||
- **功能**: 简单的 SVG 创建演示,`__event_call__` 模式
|
||||
|
||||
## 配置选项 (Valves)
|
||||
|
||||
| 参数 | 类型 | 默认值 | 描述 |
|
||||
|------|------|--------|------|
|
||||
| `SHOW_STATUS` | bool | `true` | 是否显示操作状态 |
|
||||
| `MODEL_ID` | string | `""` | LLM 模型 ID(空则使用当前模型) |
|
||||
| `MIN_TEXT_LENGTH` | int | `50` | 最小文本长度要求 |
|
||||
| `MESSAGE_COUNT` | int | `1` | 用于生成的最近消息数量 |
|
||||
| `SVG_WIDTH` | int | `800` | 生成的 SVG 宽度(像素) |
|
||||
| `EXPORT_FORMAT` | string | `"svg"` | 导出格式:`svg` 或 `png` |
|
||||
|
||||
## 支持的模板
|
||||
|
||||
| 类别 | 模板名称 | 描述 |
|
||||
|------|----------|------|
|
||||
| 列表 | `list-grid` | 网格卡片 |
|
||||
| 列表 | `list-vertical` | 垂直列表 |
|
||||
| 树形 | `tree-vertical` | 垂直树 |
|
||||
| 树形 | `tree-horizontal` | 水平树 |
|
||||
| 思维导图 | `mindmap` | 思维导图 |
|
||||
| 流程 | `sequence-roadmap` | 路线图 |
|
||||
| 流程 | `sequence-zigzag` | 折线流程 |
|
||||
| 关系 | `relation-sankey` | 桑基图 |
|
||||
| 关系 | `relation-circle` | 圆形关系 |
|
||||
| 对比 | `compare-binary` | 二元对比 |
|
||||
| 分析 | `compare-swot` | SWOT 分析 |
|
||||
| 象限 | `quadrant-quarter` | 四象限图 |
|
||||
| 图表 | `chart-bar` | 条形图 |
|
||||
| 图表 | `chart-column` | 柱状图 |
|
||||
| 图表 | `chart-line` | 折线图 |
|
||||
| 图表 | `chart-pie` | 饼图 |
|
||||
| 图表 | `chart-doughnut` | 环形图 |
|
||||
| 图表 | `chart-area` | 面积图 |
|
||||
|
||||
## 语法示例
|
||||
|
||||
### 网格列表
|
||||
```infographic
|
||||
infographic list-grid
|
||||
data
|
||||
title 项目概览
|
||||
items
|
||||
- label 模块一
|
||||
desc 这是第一个模块的描述
|
||||
- label 模块二
|
||||
desc 这是第二个模块的描述
|
||||
```
|
||||
|
||||
### 二元对比
|
||||
```infographic
|
||||
infographic compare-binary
|
||||
data
|
||||
title 优劣对比
|
||||
items
|
||||
- label 优势
|
||||
children
|
||||
- label 研发能力强
|
||||
desc 技术领先
|
||||
- label 劣势
|
||||
children
|
||||
- label 品牌曝光不足
|
||||
desc 营销力度不够
|
||||
```
|
||||
|
||||
### 条形图
|
||||
```infographic
|
||||
infographic chart-bar
|
||||
data
|
||||
title 季度收入
|
||||
items
|
||||
- label Q1
|
||||
value 120
|
||||
- label Q2
|
||||
value 150
|
||||
```
|
||||
|
||||
## 技术细节
|
||||
|
||||
### Data URL 嵌入
|
||||
```javascript
|
||||
// SVG 转 Base64 Data URL
|
||||
const svgData = new XMLSerializer().serializeToString(svg);
|
||||
const base64 = btoa(unescape(encodeURIComponent(svgData)));
|
||||
const dataUri = "data:image/svg+xml;base64," + base64;
|
||||
|
||||
// Markdown 图片语法
|
||||
const markdownImage = `![📊 Infographic](${dataUrl})`;
|
||||
```
|
||||
|
||||
### AntV toDataURL API
|
||||
```javascript
|
||||
// 导出 SVG(推荐,支持嵌入资源)
|
||||
const svgUrl = await instance.toDataURL({
|
||||
type: 'svg',
|
||||
embedResources: true
|
||||
});
|
||||
|
||||
// 导出 PNG(更兼容但体积更大)
|
||||
const pngUrl = await instance.toDataURL({
|
||||
type: 'png',
|
||||
dpr: 2
|
||||
});
|
||||
```
|
||||
|
||||
## 注意事项
|
||||
|
||||
1. **浏览器兼容性**: 需要现代浏览器支持 ES6+ 和 Fetch API
|
||||
2. **网络依赖**: 首次使用需要从 CDN 加载 AntV Infographic 库
|
||||
3. **Data URL 大小**: Base64 编码会增加约 33% 的体积
|
||||
4. **中文字体**: SVG 导出时会嵌入字体以确保正确显示
|
||||
|
||||
## 相关资源
|
||||
|
||||
- [AntV Infographic 官方文档](https://infographic.antv.vision/)
|
||||
- [Infographic API 参考](https://infographic.antv.vision/reference/infographic-api)
|
||||
- [Infographic 语法规范](https://infographic.antv.vision/learn/infographic-syntax)
|
||||
|
||||
## 许可证
|
||||
|
||||
MIT License
|
||||
@@ -1,592 +0,0 @@
|
||||
"""
|
||||
title: 📊 Infographic to Markdown
|
||||
author: Fu-Jie
|
||||
version: 1.0.0
|
||||
description: AI生成信息图语法,前端渲染SVG并转换为Markdown图片格式嵌入消息。支持AntV Infographic模板。
|
||||
"""
|
||||
|
||||
import time
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Optional, Callable, Awaitable, Any, Dict
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import Request
|
||||
from datetime import datetime
|
||||
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# LLM Prompts
|
||||
# =================================================================
|
||||
|
||||
# System prompt instructing the LLM to emit AntV Infographic DSL.
# FIX/NOTE(review): in this copy the leading whitespace inside the
# ```infographic examples had been lost (flush-left), contradicting the
# prompt's own "two spaces for indentation" rule. The indentation is
# restored below per that rule — confirm against the original file.
SYSTEM_PROMPT_INFOGRAPHIC = """
You are a professional infographic design expert who can analyze user-provided text content and convert it into AntV Infographic syntax format.

## Infographic Syntax Specification

Infographic syntax is a Mermaid-like declarative syntax for describing infographic templates, data, and themes.

### Syntax Rules
- Entry uses `infographic <template-name>`
- Key-value pairs are separated by spaces, **absolutely NO colons allowed**
- Use two spaces for indentation
- Object arrays use `-` with line breaks

⚠️ **IMPORTANT WARNING: This is NOT YAML format!**
- ❌ Wrong: `children:` `items:` `data:` (with colons)
- ✅ Correct: `children` `items` `data` (without colons)

### Template Library & Selection Guide

Choose the most appropriate template based on the content structure:

#### 1. List & Hierarchy
- **List**: `list-grid` (Grid Cards), `list-vertical` (Vertical List)
- **Tree**: `tree-vertical` (Vertical Tree), `tree-horizontal` (Horizontal Tree)
- **Mindmap**: `mindmap` (Mind Map)

#### 2. Sequence & Relationship
- **Process**: `sequence-roadmap` (Roadmap), `sequence-zigzag` (Zigzag Process)
- **Relationship**: `relation-sankey` (Sankey Diagram), `relation-circle` (Circular)

#### 3. Comparison & Analysis
- **Comparison**: `compare-binary` (Binary Comparison)
- **Analysis**: `compare-swot` (SWOT Analysis), `quadrant-quarter` (Quadrant Chart)

#### 4. Charts & Data
- **Charts**: `chart-bar`, `chart-column`, `chart-line`, `chart-pie`, `chart-doughnut`, `chart-area`

### Data Structure Examples

#### A. Standard List/Tree
```infographic
infographic list-grid
data
  title Project Modules
  items
    - label Module A
      desc Description of A
    - label Module B
      desc Description of B
```

#### B. Binary Comparison
```infographic
infographic compare-binary
data
  title Advantages vs Disadvantages
  items
    - label Advantages
      children
        - label Strong R&D
          desc Leading technology
    - label Disadvantages
      children
        - label Weak brand
          desc Insufficient marketing
```

#### C. Charts
```infographic
infographic chart-bar
data
  title Quarterly Revenue
  items
    - label Q1
      value 120
    - label Q2
      value 150
```

### Common Data Fields
- `label`: Main title/label (Required)
- `desc`: Description text (max 30 Chinese chars / 60 English chars for `list-grid`)
- `value`: Numeric value (for charts)
- `children`: Nested items

## Output Requirements
1. **Language**: Output content in the user's language.
2. **Format**: Wrap output in ```infographic ... ```.
3. **No Colons**: Do NOT use colons after keys.
4. **Indentation**: Use 2 spaces.
"""
|
||||
|
||||
USER_PROMPT_GENERATE = """
|
||||
Please analyze the following text content and convert its core information into AntV Infographic syntax format.
|
||||
|
||||
---
|
||||
**User Context:**
|
||||
User Name: {user_name}
|
||||
Current Date/Time: {current_date_time_str}
|
||||
User Language: {user_language}
|
||||
---
|
||||
|
||||
**Text Content:**
|
||||
{long_text_content}
|
||||
|
||||
Please select the most appropriate infographic template based on text characteristics and output standard infographic syntax.
|
||||
|
||||
**Important Note:**
|
||||
- If using `list-grid` format, ensure each card's `desc` description is limited to **maximum 30 Chinese characters** (or **approximately 60 English characters**).
|
||||
- Descriptions should be concise and highlight key points.
|
||||
"""
|
||||
|
||||
|
||||
class Action:
    """Open WebUI action button: embed chat text as an infographic image.

    Asks an LLM to express recent message text as AntV Infographic syntax,
    then runs frontend JavaScript (via an "execute" event) that renders the
    syntax and appends the result to the message as a Markdown data-URL
    image.
    """

    class Valves(BaseModel):
        # Admin-tunable settings, surfaced in the Open WebUI valve editor.
        SHOW_STATUS: bool = Field(
            default=True, description="Show operation status updates in chat interface."
        )
        # Model used for the syntax-generation call; empty string means
        # "use the model of the current conversation" (see Action.action).
        MODEL_ID: str = Field(
            default="",
            description="LLM model ID for text analysis. If empty, uses current conversation model.",
        )
        # Guard against generating infographics from trivially short text.
        MIN_TEXT_LENGTH: int = Field(
            default=50,
            description="Minimum text length (characters) required for infographic analysis.",
        )
        # How many of the newest chat messages are aggregated as input.
        MESSAGE_COUNT: int = Field(
            default=1,
            description="Number of recent messages to use for generation.",
        )
        SVG_WIDTH: int = Field(
            default=800,
            description="Width of generated SVG in pixels.",
        )
        # 'svg' embeds resources; 'png' is exported at 2x DPR (see
        # _generate_js_code) and is larger but more broadly compatible.
        EXPORT_FORMAT: str = Field(
            default="svg",
            description="Export format: 'svg' or 'png'.",
        )
|
||||
|
||||
def __init__(self):
|
||||
self.valves = self.Valves()
|
||||
|
||||
def _extract_chat_id(self, body: dict, metadata: Optional[dict]) -> str:
|
||||
"""Extract chat_id from body or metadata"""
|
||||
if isinstance(body, dict):
|
||||
chat_id = body.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
body_metadata = body.get("metadata", {})
|
||||
if isinstance(body_metadata, dict):
|
||||
chat_id = body_metadata.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
if isinstance(metadata, dict):
|
||||
chat_id = metadata.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
return ""
|
||||
|
||||
def _extract_message_id(self, body: dict, metadata: Optional[dict]) -> str:
|
||||
"""Extract message_id from body or metadata"""
|
||||
if isinstance(body, dict):
|
||||
message_id = body.get("id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
body_metadata = body.get("metadata", {})
|
||||
if isinstance(body_metadata, dict):
|
||||
message_id = body_metadata.get("message_id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
if isinstance(metadata, dict):
|
||||
message_id = metadata.get("message_id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
return ""
|
||||
|
||||
def _extract_infographic_syntax(self, llm_output: str) -> str:
|
||||
"""Extract infographic syntax from LLM output"""
|
||||
match = re.search(r"```infographic\s*(.*?)\s*```", llm_output, re.DOTALL)
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
else:
|
||||
logger.warning("LLM output did not follow expected format, treating entire output as syntax.")
|
||||
return llm_output.strip()
|
||||
|
||||
def _extract_text_content(self, content) -> str:
|
||||
"""Extract text from message content, supporting multimodal formats"""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
elif isinstance(content, list):
|
||||
text_parts = []
|
||||
for item in content:
|
||||
if isinstance(item, dict) and item.get("type") == "text":
|
||||
text_parts.append(item.get("text", ""))
|
||||
elif isinstance(item, str):
|
||||
text_parts.append(item)
|
||||
return "\n".join(text_parts)
|
||||
return str(content) if content else ""
|
||||
|
||||
async def _emit_status(self, emitter, description: str, done: bool = False):
|
||||
"""Send status update event"""
|
||||
if self.valves.SHOW_STATUS and emitter:
|
||||
await emitter(
|
||||
{"type": "status", "data": {"description": description, "done": done}}
|
||||
)
|
||||
|
||||
def _generate_js_code(
|
||||
self,
|
||||
unique_id: str,
|
||||
chat_id: str,
|
||||
message_id: str,
|
||||
infographic_syntax: str,
|
||||
svg_width: int,
|
||||
export_format: str,
|
||||
) -> str:
|
||||
"""Generate JavaScript code for frontend SVG rendering"""
|
||||
|
||||
# Escape the syntax for JS embedding
|
||||
syntax_escaped = (
|
||||
infographic_syntax
|
||||
.replace("\\", "\\\\")
|
||||
.replace("`", "\\`")
|
||||
.replace("${", "\\${")
|
||||
.replace("</script>", "<\\/script>")
|
||||
)
|
||||
|
||||
# Template mapping (same as infographic.py)
|
||||
template_mapping_js = """
|
||||
const TEMPLATE_MAPPING = {
|
||||
'list-grid': 'list-grid-compact-card',
|
||||
'list-vertical': 'list-column-simple-vertical-arrow',
|
||||
'tree-vertical': 'hierarchy-tree-tech-style-capsule-item',
|
||||
'tree-horizontal': 'hierarchy-tree-lr-tech-style-capsule-item',
|
||||
'mindmap': 'hierarchy-mindmap-branch-gradient-capsule-item',
|
||||
'sequence-roadmap': 'sequence-roadmap-vertical-simple',
|
||||
'sequence-zigzag': 'sequence-horizontal-zigzag-simple',
|
||||
'sequence-horizontal': 'sequence-horizontal-zigzag-simple',
|
||||
'relation-sankey': 'relation-sankey-simple',
|
||||
'relation-circle': 'relation-circle-icon-badge',
|
||||
'compare-binary': 'compare-binary-horizontal-simple-vs',
|
||||
'compare-swot': 'compare-swot',
|
||||
'quadrant-quarter': 'quadrant-quarter-simple-card',
|
||||
'statistic-card': 'list-grid-compact-card',
|
||||
'chart-bar': 'chart-bar-plain-text',
|
||||
'chart-column': 'chart-column-simple',
|
||||
'chart-line': 'chart-line-plain-text',
|
||||
'chart-area': 'chart-area-simple',
|
||||
'chart-pie': 'chart-pie-plain-text',
|
||||
'chart-doughnut': 'chart-pie-donut-plain-text'
|
||||
};
|
||||
"""
|
||||
|
||||
return f"""
|
||||
(async function() {{
|
||||
const uniqueId = "{unique_id}";
|
||||
const chatId = "{chat_id}";
|
||||
const messageId = "{message_id}";
|
||||
const svgWidth = {svg_width};
|
||||
const exportFormat = "{export_format}";
|
||||
|
||||
console.log("[Infographic Markdown] Starting render...");
|
||||
console.log("[Infographic Markdown] chatId:", chatId, "messageId:", messageId);
|
||||
|
||||
try {{
|
||||
// Load AntV Infographic if not loaded
|
||||
if (typeof AntVInfographic === 'undefined') {{
|
||||
console.log("[Infographic Markdown] Loading AntV Infographic library...");
|
||||
await new Promise((resolve, reject) => {{
|
||||
const script = document.createElement('script');
|
||||
script.src = 'https://unpkg.com/@antv/infographic@latest/dist/infographic.min.js';
|
||||
script.onload = resolve;
|
||||
script.onerror = reject;
|
||||
document.head.appendChild(script);
|
||||
}});
|
||||
console.log("[Infographic Markdown] Library loaded.");
|
||||
}}
|
||||
|
||||
const {{ Infographic }} = AntVInfographic;
|
||||
|
||||
// Get infographic syntax
|
||||
let syntaxContent = `{syntax_escaped}`;
|
||||
console.log("[Infographic Markdown] Original syntax:", syntaxContent.substring(0, 200) + "...");
|
||||
|
||||
// Clean up syntax
|
||||
const backtick = String.fromCharCode(96);
|
||||
const prefix = backtick + backtick + backtick + 'infographic';
|
||||
const simplePrefix = backtick + backtick + backtick;
|
||||
|
||||
if (syntaxContent.toLowerCase().startsWith(prefix)) {{
|
||||
syntaxContent = syntaxContent.substring(prefix.length).trim();
|
||||
}} else if (syntaxContent.startsWith(simplePrefix)) {{
|
||||
syntaxContent = syntaxContent.substring(simplePrefix.length).trim();
|
||||
}}
|
||||
|
||||
if (syntaxContent.endsWith(simplePrefix)) {{
|
||||
syntaxContent = syntaxContent.substring(0, syntaxContent.length - simplePrefix.length).trim();
|
||||
}}
|
||||
|
||||
// Fix colons after keywords
|
||||
syntaxContent = syntaxContent.replace(/^(data|items|children|theme|config):/gm, '$1');
|
||||
syntaxContent = syntaxContent.replace(/(\\s)(children|items):/g, '$1$2');
|
||||
|
||||
// Ensure infographic prefix
|
||||
if (!syntaxContent.trim().toLowerCase().startsWith('infographic')) {{
|
||||
syntaxContent = 'infographic list-grid\\n' + syntaxContent;
|
||||
}}
|
||||
|
||||
// Apply template mapping
|
||||
{template_mapping_js}
|
||||
|
||||
for (const [key, value] of Object.entries(TEMPLATE_MAPPING)) {{
|
||||
const regex = new RegExp(`infographic\\\\s+${{key}}(?=\\\\s|$)`, 'i');
|
||||
if (regex.test(syntaxContent)) {{
|
||||
console.log(`[Infographic Markdown] Auto-mapping: ${{key}} -> ${{value}}`);
|
||||
syntaxContent = syntaxContent.replace(regex, `infographic ${{value}}`);
|
||||
break;
|
||||
}}
|
||||
}}
|
||||
|
||||
console.log("[Infographic Markdown] Cleaned syntax:", syntaxContent.substring(0, 200) + "...");
|
||||
|
||||
// Create offscreen container
|
||||
const container = document.createElement('div');
|
||||
container.id = 'infographic-offscreen-' + uniqueId;
|
||||
container.style.cssText = 'position:absolute;left:-9999px;top:-9999px;width:' + svgWidth + 'px;';
|
||||
document.body.appendChild(container);
|
||||
|
||||
// Create and render infographic
|
||||
const instance = new Infographic({{
|
||||
container: '#' + container.id,
|
||||
width: svgWidth,
|
||||
padding: 24,
|
||||
}});
|
||||
|
||||
console.log("[Infographic Markdown] Rendering infographic...");
|
||||
instance.render(syntaxContent);
|
||||
|
||||
// Wait for render and export
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
let dataUrl;
|
||||
if (exportFormat === 'png') {{
|
||||
dataUrl = await instance.toDataURL({{ type: 'png', dpr: 2 }});
|
||||
}} else {{
|
||||
dataUrl = await instance.toDataURL({{ type: 'svg', embedResources: true }});
|
||||
}}
|
||||
|
||||
console.log("[Infographic Markdown] Data URL generated, length:", dataUrl.length);
|
||||
|
||||
// Cleanup
|
||||
instance.destroy();
|
||||
document.body.removeChild(container);
|
||||
|
||||
// Generate markdown image
|
||||
const markdownImage = ``;
|
||||
|
||||
// Update message via API
|
||||
if (chatId && messageId) {{
|
||||
const token = localStorage.getItem("token");
|
||||
|
||||
// Get current message content
|
||||
const getResponse = await fetch(`/api/v1/chats/${{chatId}}`, {{
|
||||
method: "GET",
|
||||
headers: {{ "Authorization": `Bearer ${{token}}` }}
|
||||
}});
|
||||
|
||||
if (!getResponse.ok) {{
|
||||
throw new Error("Failed to get chat data: " + getResponse.status);
|
||||
}}
|
||||
|
||||
const chatData = await getResponse.json();
|
||||
let originalContent = "";
|
||||
|
||||
if (chatData.chat && chatData.chat.messages) {{
|
||||
const targetMsg = chatData.chat.messages.find(m => m.id === messageId);
|
||||
if (targetMsg && targetMsg.content) {{
|
||||
originalContent = targetMsg.content;
|
||||
}}
|
||||
}}
|
||||
|
||||
// Remove existing infographic images
|
||||
const infographicPattern = /\\n*!\\[📊[^\\]]*\\]\\(data:image\\/[^)]+\\)/g;
|
||||
let cleanedContent = originalContent.replace(infographicPattern, "");
|
||||
cleanedContent = cleanedContent.replace(/\\n{{3,}}/g, "\\n\\n").trim();
|
||||
|
||||
// Append new image
|
||||
const newContent = cleanedContent + "\\n\\n" + markdownImage;
|
||||
|
||||
// Update message
|
||||
const updateResponse = await fetch(`/api/v1/chats/${{chatId}}/messages/${{messageId}}/event`, {{
|
||||
method: "POST",
|
||||
headers: {{
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": `Bearer ${{token}}`
|
||||
}},
|
||||
body: JSON.stringify({{
|
||||
type: "chat:message",
|
||||
data: {{ content: newContent }}
|
||||
}})
|
||||
}});
|
||||
|
||||
if (updateResponse.ok) {{
|
||||
console.log("[Infographic Markdown] ✅ Message updated successfully!");
|
||||
}} else {{
|
||||
console.error("[Infographic Markdown] API error:", updateResponse.status);
|
||||
}}
|
||||
}} else {{
|
||||
console.warn("[Infographic Markdown] ⚠️ Missing chatId or messageId");
|
||||
}}
|
||||
|
||||
}} catch (error) {{
|
||||
console.error("[Infographic Markdown] Error:", error);
|
||||
}}
|
||||
}})();
|
||||
"""
|
||||
|
||||
async def action(
|
||||
self,
|
||||
body: dict,
|
||||
__user__: dict = None,
|
||||
__event_emitter__=None,
|
||||
__event_call__: Optional[Callable[[Any], Awaitable[None]]] = None,
|
||||
__metadata__: Optional[dict] = None,
|
||||
__request__: Request = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Generate infographic using AntV and embed as Markdown image.
|
||||
"""
|
||||
logger.info("Action: Infographic to Markdown started")
|
||||
|
||||
# Get user information
|
||||
if isinstance(__user__, (list, tuple)):
|
||||
user_language = __user__[0].get("language", "en") if __user__ else "en"
|
||||
user_name = __user__[0].get("name", "User") if __user__[0] else "User"
|
||||
user_id = __user__[0].get("id", "unknown_user") if __user__ else "unknown_user"
|
||||
elif isinstance(__user__, dict):
|
||||
user_language = __user__.get("language", "en")
|
||||
user_name = __user__.get("name", "User")
|
||||
user_id = __user__.get("id", "unknown_user")
|
||||
else:
|
||||
user_language = "en"
|
||||
user_name = "User"
|
||||
user_id = "unknown_user"
|
||||
|
||||
# Get current time
|
||||
now = datetime.now()
|
||||
current_date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
try:
|
||||
messages = body.get("messages", [])
|
||||
if not messages:
|
||||
raise ValueError("No messages available.")
|
||||
|
||||
# Get recent messages
|
||||
message_count = min(self.valves.MESSAGE_COUNT, len(messages))
|
||||
recent_messages = messages[-message_count:]
|
||||
|
||||
# Aggregate content
|
||||
aggregated_parts = []
|
||||
for msg in recent_messages:
|
||||
text_content = self._extract_text_content(msg.get("content"))
|
||||
if text_content:
|
||||
aggregated_parts.append(text_content)
|
||||
|
||||
if not aggregated_parts:
|
||||
raise ValueError("No text content found in messages.")
|
||||
|
||||
long_text_content = "\n\n---\n\n".join(aggregated_parts)
|
||||
|
||||
# Remove existing HTML blocks
|
||||
parts = re.split(r"```html.*?```", long_text_content, flags=re.DOTALL)
|
||||
clean_content = ""
|
||||
for part in reversed(parts):
|
||||
if part.strip():
|
||||
clean_content = part.strip()
|
||||
break
|
||||
|
||||
if not clean_content:
|
||||
clean_content = long_text_content.strip()
|
||||
|
||||
# Check minimum length
|
||||
if len(clean_content) < self.valves.MIN_TEXT_LENGTH:
|
||||
await self._emit_status(
|
||||
__event_emitter__,
|
||||
f"⚠️ 内容太短 ({len(clean_content)} 字符),至少需要 {self.valves.MIN_TEXT_LENGTH} 字符",
|
||||
True,
|
||||
)
|
||||
return body
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 正在分析内容...", False)
|
||||
|
||||
# Generate infographic syntax via LLM
|
||||
formatted_user_prompt = USER_PROMPT_GENERATE.format(
|
||||
user_name=user_name,
|
||||
current_date_time_str=current_date_time_str,
|
||||
user_language=user_language,
|
||||
long_text_content=clean_content,
|
||||
)
|
||||
|
||||
target_model = self.valves.MODEL_ID or body.get("model")
|
||||
|
||||
llm_payload = {
|
||||
"model": target_model,
|
||||
"messages": [
|
||||
{"role": "system", "content": SYSTEM_PROMPT_INFOGRAPHIC},
|
||||
{"role": "user", "content": formatted_user_prompt},
|
||||
],
|
||||
"stream": False,
|
||||
}
|
||||
|
||||
user_obj = Users.get_user_by_id(user_id)
|
||||
if not user_obj:
|
||||
raise ValueError(f"Unable to get user object: {user_id}")
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 AI 正在生成信息图语法...", False)
|
||||
|
||||
llm_response = await generate_chat_completion(__request__, llm_payload, user_obj)
|
||||
|
||||
if not llm_response or "choices" not in llm_response or not llm_response["choices"]:
|
||||
raise ValueError("Invalid LLM response.")
|
||||
|
||||
assistant_content = llm_response["choices"][0]["message"]["content"]
|
||||
infographic_syntax = self._extract_infographic_syntax(assistant_content)
|
||||
|
||||
logger.info(f"Generated syntax: {infographic_syntax[:200]}...")
|
||||
|
||||
# Extract IDs for API callback
|
||||
chat_id = self._extract_chat_id(body, __metadata__)
|
||||
message_id = self._extract_message_id(body, __metadata__)
|
||||
unique_id = f"ig_{int(time.time() * 1000)}"
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 正在渲染 SVG...", False)
|
||||
|
||||
# Execute JS to render and embed
|
||||
if __event_call__:
|
||||
js_code = self._generate_js_code(
|
||||
unique_id=unique_id,
|
||||
chat_id=chat_id,
|
||||
message_id=message_id,
|
||||
infographic_syntax=infographic_syntax,
|
||||
svg_width=self.valves.SVG_WIDTH,
|
||||
export_format=self.valves.EXPORT_FORMAT,
|
||||
)
|
||||
|
||||
await __event_call__(
|
||||
{
|
||||
"type": "execute",
|
||||
"data": {"code": js_code},
|
||||
}
|
||||
)
|
||||
|
||||
await self._emit_status(__event_emitter__, "✅ 信息图生成完成!", True)
|
||||
logger.info("Infographic to Markdown completed")
|
||||
|
||||
except Exception as e:
|
||||
error_message = f"Infographic generation failed: {str(e)}"
|
||||
logger.error(error_message, exc_info=True)
|
||||
await self._emit_status(__event_emitter__, f"❌ {error_message}", True)
|
||||
|
||||
return body
|
||||
@@ -1,592 +0,0 @@
|
||||
"""
|
||||
title: 📊 信息图转 Markdown
|
||||
author: Fu-Jie
|
||||
version: 1.0.0
|
||||
description: AI 生成信息图语法,前端渲染 SVG 并转换为 Markdown 图片格式嵌入消息。支持 AntV Infographic 模板。
|
||||
"""
|
||||
|
||||
import time
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Optional, Callable, Awaitable, Any, Dict
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import Request
|
||||
from datetime import datetime
|
||||
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# LLM 提示词
|
||||
# =================================================================
|
||||
|
||||
# System prompt (Chinese edition) instructing the LLM to emit AntV
# Infographic DSL. The Chinese text is runtime content and is preserved.
# FIX/NOTE(review): in this copy the leading whitespace inside the
# ```infographic examples had been lost (flush-left), contradicting the
# prompt's own two-space indentation rule; it is restored below — confirm
# against the original file.
SYSTEM_PROMPT_INFOGRAPHIC = """
你是一位专业的信息图设计专家,能够分析用户提供的文本内容并将其转换为 AntV Infographic 语法格式。

## 信息图语法规范

信息图语法是一种类似 Mermaid 的声明式语法,用于描述信息图模板、数据和主题。

### 语法规则
- 入口使用 `infographic <模板名>`
- 键值对用空格分隔,**绝对不允许使用冒号**
- 使用两个空格缩进
- 对象数组使用 `-` 加换行

⚠️ **重要警告:这不是 YAML 格式!**
- ❌ 错误:`children:` `items:` `data:`(带冒号)
- ✅ 正确:`children` `items` `data`(不带冒号)

### 模板库与选择指南

根据内容结构选择最合适的模板:

#### 1. 列表与层级
- **列表**:`list-grid`(网格卡片)、`list-vertical`(垂直列表)
- **树形**:`tree-vertical`(垂直树)、`tree-horizontal`(水平树)
- **思维导图**:`mindmap`(思维导图)

#### 2. 序列与关系
- **流程**:`sequence-roadmap`(路线图)、`sequence-zigzag`(折线流程)
- **关系**:`relation-sankey`(桑基图)、`relation-circle`(圆形关系)

#### 3. 对比与分析
- **对比**:`compare-binary`(二元对比)
- **分析**:`compare-swot`(SWOT 分析)、`quadrant-quarter`(象限图)

#### 4. 图表与数据
- **图表**:`chart-bar`、`chart-column`、`chart-line`、`chart-pie`、`chart-doughnut`、`chart-area`

### 数据结构示例

#### A. 标准列表/树形
```infographic
infographic list-grid
data
  title 项目模块
  items
    - label 模块 A
      desc 模块 A 的描述
    - label 模块 B
      desc 模块 B 的描述
```

#### B. 二元对比
```infographic
infographic compare-binary
data
  title 优势与劣势
  items
    - label 优势
      children
        - label 研发能力强
          desc 技术领先
    - label 劣势
      children
        - label 品牌曝光弱
          desc 营销不足
```

#### C. 图表
```infographic
infographic chart-bar
data
  title 季度收入
  items
    - label Q1
      value 120
    - label Q2
      value 150
```

### 常用数据字段
- `label`:主标题/标签(必填)
- `desc`:描述文字(`list-grid` 最多 30 个中文字符)
- `value`:数值(用于图表)
- `children`:嵌套项

## 输出要求
1. **语言**:使用用户的语言输出内容。
2. **格式**:用 ```infographic ... ``` 包裹输出。
3. **无冒号**:键后面不要使用冒号。
4. **缩进**:使用 2 个空格。
"""
|
||||
|
||||
USER_PROMPT_GENERATE = """
|
||||
请分析以下文本内容,将其核心信息转换为 AntV Infographic 语法格式。
|
||||
|
||||
---
|
||||
**用户上下文:**
|
||||
用户名:{user_name}
|
||||
当前时间:{current_date_time_str}
|
||||
用户语言:{user_language}
|
||||
---
|
||||
|
||||
**文本内容:**
|
||||
{long_text_content}
|
||||
|
||||
请根据文本特征选择最合适的信息图模板,输出标准的信息图语法。
|
||||
|
||||
**重要提示:**
|
||||
- 如果使用 `list-grid` 格式,确保每个卡片的 `desc` 描述限制在 **最多 30 个中文字符**。
|
||||
- 描述应简洁,突出重点。
|
||||
"""
|
||||
|
||||
|
||||
class Action:
    """Open WebUI action button: embed chat text as an infographic image.

    Chinese-localized edition of the "Infographic to Markdown" action; the
    valve descriptions and status strings are user-facing Chinese text and
    are kept as-is.
    """

    class Valves(BaseModel):
        # Admin-tunable settings (descriptions are shown in the UI).
        SHOW_STATUS: bool = Field(
            default=True, description="在聊天界面显示操作状态更新。"
        )
        # Model used for the syntax-generation call; empty string means
        # "use the model of the current conversation".
        MODEL_ID: str = Field(
            default="",
            description="用于文本分析的 LLM 模型 ID。留空则使用当前对话模型。",
        )
        # Guard against generating infographics from trivially short text.
        MIN_TEXT_LENGTH: int = Field(
            default=50,
            description="信息图分析所需的最小文本长度(字符数)。",
        )
        # How many of the newest chat messages are aggregated as input.
        MESSAGE_COUNT: int = Field(
            default=1,
            description="用于生成的最近消息数量。",
        )
        SVG_WIDTH: int = Field(
            default=800,
            description="生成的 SVG 宽度(像素)。",
        )
        # 'svg' embeds resources; 'png' is larger but more portable.
        EXPORT_FORMAT: str = Field(
            default="svg",
            description="导出格式:'svg' 或 'png'。",
        )
|
||||
|
||||
def __init__(self):
|
||||
self.valves = self.Valves()
|
||||
|
||||
def _extract_chat_id(self, body: dict, metadata: Optional[dict]) -> str:
|
||||
"""从 body 或 metadata 中提取 chat_id"""
|
||||
if isinstance(body, dict):
|
||||
chat_id = body.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
body_metadata = body.get("metadata", {})
|
||||
if isinstance(body_metadata, dict):
|
||||
chat_id = body_metadata.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
if isinstance(metadata, dict):
|
||||
chat_id = metadata.get("chat_id")
|
||||
if isinstance(chat_id, str) and chat_id.strip():
|
||||
return chat_id.strip()
|
||||
|
||||
return ""
|
||||
|
||||
def _extract_message_id(self, body: dict, metadata: Optional[dict]) -> str:
|
||||
"""从 body 或 metadata 中提取 message_id"""
|
||||
if isinstance(body, dict):
|
||||
message_id = body.get("id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
body_metadata = body.get("metadata", {})
|
||||
if isinstance(body_metadata, dict):
|
||||
message_id = body_metadata.get("message_id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
if isinstance(metadata, dict):
|
||||
message_id = metadata.get("message_id")
|
||||
if isinstance(message_id, str) and message_id.strip():
|
||||
return message_id.strip()
|
||||
|
||||
return ""
|
||||
|
||||
def _extract_infographic_syntax(self, llm_output: str) -> str:
|
||||
"""从 LLM 输出中提取信息图语法"""
|
||||
match = re.search(r"```infographic\s*(.*?)\s*```", llm_output, re.DOTALL)
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
else:
|
||||
logger.warning("LLM 输出未遵循预期格式,将整个输出作为语法处理。")
|
||||
return llm_output.strip()
|
||||
|
||||
def _extract_text_content(self, content) -> str:
|
||||
"""从消息内容中提取文本,支持多模态格式"""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
elif isinstance(content, list):
|
||||
text_parts = []
|
||||
for item in content:
|
||||
if isinstance(item, dict) and item.get("type") == "text":
|
||||
text_parts.append(item.get("text", ""))
|
||||
elif isinstance(item, str):
|
||||
text_parts.append(item)
|
||||
return "\n".join(text_parts)
|
||||
return str(content) if content else ""
|
||||
|
||||
async def _emit_status(self, emitter, description: str, done: bool = False):
|
||||
"""发送状态更新事件"""
|
||||
if self.valves.SHOW_STATUS and emitter:
|
||||
await emitter(
|
||||
{"type": "status", "data": {"description": description, "done": done}}
|
||||
)
|
||||
|
||||
def _generate_js_code(
|
||||
self,
|
||||
unique_id: str,
|
||||
chat_id: str,
|
||||
message_id: str,
|
||||
infographic_syntax: str,
|
||||
svg_width: int,
|
||||
export_format: str,
|
||||
) -> str:
|
||||
"""生成用于前端 SVG 渲染的 JavaScript 代码"""
|
||||
|
||||
# 转义语法以便嵌入 JS
|
||||
syntax_escaped = (
|
||||
infographic_syntax
|
||||
.replace("\\", "\\\\")
|
||||
.replace("`", "\\`")
|
||||
.replace("${", "\\${")
|
||||
.replace("</script>", "<\\/script>")
|
||||
)
|
||||
|
||||
# 模板映射
|
||||
template_mapping_js = """
|
||||
const TEMPLATE_MAPPING = {
|
||||
'list-grid': 'list-grid-compact-card',
|
||||
'list-vertical': 'list-column-simple-vertical-arrow',
|
||||
'tree-vertical': 'hierarchy-tree-tech-style-capsule-item',
|
||||
'tree-horizontal': 'hierarchy-tree-lr-tech-style-capsule-item',
|
||||
'mindmap': 'hierarchy-mindmap-branch-gradient-capsule-item',
|
||||
'sequence-roadmap': 'sequence-roadmap-vertical-simple',
|
||||
'sequence-zigzag': 'sequence-horizontal-zigzag-simple',
|
||||
'sequence-horizontal': 'sequence-horizontal-zigzag-simple',
|
||||
'relation-sankey': 'relation-sankey-simple',
|
||||
'relation-circle': 'relation-circle-icon-badge',
|
||||
'compare-binary': 'compare-binary-horizontal-simple-vs',
|
||||
'compare-swot': 'compare-swot',
|
||||
'quadrant-quarter': 'quadrant-quarter-simple-card',
|
||||
'statistic-card': 'list-grid-compact-card',
|
||||
'chart-bar': 'chart-bar-plain-text',
|
||||
'chart-column': 'chart-column-simple',
|
||||
'chart-line': 'chart-line-plain-text',
|
||||
'chart-area': 'chart-area-simple',
|
||||
'chart-pie': 'chart-pie-plain-text',
|
||||
'chart-doughnut': 'chart-pie-donut-plain-text'
|
||||
};
|
||||
"""
|
||||
|
||||
return f"""
|
||||
(async function() {{
|
||||
const uniqueId = "{unique_id}";
|
||||
const chatId = "{chat_id}";
|
||||
const messageId = "{message_id}";
|
||||
const svgWidth = {svg_width};
|
||||
const exportFormat = "{export_format}";
|
||||
|
||||
console.log("[信息图 Markdown] 开始渲染...");
|
||||
console.log("[信息图 Markdown] chatId:", chatId, "messageId:", messageId);
|
||||
|
||||
try {{
|
||||
// 加载 AntV Infographic(如果尚未加载)
|
||||
if (typeof AntVInfographic === 'undefined') {{
|
||||
console.log("[信息图 Markdown] 正在加载 AntV Infographic 库...");
|
||||
await new Promise((resolve, reject) => {{
|
||||
const script = document.createElement('script');
|
||||
script.src = 'https://unpkg.com/@antv/infographic@latest/dist/infographic.min.js';
|
||||
script.onload = resolve;
|
||||
script.onerror = reject;
|
||||
document.head.appendChild(script);
|
||||
}});
|
||||
console.log("[信息图 Markdown] 库加载完成。");
|
||||
}}
|
||||
|
||||
const {{ Infographic }} = AntVInfographic;
|
||||
|
||||
// 获取信息图语法
|
||||
let syntaxContent = `{syntax_escaped}`;
|
||||
console.log("[信息图 Markdown] 原始语法:", syntaxContent.substring(0, 200) + "...");
|
||||
|
||||
// 清理语法
|
||||
const backtick = String.fromCharCode(96);
|
||||
const prefix = backtick + backtick + backtick + 'infographic';
|
||||
const simplePrefix = backtick + backtick + backtick;
|
||||
|
||||
if (syntaxContent.toLowerCase().startsWith(prefix)) {{
|
||||
syntaxContent = syntaxContent.substring(prefix.length).trim();
|
||||
}} else if (syntaxContent.startsWith(simplePrefix)) {{
|
||||
syntaxContent = syntaxContent.substring(simplePrefix.length).trim();
|
||||
}}
|
||||
|
||||
if (syntaxContent.endsWith(simplePrefix)) {{
|
||||
syntaxContent = syntaxContent.substring(0, syntaxContent.length - simplePrefix.length).trim();
|
||||
}}
|
||||
|
||||
// 修复关键字后的冒号
|
||||
syntaxContent = syntaxContent.replace(/^(data|items|children|theme|config):/gm, '$1');
|
||||
syntaxContent = syntaxContent.replace(/(\\s)(children|items):/g, '$1$2');
|
||||
|
||||
// 确保有 infographic 前缀
|
||||
if (!syntaxContent.trim().toLowerCase().startsWith('infographic')) {{
|
||||
syntaxContent = 'infographic list-grid\\n' + syntaxContent;
|
||||
}}
|
||||
|
||||
// 应用模板映射
|
||||
{template_mapping_js}
|
||||
|
||||
for (const [key, value] of Object.entries(TEMPLATE_MAPPING)) {{
|
||||
const regex = new RegExp(`infographic\\\\s+${{key}}(?=\\\\s|$)`, 'i');
|
||||
if (regex.test(syntaxContent)) {{
|
||||
console.log(`[信息图 Markdown] 自动映射: ${{key}} -> ${{value}}`);
|
||||
syntaxContent = syntaxContent.replace(regex, `infographic ${{value}}`);
|
||||
break;
|
||||
}}
|
||||
}}
|
||||
|
||||
console.log("[信息图 Markdown] 清理后语法:", syntaxContent.substring(0, 200) + "...");
|
||||
|
||||
// 创建离屏容器
|
||||
const container = document.createElement('div');
|
||||
container.id = 'infographic-offscreen-' + uniqueId;
|
||||
container.style.cssText = 'position:absolute;left:-9999px;top:-9999px;width:' + svgWidth + 'px;';
|
||||
document.body.appendChild(container);
|
||||
|
||||
// 创建并渲染信息图
|
||||
const instance = new Infographic({{
|
||||
container: '#' + container.id,
|
||||
width: svgWidth,
|
||||
padding: 24,
|
||||
}});
|
||||
|
||||
console.log("[信息图 Markdown] 正在渲染信息图...");
|
||||
instance.render(syntaxContent);
|
||||
|
||||
// 等待渲染完成并导出
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
let dataUrl;
|
||||
if (exportFormat === 'png') {{
|
||||
dataUrl = await instance.toDataURL({{ type: 'png', dpr: 2 }});
|
||||
}} else {{
|
||||
dataUrl = await instance.toDataURL({{ type: 'svg', embedResources: true }});
|
||||
}}
|
||||
|
||||
console.log("[信息图 Markdown] Data URL 已生成,长度:", dataUrl.length);
|
||||
|
||||
// 清理
|
||||
instance.destroy();
|
||||
document.body.removeChild(container);
|
||||
|
||||
// 生成 Markdown 图片
|
||||
const markdownImage = ``;
|
||||
|
||||
// 通过 API 更新消息
|
||||
if (chatId && messageId) {{
|
||||
const token = localStorage.getItem("token");
|
||||
|
||||
// 获取当前消息内容
|
||||
const getResponse = await fetch(`/api/v1/chats/${{chatId}}`, {{
|
||||
method: "GET",
|
||||
headers: {{ "Authorization": `Bearer ${{token}}` }}
|
||||
}});
|
||||
|
||||
if (!getResponse.ok) {{
|
||||
throw new Error("获取对话数据失败: " + getResponse.status);
|
||||
}}
|
||||
|
||||
const chatData = await getResponse.json();
|
||||
let originalContent = "";
|
||||
|
||||
if (chatData.chat && chatData.chat.messages) {{
|
||||
const targetMsg = chatData.chat.messages.find(m => m.id === messageId);
|
||||
if (targetMsg && targetMsg.content) {{
|
||||
originalContent = targetMsg.content;
|
||||
}}
|
||||
}}
|
||||
|
||||
// 移除已有的信息图图片
|
||||
const infographicPattern = /\\n*!\\[📊[^\\]]*\\]\\(data:image\\/[^)]+\\)/g;
|
||||
let cleanedContent = originalContent.replace(infographicPattern, "");
|
||||
cleanedContent = cleanedContent.replace(/\\n{{3,}}/g, "\\n\\n").trim();
|
||||
|
||||
// 追加新图片
|
||||
const newContent = cleanedContent + "\\n\\n" + markdownImage;
|
||||
|
||||
// 更新消息
|
||||
const updateResponse = await fetch(`/api/v1/chats/${{chatId}}/messages/${{messageId}}/event`, {{
|
||||
method: "POST",
|
||||
headers: {{
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": `Bearer ${{token}}`
|
||||
}},
|
||||
body: JSON.stringify({{
|
||||
type: "chat:message",
|
||||
data: {{ content: newContent }}
|
||||
}})
|
||||
}});
|
||||
|
||||
if (updateResponse.ok) {{
|
||||
console.log("[信息图 Markdown] ✅ 消息更新成功!");
|
||||
}} else {{
|
||||
console.error("[信息图 Markdown] API 错误:", updateResponse.status);
|
||||
}}
|
||||
}} else {{
|
||||
console.warn("[信息图 Markdown] ⚠️ 缺少 chatId 或 messageId");
|
||||
}}
|
||||
|
||||
}} catch (error) {{
|
||||
console.error("[信息图 Markdown] 错误:", error);
|
||||
}}
|
||||
}})();
|
||||
"""
|
||||
|
||||
async def action(
|
||||
self,
|
||||
body: dict,
|
||||
__user__: dict = None,
|
||||
__event_emitter__=None,
|
||||
__event_call__: Optional[Callable[[Any], Awaitable[None]]] = None,
|
||||
__metadata__: Optional[dict] = None,
|
||||
__request__: Request = None,
|
||||
) -> dict:
|
||||
"""
|
||||
使用 AntV 生成信息图并作为 Markdown 图片嵌入。
|
||||
"""
|
||||
logger.info("动作:信息图转 Markdown 开始")
|
||||
|
||||
# 获取用户信息
|
||||
if isinstance(__user__, (list, tuple)):
|
||||
user_language = __user__[0].get("language", "zh") if __user__ else "zh"
|
||||
user_name = __user__[0].get("name", "用户") if __user__[0] else "用户"
|
||||
user_id = __user__[0].get("id", "unknown_user") if __user__ else "unknown_user"
|
||||
elif isinstance(__user__, dict):
|
||||
user_language = __user__.get("language", "zh")
|
||||
user_name = __user__.get("name", "用户")
|
||||
user_id = __user__.get("id", "unknown_user")
|
||||
else:
|
||||
user_language = "zh"
|
||||
user_name = "用户"
|
||||
user_id = "unknown_user"
|
||||
|
||||
# 获取当前时间
|
||||
now = datetime.now()
|
||||
current_date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
try:
|
||||
messages = body.get("messages", [])
|
||||
if not messages:
|
||||
raise ValueError("没有可用的消息。")
|
||||
|
||||
# 获取最近的消息
|
||||
message_count = min(self.valves.MESSAGE_COUNT, len(messages))
|
||||
recent_messages = messages[-message_count:]
|
||||
|
||||
# 聚合内容
|
||||
aggregated_parts = []
|
||||
for msg in recent_messages:
|
||||
text_content = self._extract_text_content(msg.get("content"))
|
||||
if text_content:
|
||||
aggregated_parts.append(text_content)
|
||||
|
||||
if not aggregated_parts:
|
||||
raise ValueError("消息中未找到文本内容。")
|
||||
|
||||
long_text_content = "\n\n---\n\n".join(aggregated_parts)
|
||||
|
||||
# 移除已有的 HTML 块
|
||||
parts = re.split(r"```html.*?```", long_text_content, flags=re.DOTALL)
|
||||
clean_content = ""
|
||||
for part in reversed(parts):
|
||||
if part.strip():
|
||||
clean_content = part.strip()
|
||||
break
|
||||
|
||||
if not clean_content:
|
||||
clean_content = long_text_content.strip()
|
||||
|
||||
# 检查最小长度
|
||||
if len(clean_content) < self.valves.MIN_TEXT_LENGTH:
|
||||
await self._emit_status(
|
||||
__event_emitter__,
|
||||
f"⚠️ 内容太短({len(clean_content)} 字符),至少需要 {self.valves.MIN_TEXT_LENGTH} 字符",
|
||||
True,
|
||||
)
|
||||
return body
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 正在分析内容...", False)
|
||||
|
||||
# 通过 LLM 生成信息图语法
|
||||
formatted_user_prompt = USER_PROMPT_GENERATE.format(
|
||||
user_name=user_name,
|
||||
current_date_time_str=current_date_time_str,
|
||||
user_language=user_language,
|
||||
long_text_content=clean_content,
|
||||
)
|
||||
|
||||
target_model = self.valves.MODEL_ID or body.get("model")
|
||||
|
||||
llm_payload = {
|
||||
"model": target_model,
|
||||
"messages": [
|
||||
{"role": "system", "content": SYSTEM_PROMPT_INFOGRAPHIC},
|
||||
{"role": "user", "content": formatted_user_prompt},
|
||||
],
|
||||
"stream": False,
|
||||
}
|
||||
|
||||
user_obj = Users.get_user_by_id(user_id)
|
||||
if not user_obj:
|
||||
raise ValueError(f"无法获取用户对象:{user_id}")
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 AI 正在生成信息图语法...", False)
|
||||
|
||||
llm_response = await generate_chat_completion(__request__, llm_payload, user_obj)
|
||||
|
||||
if not llm_response or "choices" not in llm_response or not llm_response["choices"]:
|
||||
raise ValueError("无效的 LLM 响应。")
|
||||
|
||||
assistant_content = llm_response["choices"][0]["message"]["content"]
|
||||
infographic_syntax = self._extract_infographic_syntax(assistant_content)
|
||||
|
||||
logger.info(f"生成的语法:{infographic_syntax[:200]}...")
|
||||
|
||||
# 提取 API 回调所需的 ID
|
||||
chat_id = self._extract_chat_id(body, __metadata__)
|
||||
message_id = self._extract_message_id(body, __metadata__)
|
||||
unique_id = f"ig_{int(time.time() * 1000)}"
|
||||
|
||||
await self._emit_status(__event_emitter__, "📊 正在渲染 SVG...", False)
|
||||
|
||||
# 执行 JS 进行渲染和嵌入
|
||||
if __event_call__:
|
||||
js_code = self._generate_js_code(
|
||||
unique_id=unique_id,
|
||||
chat_id=chat_id,
|
||||
message_id=message_id,
|
||||
infographic_syntax=infographic_syntax,
|
||||
svg_width=self.valves.SVG_WIDTH,
|
||||
export_format=self.valves.EXPORT_FORMAT,
|
||||
)
|
||||
|
||||
await __event_call__(
|
||||
{
|
||||
"type": "execute",
|
||||
"data": {"code": js_code},
|
||||
}
|
||||
)
|
||||
|
||||
await self._emit_status(__event_emitter__, "✅ 信息图生成完成!", True)
|
||||
logger.info("信息图转 Markdown 完成")
|
||||
|
||||
except Exception as e:
|
||||
error_message = f"信息图生成失败:{str(e)}"
|
||||
logger.error(error_message, exc_info=True)
|
||||
await self._emit_status(__event_emitter__, f"❌ {error_message}", True)
|
||||
|
||||
return body
|
||||
@@ -1,257 +0,0 @@
|
||||
"""
|
||||
title: JS Render PoC
|
||||
author: Fu-Jie
|
||||
version: 0.6.0
|
||||
description: Proof of concept for JS rendering + API write-back pattern. JS renders SVG and updates message via API.
|
||||
"""
|
||||
|
||||
import time
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, Callable, Awaitable, Any
|
||||
from pydantic import BaseModel, Field
|
||||
from fastapi import Request
|
||||
|
||||
# Module-level logging: INFO level, one logger named after this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Action:
    """Proof-of-concept Open WebUI action.

    Demonstrates the "JS render + API write-back" pattern: a JS snippet is
    executed in the browser via ``__event_call__`` ("execute" event), renders
    an SVG there, and persists it into the current message by calling the
    chat HTTP API from the client.
    """

    class Valves(BaseModel):
        # No configurable options for this PoC.
        pass

    def __init__(self):
        # Instantiate the (empty) valve set so Open WebUI can display it.
        self.valves = self.Valves()

    def _extract_chat_id(self, body: dict, metadata: Optional[dict]) -> str:
        """Extract chat_id from body or metadata.

        Checks, in order: body["chat_id"], body["metadata"]["chat_id"],
        then the separately passed metadata dict. Returns the stripped id,
        or "" when no source holds a non-empty string.
        """
        if isinstance(body, dict):
            # body["chat_id"] is the chat id
            chat_id = body.get("chat_id")
            if isinstance(chat_id, str) and chat_id.strip():
                return chat_id.strip()

            # Fall back to the metadata dict embedded in the body, if any.
            body_metadata = body.get("metadata", {})
            if isinstance(body_metadata, dict):
                chat_id = body_metadata.get("chat_id")
                if isinstance(chat_id, str) and chat_id.strip():
                    return chat_id.strip()

        # Finally, try the separately passed __metadata__ dict.
        if isinstance(metadata, dict):
            chat_id = metadata.get("chat_id")
            if isinstance(chat_id, str) and chat_id.strip():
                return chat_id.strip()

        return ""

    def _extract_message_id(self, body: dict, metadata: Optional[dict]) -> str:
        """Extract message_id from body or metadata.

        Checks, in order: body["id"], body["metadata"]["message_id"], then
        the separately passed metadata dict. Returns the stripped id, or ""
        when no source holds a non-empty string.
        """
        if isinstance(body, dict):
            # body["id"] is the message id
            message_id = body.get("id")
            if isinstance(message_id, str) and message_id.strip():
                return message_id.strip()

            body_metadata = body.get("metadata", {})
            if isinstance(body_metadata, dict):
                message_id = body_metadata.get("message_id")
                if isinstance(message_id, str) and message_id.strip():
                    return message_id.strip()

        if isinstance(metadata, dict):
            message_id = metadata.get("message_id")
            if isinstance(message_id, str) and message_id.strip():
                return message_id.strip()

        return ""

    async def action(
        self,
        body: dict,
        __user__: dict = None,
        __event_emitter__=None,
        __event_call__: Optional[Callable[[Any], Awaitable[None]]] = None,
        __metadata__: Optional[dict] = None,
        __request__: Request = None,
    ) -> dict:
        """
        PoC: Use __event_call__ to execute JS that renders SVG and updates message via API.
        """
        # Prepare debug data; summarize the messages list to keep logs small.
        body_for_log = {}
        for k, v in body.items():
            if k == "messages":
                body_for_log[k] = f"[{len(v)} messages]"
            else:
                body_for_log[k] = v

        body_json = json.dumps(body_for_log, ensure_ascii=False, default=str)
        metadata_json = (
            json.dumps(__metadata__, ensure_ascii=False, default=str)
            if __metadata__
            else "null"
        )

        # Escape characters that are special inside a JS template literal.
        body_json_escaped = (
            body_json.replace("\\", "\\\\").replace("`", "\\`").replace("${", "\\${")
        )
        metadata_json_escaped = (
            metadata_json.replace("\\", "\\\\")
            .replace("`", "\\`")
            .replace("${", "\\${")
        )

        chat_id = self._extract_chat_id(body, __metadata__)
        message_id = self._extract_message_id(body, __metadata__)

        # Millisecond timestamp keeps DOM/gradient ids unique per run.
        unique_id = f"poc_{int(time.time() * 1000)}"

        if __event_emitter__:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {"description": "🔄 正在渲染...", "done": False},
                }
            )

        # NOTE(review): the `markdownImage` template literal inside the JS
        # below was garbled during extraction and has been reconstructed
        # from the removal regex above it — confirm against the original.
        if __event_call__:
            await __event_call__(
                {
                    "type": "execute",
                    "data": {
                        "code": f"""
(async function() {{
    const uniqueId = "{unique_id}";
    const chatId = "{chat_id}";
    const messageId = "{message_id}";

    // ===== DEBUG: 输出 Python 端的数据 =====
    console.log("[JS Render PoC] ===== DEBUG INFO (from Python) =====");
    console.log("[JS Render PoC] body:", `{body_json_escaped}`);
    console.log("[JS Render PoC] __metadata__:", `{metadata_json_escaped}`);
    console.log("[JS Render PoC] Extracted: chatId=", chatId, "messageId=", messageId);
    console.log("[JS Render PoC] =========================================");

    try {{
        console.log("[JS Render PoC] Starting SVG render...");

        // Create SVG
        const svg = document.createElementNS("http://www.w3.org/2000/svg", "svg");
        svg.setAttribute("width", "200");
        svg.setAttribute("height", "200");
        svg.setAttribute("viewBox", "0 0 200 200");
        svg.setAttribute("xmlns", "http://www.w3.org/2000/svg");

        const defs = document.createElementNS("http://www.w3.org/2000/svg", "defs");
        const gradient = document.createElementNS("http://www.w3.org/2000/svg", "linearGradient");
        gradient.setAttribute("id", "grad-" + uniqueId);
        gradient.innerHTML = `
            <stop offset="0%" style="stop-color:#1e88e5;stop-opacity:1" />
            <stop offset="100%" style="stop-color:#43a047;stop-opacity:1" />
        `;
        defs.appendChild(gradient);
        svg.appendChild(defs);

        const circle = document.createElementNS("http://www.w3.org/2000/svg", "circle");
        circle.setAttribute("cx", "100");
        circle.setAttribute("cy", "100");
        circle.setAttribute("r", "80");
        circle.setAttribute("fill", `url(#grad-${{uniqueId}})`);
        svg.appendChild(circle);

        const text = document.createElementNS("http://www.w3.org/2000/svg", "text");
        text.setAttribute("x", "100");
        text.setAttribute("y", "105");
        text.setAttribute("text-anchor", "middle");
        text.setAttribute("fill", "white");
        text.setAttribute("font-size", "16");
        text.setAttribute("font-weight", "bold");
        text.textContent = "PoC Success!";
        svg.appendChild(text);

        // Convert to Base64 Data URI
        const svgData = new XMLSerializer().serializeToString(svg);
        const base64 = btoa(unescape(encodeURIComponent(svgData)));
        const dataUri = "data:image/svg+xml;base64," + base64;

        console.log("[JS Render PoC] SVG rendered, data URI length:", dataUri.length);

        // Call API - 完全替换方案(更稳定)
        if (chatId && messageId) {{
            const token = localStorage.getItem("token");

            // 1. 获取当前消息内容
            const getResponse = await fetch(`/api/v1/chats/${{chatId}}`, {{
                method: "GET",
                headers: {{ "Authorization": `Bearer ${{token}}` }}
            }});

            if (!getResponse.ok) {{
                throw new Error("Failed to get chat data: " + getResponse.status);
            }}

            const chatData = await getResponse.json();
            console.log("[JS Render PoC] Got chat data");

            let originalContent = "";
            if (chatData.chat && chatData.chat.messages) {{
                const targetMsg = chatData.chat.messages.find(m => m.id === messageId);
                if (targetMsg && targetMsg.content) {{
                    originalContent = targetMsg.content;
                    console.log("[JS Render PoC] Found original content, length:", originalContent.length);
                }}
            }}

            // 2. 移除已存在的 PoC 图片(如果有的话)
            // 匹配 ![JS Render PoC](...) 格式
            const pocImagePattern = /\\n*!\\[JS Render PoC[^\\]]*\\]\\(data:image\\/svg\\+xml;base64,[^)]+\\)/g;
            let cleanedContent = originalContent.replace(pocImagePattern, "");
            // 移除可能残留的多余空行
            cleanedContent = cleanedContent.replace(/\\n{{3,}}/g, "\\n\\n").trim();

            if (cleanedContent !== originalContent) {{
                console.log("[JS Render PoC] Removed existing PoC image(s)");
            }}

            // 3. 添加新的 Markdown 图片
            const markdownImage = `![JS Render PoC](${{dataUri}})`;
            const newContent = cleanedContent + "\\n\\n" + markdownImage;

            // 3. 使用 chat:message 完全替换
            const updateResponse = await fetch(`/api/v1/chats/${{chatId}}/messages/${{messageId}}/event`, {{
                method: "POST",
                headers: {{
                    "Content-Type": "application/json",
                    "Authorization": `Bearer ${{token}}`
                }},
                body: JSON.stringify({{
                    type: "chat:message",
                    data: {{ content: newContent }}
                }})
            }});

            if (updateResponse.ok) {{
                console.log("[JS Render PoC] ✅ Message updated successfully!");
            }} else {{
                console.error("[JS Render PoC] API error:", updateResponse.status, await updateResponse.text());
            }}
        }} else {{
            console.warn("[JS Render PoC] ⚠️ Missing chatId or messageId, cannot persist.");
        }}

    }} catch (error) {{
        console.error("[JS Render PoC] Error:", error);
    }}
}})();
"""
                    },
                }
            )

        if __event_emitter__:
            await __event_emitter__(
                {"type": "status", "data": {"description": "✅ 渲染完成", "done": True}}
            )

        return body
|
||||
BIN
plugins/actions/smart-mind-map/smart_mind_map.png
Normal file
|
After Width: | Height: | Size: 752 KiB |
BIN
plugins/actions/smart-mind-map/smart_mind_map_cn.png
Normal file
|
After Width: | Height: | Size: 216 KiB |
@@ -1,30 +0,0 @@
|
||||
# Deep Reading & Summary
|
||||
|
||||
A powerful tool for analyzing long texts, generating detailed summaries, key points, and actionable insights.
|
||||
|
||||
## Features
|
||||
|
||||
- **Deep Analysis**: Goes beyond simple summarization to understand the core message.
|
||||
- **Key Point Extraction**: Identifies and lists the most important information.
|
||||
- **Actionable Advice**: Provides practical suggestions based on the text content.
|
||||
|
||||
## Usage
|
||||
|
||||
1. Install the plugin.
|
||||
2. Send a long text or article to the chat.
|
||||
3. Click the "Deep Reading" button (or trigger via command).
|
||||
|
||||
## Author
|
||||
|
||||
Fu-Jie
|
||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||
|
||||
## License
|
||||
|
||||
MIT License
|
||||
|
||||
## Changelog
|
||||
|
||||
### v0.1.2
|
||||
|
||||
- Removed debug messages from output
|
||||
@@ -1,30 +0,0 @@
|
||||
# 深度阅读与摘要 (Deep Reading & Summary)
|
||||
|
||||
一个强大的长文本分析工具,用于生成详细摘要、关键信息点和可执行的行动建议。
|
||||
|
||||
## 功能特点
|
||||
|
||||
- **深度分析**:超越简单的总结,深入理解核心信息。
|
||||
- **关键点提取**:识别并列出最重要的信息点。
|
||||
- **行动建议**:基于文本内容提供切实可行的建议。
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 安装插件。
|
||||
2. 发送长文本或文章到聊天框。
|
||||
3. 点击“精读”按钮(或通过命令触发)。
|
||||
|
||||
## 作者
|
||||
|
||||
Fu-Jie
|
||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||
|
||||
## 许可证
|
||||
|
||||
MIT License
|
||||
|
||||
## 更新日志
|
||||
|
||||
### v0.1.2
|
||||
|
||||
- 移除输出中的调试信息
|
||||
@@ -1,674 +0,0 @@
|
||||
"""
|
||||
title: Deep Reading & Summary
|
||||
author: Fu-Jie
|
||||
author_url: https://github.com/Fu-Jie
|
||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||
version: 0.1.2
|
||||
icon_url: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9ImN1cnJlbnRDb2xvciIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiPjxwYXRoIGQ9Ik0xNSAxMmgtNSIvPjxwYXRoIGQ9Ik0xNSA4aC01Ii8+PHBhdGggZD0iTTE5IDE3VjVhMiAyIDAgMCAwLTItMkg0Ii8+PHBhdGggZD0iTTggMjFoMTJhMiAyIDAgMCAwIDItMnYtMWExIDEgMCAwIDAtMS0xSDExYTEgMSAwIDAgMC0xIDF2MWEyIDIgMCAxIDEtNCAwVjVhMiAyIDAgMSAwLTQgMHYyYTEgMSAwIDAgMCAxIDFoMyIvPjwvc3ZnPg==
|
||||
description: Provides deep reading analysis and summarization for long texts.
|
||||
requirements: jinja2, markdown
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, Dict, Any
|
||||
import logging
|
||||
import re
|
||||
from fastapi import Request
|
||||
from datetime import datetime
|
||||
import pytz
|
||||
import markdown
|
||||
from jinja2 import Template
|
||||
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
# Module-level logging: INFO level with timestamped, named records.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# HTML Wrapper Template (supports multiple plugins and grid layout)
|
||||
# =================================================================
|
||||
HTML_WRAPPER_TEMPLATE = """
|
||||
<!-- OPENWEBUI_PLUGIN_OUTPUT -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="{user_language}">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background-color: transparent;
|
||||
}
|
||||
#main-container {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 20px;
|
||||
align-items: flex-start;
|
||||
width: 100%;
|
||||
}
|
||||
.plugin-item {
|
||||
flex: 1 1 400px; /* Default width, allows shrinking/growing */
|
||||
min-width: 300px;
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 4px 6px rgba(0,0,0,0.05);
|
||||
overflow: hidden;
|
||||
border: 1px solid #e5e7eb;
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
.plugin-item:hover {
|
||||
box-shadow: 0 10px 15px rgba(0,0,0,0.1);
|
||||
}
|
||||
@media (max-width: 768px) {
|
||||
.plugin-item { flex: 1 1 100%; }
|
||||
}
|
||||
/* STYLES_INSERTION_POINT */
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="main-container">
|
||||
<!-- CONTENT_INSERTION_POINT -->
|
||||
</div>
|
||||
<!-- SCRIPTS_INSERTION_POINT -->
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# Internal LLM Prompts
|
||||
# =================================================================
|
||||
|
||||
SYSTEM_PROMPT_READING_ASSISTANT = """
|
||||
You are a professional Deep Text Analysis Expert, specializing in reading long texts and extracting the essence. Your task is to conduct a comprehensive and in-depth analysis.
|
||||
|
||||
Please provide the following:
|
||||
1. **Detailed Summary**: Summarize the core content of the text in 2-3 paragraphs, ensuring accuracy and completeness. Do not be too brief; ensure the reader fully understands the main idea.
|
||||
2. **Key Information Points**: List 5-8 most important facts, viewpoints, or arguments. Each point should:
|
||||
- Be specific and insightful
|
||||
- Include necessary details and context
|
||||
- Use Markdown list format
|
||||
3. **Actionable Advice**: Identify and refine specific, actionable items from the text. Each suggestion should:
|
||||
- Be clear and actionable
|
||||
- Include execution priority or timing suggestions
|
||||
- If there are no clear action items, provide learning suggestions or thinking directions
|
||||
|
||||
Please strictly follow these guidelines:
|
||||
- **Language**: All output must be in the user's specified language.
|
||||
- **Format**: Please strictly follow the Markdown format below, ensuring each section has a clear header:
|
||||
## Summary
|
||||
[Detailed summary content here, 2-3 paragraphs, use Markdown **bold** or *italic* to emphasize key points]
|
||||
|
||||
## Key Information Points
|
||||
- [Key Point 1: Include specific details and context]
|
||||
- [Key Point 2: Include specific details and context]
|
||||
- [Key Point 3: Include specific details and context]
|
||||
- [At least 5, at most 8 key points]
|
||||
|
||||
## Actionable Advice
|
||||
- [Action Item 1: Specific, actionable, include priority]
|
||||
- [Action Item 2: Specific, actionable, include priority]
|
||||
- [If no clear action items, provide learning suggestions or thinking directions]
|
||||
- **Depth First**: Analysis should be deep and comprehensive, not superficial.
|
||||
- **Action Oriented**: Focus on actionable suggestions and next steps.
|
||||
- **Analysis Results Only**: Do not include any extra pleasantries, explanations, or leading text.
|
||||
"""
|
||||
|
||||
USER_PROMPT_GENERATE_SUMMARY = """
|
||||
Please conduct a deep analysis of the following long text, providing:
|
||||
1. Detailed Summary (2-3 paragraphs, comprehensive overview)
|
||||
2. Key Information Points List (5-8 items, including specific details)
|
||||
3. Actionable Advice (Specific, clear, including priority)
|
||||
|
||||
---
|
||||
**User Context:**
|
||||
User Name: {user_name}
|
||||
Current Date/Time: {current_date_time_str}
|
||||
Weekday: {current_weekday}
|
||||
Timezone: {current_timezone_str}
|
||||
User Language: {user_language}
|
||||
---
|
||||
|
||||
**Long Text Content:**
|
||||
```
|
||||
{long_text_content}
|
||||
```
|
||||
|
||||
Please conduct a deep and comprehensive analysis, focusing on actionable advice.
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# Frontend HTML Template (Jinja2 Syntax)
|
||||
# =================================================================
|
||||
|
||||
CSS_TEMPLATE_SUMMARY = """
|
||||
:root {
|
||||
--primary-color: #4285f4;
|
||||
--secondary-color: #1e88e5;
|
||||
--action-color: #34a853;
|
||||
--background-color: #f8f9fa;
|
||||
--card-bg-color: #ffffff;
|
||||
--text-color: #202124;
|
||||
--muted-text-color: #5f6368;
|
||||
--border-color: #dadce0;
|
||||
--header-gradient: linear-gradient(135deg, #4285f4, #1e88e5);
|
||||
--shadow: 0 1px 3px rgba(60,64,67,.3);
|
||||
--border-radius: 8px;
|
||||
--font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
||||
}
|
||||
.summary-container-wrapper {
|
||||
font-family: var(--font-family);
|
||||
line-height: 1.8;
|
||||
color: var(--text-color);
|
||||
height: 100%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.summary-container-wrapper .header {
|
||||
background: var(--header-gradient);
|
||||
color: white;
|
||||
padding: 20px 24px;
|
||||
text-align: center;
|
||||
}
|
||||
.summary-container-wrapper .header h1 {
|
||||
margin: 0;
|
||||
font-size: 1.5em;
|
||||
font-weight: 500;
|
||||
letter-spacing: -0.5px;
|
||||
}
|
||||
.summary-container-wrapper .user-context {
|
||||
font-size: 0.8em;
|
||||
color: var(--muted-text-color);
|
||||
background-color: #f1f3f4;
|
||||
padding: 8px 16px;
|
||||
display: flex;
|
||||
justify-content: space-around;
|
||||
flex-wrap: wrap;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
}
|
||||
.summary-container-wrapper .user-context span { margin: 2px 8px; }
|
||||
.summary-container-wrapper .content { padding: 20px; flex-grow: 1; }
|
||||
.summary-container-wrapper .section {
|
||||
margin-bottom: 16px;
|
||||
padding-bottom: 16px;
|
||||
border-bottom: 1px solid #e8eaed;
|
||||
}
|
||||
.summary-container-wrapper .section:last-child {
|
||||
border-bottom: none;
|
||||
margin-bottom: 0;
|
||||
padding-bottom: 0;
|
||||
}
|
||||
.summary-container-wrapper .section h2 {
|
||||
margin-top: 0;
|
||||
margin-bottom: 12px;
|
||||
font-size: 1.2em;
|
||||
font-weight: 500;
|
||||
color: var(--text-color);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding-bottom: 8px;
|
||||
border-bottom: 2px solid var(--primary-color);
|
||||
}
|
||||
.summary-container-wrapper .section h2 .icon {
|
||||
margin-right: 8px;
|
||||
font-size: 1.1em;
|
||||
line-height: 1;
|
||||
}
|
||||
.summary-container-wrapper .summary-section h2 { border-bottom-color: var(--primary-color); }
|
||||
.summary-container-wrapper .keypoints-section h2 { border-bottom-color: var(--secondary-color); }
|
||||
.summary-container-wrapper .actions-section h2 { border-bottom-color: var(--action-color); }
|
||||
.summary-container-wrapper .html-content {
|
||||
font-size: 0.95em;
|
||||
line-height: 1.7;
|
||||
}
|
||||
.summary-container-wrapper .html-content p:first-child { margin-top: 0; }
|
||||
.summary-container-wrapper .html-content p:last-child { margin-bottom: 0; }
|
||||
.summary-container-wrapper .html-content ul {
|
||||
list-style: none;
|
||||
padding-left: 0;
|
||||
margin: 12px 0;
|
||||
}
|
||||
.summary-container-wrapper .html-content li {
|
||||
padding: 8px 0 8px 24px;
|
||||
position: relative;
|
||||
margin-bottom: 6px;
|
||||
line-height: 1.6;
|
||||
}
|
||||
.summary-container-wrapper .html-content li::before {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 8px;
|
||||
font-family: 'Arial';
|
||||
font-weight: bold;
|
||||
font-size: 1em;
|
||||
}
|
||||
.summary-container-wrapper .keypoints-section .html-content li::before {
|
||||
content: '•';
|
||||
color: var(--secondary-color);
|
||||
font-size: 1.3em;
|
||||
top: 5px;
|
||||
}
|
||||
.summary-container-wrapper .actions-section .html-content li::before {
|
||||
content: '▸';
|
||||
color: var(--action-color);
|
||||
}
|
||||
.summary-container-wrapper .no-content {
|
||||
color: var(--muted-text-color);
|
||||
font-style: italic;
|
||||
padding: 12px;
|
||||
background: #f8f9fa;
|
||||
border-radius: 4px;
|
||||
}
|
||||
.summary-container-wrapper .footer {
|
||||
text-align: center;
|
||||
padding: 16px;
|
||||
font-size: 0.8em;
|
||||
color: #5f6368;
|
||||
background-color: #f8f9fa;
|
||||
border-top: 1px solid var(--border-color);
|
||||
}
|
||||
"""
|
||||
|
||||
CONTENT_TEMPLATE_SUMMARY = """
|
||||
<div class="summary-container-wrapper">
|
||||
<div class="header">
|
||||
<h1>📖 Deep Reading: Analysis Report</h1>
|
||||
</div>
|
||||
<div class="user-context">
|
||||
<span><strong>User:</strong> {user_name}</span>
|
||||
<span><strong>Time:</strong> {current_date_time_str}</span>
|
||||
</div>
|
||||
<div class="content">
|
||||
<div class="section summary-section">
|
||||
<h2><span class="icon">📝</span>Detailed Summary</h2>
|
||||
<div class="html-content">{summary_html}</div>
|
||||
</div>
|
||||
<div class="section keypoints-section">
|
||||
<h2><span class="icon">💡</span>Key Information Points</h2>
|
||||
<div class="html-content">{keypoints_html}</div>
|
||||
</div>
|
||||
<div class="section actions-section">
|
||||
<h2><span class="icon">🎯</span>Actionable Advice</h2>
|
||||
<div class="html-content">{actions_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="footer">
|
||||
<p>© {current_year} Deep Reading - Text Analysis Service</p>
|
||||
</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
class Action:
    """Open WebUI "Deep Reading" action.

    Sends the text of recent chat messages to an LLM for in-depth analysis
    and appends the result to the last message as a self-contained HTML
    report (detailed summary, key information points, actionable advice).
    """

    class Valves(BaseModel):
        # Admin-configurable settings exposed in the Open WebUI UI.
        SHOW_STATUS: bool = Field(
            default=True,
            description="Whether to show operation status updates in the chat interface.",
        )
        MODEL_ID: str = Field(
            default="",
            description="Built-in LLM Model ID used for text analysis. If empty, uses the current conversation's model.",
        )
        MIN_TEXT_LENGTH: int = Field(
            default=200,
            description="Minimum text length required for deep analysis (characters). Recommended 200+.",
        )
        RECOMMENDED_MIN_LENGTH: int = Field(
            default=500,
            description="Recommended minimum text length for best analysis results.",
        )
        CLEAR_PREVIOUS_HTML: bool = Field(
            default=False,
            description="Whether to force clear previous plugin results (if True, overwrites instead of merging).",
        )
        MESSAGE_COUNT: int = Field(
            default=1,
            description="Number of recent messages to use for generation. Set to 1 for just the last message, or higher for more context.",
        )

    def __init__(self):
        self.valves = self.Valves()

    def _process_llm_output(self, llm_output: str) -> Dict[str, str]:
        """Parse LLM Markdown output and convert it to HTML fragments.

        Expects '## Summary', '## Key Information Points' and
        '## Actionable Advice' sections. If none are present, the whole
        output is treated as the summary (with a warning).

        Returns a dict with 'summary_html', 'keypoints_html' and
        'actions_html' keys, each holding an HTML string.
        """

        def _section(title: str) -> str:
            # Section titles contain no regex metacharacters, so direct
            # interpolation into the pattern is safe.
            match = re.search(
                rf"##\s*{title}\s*\n(.*?)(?=\n##|$)",
                llm_output,
                re.DOTALL | re.IGNORECASE,
            )
            return match.group(1).strip() if match else ""

        summary_md = _section("Summary")
        keypoints_md = _section("Key Information Points")
        actions_md = _section("Actionable Advice")

        if not any([summary_md, keypoints_md, actions_md]):
            summary_md = llm_output.strip()
            logger.warning(
                "LLM output did not follow expected Markdown format. Treating entire output as summary."
            )

        # 'nl2br' converts single newlines \n to <br> so line breaks survive
        # the Markdown -> HTML conversion.
        md_extensions = ["nl2br"]

        def _to_html(md_text: str, fallback: str) -> str:
            # Render the Markdown, or emit a styled placeholder when empty.
            if md_text:
                return markdown.markdown(md_text, extensions=md_extensions)
            return f'<p class="no-content">{fallback}</p>'

        return {
            "summary_html": _to_html(summary_md, "Failed to extract summary."),
            "keypoints_html": _to_html(
                keypoints_md, "Failed to extract key information points."
            ),
            "actions_html": _to_html(actions_md, "No explicit actionable advice."),
        }

    async def _emit_status(self, emitter, description: str, done: bool = False):
        """Emit a status update event (no-op when SHOW_STATUS is off or no emitter)."""
        if self.valves.SHOW_STATUS and emitter:
            await emitter(
                {"type": "status", "data": {"description": description, "done": done}}
            )

    async def _emit_notification(self, emitter, content: str, ntype: str = "info"):
        """Emit a notification event; ntype is one of info/success/warning/error."""
        if emitter:
            await emitter(
                {"type": "notification", "data": {"type": ntype, "content": content}}
            )

    def _remove_existing_html(self, content: str) -> str:
        """Strip previously generated plugin HTML blocks (identified by marker)."""
        pattern = r"```html\s*<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?```"
        return re.sub(pattern, "", content).strip()

    def _extract_text_content(self, content) -> str:
        """Extract plain text from message content, including multimodal lists.

        Multimodal content looks like
        [{"type": "text", "text": "..."}, {"type": "image_url", ...}];
        only the text parts are kept.
        """
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            text_parts = []
            for item in content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_parts.append(item.get("text", ""))
                elif isinstance(item, str):
                    text_parts.append(item)
            return "\n".join(text_parts)
        return str(content) if content else ""

    def _merge_html(
        self,
        existing_html_code: str,
        new_content: str,
        new_styles: str = "",
        new_scripts: str = "",
        user_language: str = "en-US",
    ) -> str:
        """Merge new content into an existing plugin HTML container, or create one.

        The container carries three insertion-point markers (styles, content,
        scripts); new material is spliced in BEFORE each marker so the
        markers remain available for subsequent merges.
        """
        if (
            "<!-- OPENWEBUI_PLUGIN_OUTPUT -->" in existing_html_code
            and "<!-- CONTENT_INSERTION_POINT -->" in existing_html_code
        ):
            # Reuse the existing container; drop a surrounding ```html fence
            # if the caller passed the raw code block.
            base_html = existing_html_code
            base_html = re.sub(r"^```html\s*", "", base_html)
            base_html = re.sub(r"\s*```$", "", base_html)
        else:
            base_html = HTML_WRAPPER_TEMPLATE.replace("{user_language}", user_language)

        wrapped_content = f'<div class="plugin-item">\n{new_content}\n</div>'

        if new_styles:
            base_html = base_html.replace(
                "/* STYLES_INSERTION_POINT */",
                f"{new_styles}\n/* STYLES_INSERTION_POINT */",
            )

        base_html = base_html.replace(
            "<!-- CONTENT_INSERTION_POINT -->",
            f"{wrapped_content}\n<!-- CONTENT_INSERTION_POINT -->",
        )

        if new_scripts:
            base_html = base_html.replace(
                "<!-- SCRIPTS_INSERTION_POINT -->",
                f"{new_scripts}\n<!-- SCRIPTS_INSERTION_POINT -->",
            )

        return base_html.strip()

    def _build_content_html(self, context: dict) -> str:
        """Fill CONTENT_TEMPLATE_SUMMARY placeholders from context via str.replace."""
        return (
            CONTENT_TEMPLATE_SUMMARY.replace(
                "{user_name}", context.get("user_name", "User")
            )
            .replace(
                "{current_date_time_str}", context.get("current_date_time_str", "")
            )
            .replace("{current_year}", context.get("current_year", ""))
            .replace("{summary_html}", context.get("summary_html", ""))
            .replace("{keypoints_html}", context.get("keypoints_html", ""))
            .replace("{actions_html}", context.get("actions_html", ""))
        )

    async def action(
        self,
        body: dict,
        __user__: Optional[Dict[str, Any]] = None,
        __event_emitter__: Optional[Any] = None,
        __request__: Optional[Request] = None,
    ) -> Optional[dict]:
        """Entry point: analyze recent messages and embed the HTML report.

        Args:
            body: Chat request body; body["messages"][-1] is rewritten with
                the report appended (or an error note on failure).
            __user__: User info as a dict or a one-element list/tuple.
            __event_emitter__: Async callable for status/notification events.
            __request__: FastAPI request, forwarded to the LLM call.

        Returns:
            The (mutated) body dict.
        """
        logger.info("Action: Deep Reading Started (v2.0.0)")

        # Defaults first: the original code left these names unbound
        # (NameError) when __user__ was None or an unexpected type.
        user_language = "en-US"
        user_name = "User"
        user_id = "unknown_user"
        if isinstance(__user__, (list, tuple)) and __user__:
            # Guard against a None first element as well as an empty list.
            first_user = __user__[0] or {}
            user_language = first_user.get("language", "en-US")
            user_name = first_user.get("name", "User")
            user_id = first_user.get("id", "unknown_user")
        elif isinstance(__user__, dict):
            user_language = __user__.get("language", "en-US")
            user_name = __user__.get("name", "User")
            user_id = __user__.get("id", "unknown_user")

        now = datetime.now()
        current_date_time_str = now.strftime("%B %d, %Y %H:%M:%S")
        current_weekday = now.strftime("%A")
        current_year = now.strftime("%Y")
        # NOTE(review): timezone detection is not implemented; the prompt
        # receives this placeholder value.
        current_timezone_str = "Unknown Timezone"

        original_content = ""
        try:
            messages = body.get("messages", [])
            if not messages:
                raise ValueError("Unable to get valid user message content.")

            # Clamp to [1, len(messages)]: MESSAGE_COUNT <= 0 would make the
            # slice messages[-0:] select ALL messages instead of none.
            message_count = max(1, min(self.valves.MESSAGE_COUNT, len(messages)))
            recent_messages = messages[-message_count:]

            # Aggregate the text of the selected messages.
            aggregated_parts = [
                text
                for msg in recent_messages
                if (text := self._extract_text_content(msg.get("content")))
            ]

            if not aggregated_parts:
                raise ValueError("Unable to get valid user message content.")

            original_content = "\n\n---\n\n".join(aggregated_parts)

            if len(original_content) < self.valves.MIN_TEXT_LENGTH:
                short_text_message = f"Text content too short ({len(original_content)} chars), recommended at least {self.valves.MIN_TEXT_LENGTH} chars for effective deep analysis.\n\n💡 Tip: For short texts, consider using '⚡ Flash Card' for quick refinement."
                await self._emit_notification(
                    __event_emitter__, short_text_message, "warning"
                )
                return {
                    "messages": [
                        {"role": "assistant", "content": f"⚠️ {short_text_message}"}
                    ]
                }

            # Soft recommendation for mid-length texts.
            if len(original_content) < self.valves.RECOMMENDED_MIN_LENGTH:
                await self._emit_notification(
                    __event_emitter__,
                    f"Text length is {len(original_content)} chars. Recommended {self.valves.RECOMMENDED_MIN_LENGTH}+ chars for best analysis results.",
                    "info",
                )

            await self._emit_notification(
                __event_emitter__,
                "📖 Deep Reading started, analyzing deeply...",
                "info",
            )
            await self._emit_status(
                __event_emitter__,
                "📖 Deep Reading: Analyzing text, extracting essence...",
                False,
            )

            formatted_user_prompt = USER_PROMPT_GENERATE_SUMMARY.format(
                user_name=user_name,
                current_date_time_str=current_date_time_str,
                current_weekday=current_weekday,
                current_timezone_str=current_timezone_str,
                user_language=user_language,
                long_text_content=original_content,
            )

            # Prefer the configured model; fall back to the conversation's.
            target_model = self.valves.MODEL_ID or body.get("model")

            llm_payload = {
                "model": target_model,
                "messages": [
                    {"role": "system", "content": SYSTEM_PROMPT_READING_ASSISTANT},
                    {"role": "user", "content": formatted_user_prompt},
                ],
                "stream": False,
            }

            user_obj = Users.get_user_by_id(user_id)
            if not user_obj:
                raise ValueError(f"Unable to get user object, User ID: {user_id}")

            llm_response = await generate_chat_completion(
                __request__, llm_payload, user_obj
            )
            assistant_response_content = llm_response["choices"][0]["message"][
                "content"
            ]

            processed_content = self._process_llm_output(assistant_response_content)

            context = {
                "user_language": user_language,
                "user_name": user_name,
                "current_date_time_str": current_date_time_str,
                "current_weekday": current_weekday,
                "current_year": current_year,
                **processed_content,
            }

            content_html = self._build_content_html(context)

            # Reuse a previously generated container if one is embedded in
            # the analyzed content.
            existing_html_block = ""
            match = re.search(
                r"```html\s*(<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?)```",
                original_content,
            )
            if match:
                existing_html_block = match.group(1)

            if self.valves.CLEAR_PREVIOUS_HTML:
                # Overwrite mode: drop the old container entirely.
                original_content = self._remove_existing_html(original_content)
                existing_html_block = ""
            elif existing_html_block:
                original_content = self._remove_existing_html(original_content)

            # With an empty existing block, _merge_html creates a fresh
            # container; otherwise it appends into the old one.
            final_html = self._merge_html(
                existing_html_block,
                content_html,
                CSS_TEMPLATE_SUMMARY,
                "",
                user_language,
            )

            html_embed_tag = f"```html\n{final_html}\n```"
            # NOTE(review): when MESSAGE_COUNT > 1 this rewrites the LAST
            # message with the aggregated text of all selected messages —
            # preserved as existing behavior; confirm it is intended.
            body["messages"][-1]["content"] = f"{original_content}\n\n{html_embed_tag}"

            await self._emit_status(
                __event_emitter__, "📖 Deep Reading: Analysis complete!", True
            )
            await self._emit_notification(
                __event_emitter__,
                f"📖 Deep Reading complete, {user_name}! Deep analysis report generated.",
                "success",
            )

        except Exception as e:
            error_message = f"Deep Reading processing failed: {str(e)}"
            logger.error(f"Deep Reading Error: {error_message}", exc_info=True)
            user_facing_error = f"Sorry, Deep Reading encountered an error while processing: {str(e)}.\nPlease check Open WebUI backend logs for more details."
            body["messages"][-1][
                "content"
            ] = f"{original_content}\n\n❌ **Error:** {user_facing_error}"

            await self._emit_status(
                __event_emitter__, "Deep Reading: Processing failed.", True
            )
            await self._emit_notification(
                __event_emitter__,
                f"Deep Reading processing failed, {user_name}!",
                "error",
            )

        return body
|
||||
@@ -1,663 +0,0 @@
|
||||
"""
|
||||
title: 精读 (Deep Reading)
|
||||
icon_url: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0Ij48ZGVmcz48bGluZWFyR3JhZGllbnQgaWQ9ImciIHgxPSIwIiB5MT0iMCIgeDI9IjEiIHkyPSIxIj48c3RvcCBvZmZzZXQ9IjAlIiBzdG9wLWNvbG9yPSIjNDI4NWY0Ii8+PHN0b3Agb2Zmc2V0PSIxMDAlIiBzdG9wLWNvbG9yPSIjMWU4OGU1Ii8+PC9saW5lYXJHcmFkaWVudD48L2RlZnM+PHBhdGggZD0iTTYgMmg4bDYgNnYxMmEyIDIgMCAwIDEtMiAySDZhMiAyIDAgMCAxLTItMlY0YTIgMiAwIDAgMSAyLTJ6IiBmaWxsPSJ1cmwoI2cpIi8+PHBhdGggZD0iTTE0IDJsNiA2aC02eiIgZmlsbD0iIzFlODhlNSIgb3BhY2l0eT0iMC42Ii8+PGxpbmUgeDE9IjgiIHkxPSIxMyIgeDI9IjE2IiB5Mj0iMTMiIHN0cm9rZT0iI2ZmZiIgc3Ryb2tlLXdpZHRoPSIxLjUiLz48bGluZSB4MT0iOCIgeTE9IjE3IiB4Mj0iMTQiIHkyPSIxNyIgc3Ryb2tlPSIjZmZmIiBzdHJva2Utd2lkdGg9IjEuNSIvPjxjaXJjbGUgY3g9IjE2IiBjeT0iMTgiIHI9IjMiIGZpbGw9IiNmZmQ3MDAiLz48cGF0aCBkPSJNMTYgMTZsMS41IDEuNSIgc3Ryb2tlPSIjNDI4NWY0IiBzdHJva2Utd2lkdGg9IjIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIvPjwvc3ZnPg==
|
||||
version: 0.1.2
|
||||
description: 深度分析长篇文本,提炼详细摘要、关键信息点和可执行的行动建议,适合工作和学习场景。
|
||||
requirements: jinja2, markdown
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, Dict, Any
|
||||
import logging
|
||||
import re
|
||||
from fastapi import Request
|
||||
from datetime import datetime
|
||||
import pytz
|
||||
import markdown
|
||||
from jinja2 import Template
|
||||
|
||||
from open_webui.utils.chat import generate_chat_completion
|
||||
from open_webui.models.users import Users
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# =================================================================
|
||||
# HTML 容器模板 (支持多插件共存与网格布局)
|
||||
# =================================================================
|
||||
HTML_WRAPPER_TEMPLATE = """
|
||||
<!-- OPENWEBUI_PLUGIN_OUTPUT -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="{user_language}">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background-color: transparent;
|
||||
}
|
||||
#main-container {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 20px;
|
||||
align-items: flex-start;
|
||||
width: 100%;
|
||||
}
|
||||
.plugin-item {
|
||||
flex: 1 1 400px; /* 默认宽度,允许伸缩 */
|
||||
min-width: 300px;
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 4px 6px rgba(0,0,0,0.05);
|
||||
overflow: hidden;
|
||||
border: 1px solid #e5e7eb;
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
.plugin-item:hover {
|
||||
box-shadow: 0 10px 15px rgba(0,0,0,0.1);
|
||||
}
|
||||
@media (max-width: 768px) {
|
||||
.plugin-item { flex: 1 1 100%; }
|
||||
}
|
||||
/* STYLES_INSERTION_POINT */
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="main-container">
|
||||
<!-- CONTENT_INSERTION_POINT -->
|
||||
</div>
|
||||
<!-- SCRIPTS_INSERTION_POINT -->
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# 内部 LLM 提示词设计
|
||||
# =================================================================
|
||||
|
||||
SYSTEM_PROMPT_READING_ASSISTANT = """
|
||||
你是一个专业的深度文本分析专家,擅长精读长篇文本并提炼精华。你的任务是进行全面、深入的分析。
|
||||
|
||||
请提供以下内容:
|
||||
1. **详细摘要**:用 2-3 段话全面总结文本的核心内容,确保准确性和完整性。不要过于简略,要让读者充分理解文本主旨。
|
||||
2. **关键信息点**:列出 5-8 个最重要的事实、观点或论据。每个信息点应该:
|
||||
- 具体且有深度
|
||||
- 包含必要的细节和背景
|
||||
- 使用 Markdown 列表格式
|
||||
3. **行动建议**:从文本中识别并提炼出具体的、可执行的行动项。每个建议应该:
|
||||
- 明确且可操作
|
||||
- 包含执行的优先级或时间建议
|
||||
- 如果没有明确的行动项,可以提供学习建议或思考方向
|
||||
|
||||
请严格遵循以下指导原则:
|
||||
- **语言**:所有输出必须使用用户指定的语言。
|
||||
- **格式**:请严格按照以下 Markdown 格式输出,确保每个部分都有明确的标题:
|
||||
## 摘要
|
||||
[这里是详细的摘要内容,2-3段话,可以使用 Markdown 进行**加粗**或*斜体*强调重点]
|
||||
|
||||
## 关键信息点
|
||||
- [关键点1:包含具体细节和背景]
|
||||
- [关键点2:包含具体细节和背景]
|
||||
- [关键点3:包含具体细节和背景]
|
||||
- [至少5个,最多8个关键点]
|
||||
|
||||
## 行动建议
|
||||
- [行动项1:具体、可执行,包含优先级]
|
||||
- [行动项2:具体、可执行,包含优先级]
|
||||
- [如果没有明确行动项,提供学习建议或思考方向]
|
||||
- **深度优先**:分析要深入、全面,不要浮于表面。
|
||||
- **行动导向**:重点关注可执行的建议和下一步行动。
|
||||
- **只输出分析结果**:不要包含任何额外的寒暄、解释或引导性文字。
|
||||
"""
|
||||
|
||||
USER_PROMPT_GENERATE_SUMMARY = """
|
||||
请对以下长篇文本进行深度分析,提供:
|
||||
1. 详细的摘要(2-3段话,全面概括文本内容)
|
||||
2. 关键信息点列表(5-8个,包含具体细节)
|
||||
3. 可执行的行动建议(具体、明确,包含优先级)
|
||||
|
||||
---
|
||||
**用户上下文信息:**
|
||||
用户姓名: {user_name}
|
||||
当前日期时间: {current_date_time_str}
|
||||
当前星期: {current_weekday}
|
||||
当前时区: {current_timezone_str}
|
||||
用户语言: {user_language}
|
||||
---
|
||||
|
||||
**长篇文本内容:**
|
||||
```
|
||||
{long_text_content}
|
||||
```
|
||||
|
||||
请进行深入、全面的分析,重点关注可执行的行动建议。
|
||||
"""
|
||||
|
||||
# =================================================================
|
||||
# 前端 HTML 模板 (Jinja2 语法)
|
||||
# =================================================================
|
||||
|
||||
CSS_TEMPLATE_SUMMARY = """
|
||||
:root {
|
||||
--primary-color: #4285f4;
|
||||
--secondary-color: #1e88e5;
|
||||
--action-color: #34a853;
|
||||
--background-color: #f8f9fa;
|
||||
--card-bg-color: #ffffff;
|
||||
--text-color: #202124;
|
||||
--muted-text-color: #5f6368;
|
||||
--border-color: #dadce0;
|
||||
--header-gradient: linear-gradient(135deg, #4285f4, #1e88e5);
|
||||
--shadow: 0 1px 3px rgba(60,64,67,.3);
|
||||
--border-radius: 8px;
|
||||
--font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
||||
}
|
||||
.summary-container-wrapper {
|
||||
font-family: var(--font-family);
|
||||
line-height: 1.8;
|
||||
color: var(--text-color);
|
||||
height: 100%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.summary-container-wrapper .header {
|
||||
background: var(--header-gradient);
|
||||
color: white;
|
||||
padding: 20px 24px;
|
||||
text-align: center;
|
||||
}
|
||||
.summary-container-wrapper .header h1 {
|
||||
margin: 0;
|
||||
font-size: 1.5em;
|
||||
font-weight: 500;
|
||||
letter-spacing: -0.5px;
|
||||
}
|
||||
.summary-container-wrapper .user-context {
|
||||
font-size: 0.8em;
|
||||
color: var(--muted-text-color);
|
||||
background-color: #f1f3f4;
|
||||
padding: 8px 16px;
|
||||
display: flex;
|
||||
justify-content: space-around;
|
||||
flex-wrap: wrap;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
}
|
||||
.summary-container-wrapper .user-context span { margin: 2px 8px; }
|
||||
.summary-container-wrapper .content { padding: 20px; flex-grow: 1; }
|
||||
.summary-container-wrapper .section {
|
||||
margin-bottom: 16px;
|
||||
padding-bottom: 16px;
|
||||
border-bottom: 1px solid #e8eaed;
|
||||
}
|
||||
.summary-container-wrapper .section:last-child {
|
||||
border-bottom: none;
|
||||
margin-bottom: 0;
|
||||
padding-bottom: 0;
|
||||
}
|
||||
.summary-container-wrapper .section h2 {
|
||||
margin-top: 0;
|
||||
margin-bottom: 12px;
|
||||
font-size: 1.2em;
|
||||
font-weight: 500;
|
||||
color: var(--text-color);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding-bottom: 8px;
|
||||
border-bottom: 2px solid var(--primary-color);
|
||||
}
|
||||
.summary-container-wrapper .section h2 .icon {
|
||||
margin-right: 8px;
|
||||
font-size: 1.1em;
|
||||
line-height: 1;
|
||||
}
|
||||
.summary-container-wrapper .summary-section h2 { border-bottom-color: var(--primary-color); }
|
||||
.summary-container-wrapper .keypoints-section h2 { border-bottom-color: var(--secondary-color); }
|
||||
.summary-container-wrapper .actions-section h2 { border-bottom-color: var(--action-color); }
|
||||
.summary-container-wrapper .html-content {
|
||||
font-size: 0.95em;
|
||||
line-height: 1.7;
|
||||
}
|
||||
.summary-container-wrapper .html-content p:first-child { margin-top: 0; }
|
||||
.summary-container-wrapper .html-content p:last-child { margin-bottom: 0; }
|
||||
.summary-container-wrapper .html-content ul {
|
||||
list-style: none;
|
||||
padding-left: 0;
|
||||
margin: 12px 0;
|
||||
}
|
||||
.summary-container-wrapper .html-content li {
|
||||
padding: 8px 0 8px 24px;
|
||||
position: relative;
|
||||
margin-bottom: 6px;
|
||||
line-height: 1.6;
|
||||
}
|
||||
.summary-container-wrapper .html-content li::before {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 8px;
|
||||
font-family: 'Arial';
|
||||
font-weight: bold;
|
||||
font-size: 1em;
|
||||
}
|
||||
.summary-container-wrapper .keypoints-section .html-content li::before {
|
||||
content: '•';
|
||||
color: var(--secondary-color);
|
||||
font-size: 1.3em;
|
||||
top: 5px;
|
||||
}
|
||||
.summary-container-wrapper .actions-section .html-content li::before {
|
||||
content: '▸';
|
||||
color: var(--action-color);
|
||||
}
|
||||
.summary-container-wrapper .no-content {
|
||||
color: var(--muted-text-color);
|
||||
font-style: italic;
|
||||
padding: 12px;
|
||||
background: #f8f9fa;
|
||||
border-radius: 4px;
|
||||
}
|
||||
.summary-container-wrapper .footer {
|
||||
text-align: center;
|
||||
padding: 16px;
|
||||
font-size: 0.8em;
|
||||
color: #5f6368;
|
||||
background-color: #f8f9fa;
|
||||
border-top: 1px solid var(--border-color);
|
||||
}
|
||||
"""
|
||||
|
||||
CONTENT_TEMPLATE_SUMMARY = """
|
||||
<div class="summary-container-wrapper">
|
||||
<div class="header">
|
||||
<h1>📖 精读:深度分析报告</h1>
|
||||
</div>
|
||||
<div class="user-context">
|
||||
<span><strong>用户:</strong> {user_name}</span>
|
||||
<span><strong>时间:</strong> {current_date_time_str}</span>
|
||||
</div>
|
||||
<div class="content">
|
||||
<div class="section summary-section">
|
||||
<h2><span class="icon">📝</span>详细摘要</h2>
|
||||
<div class="html-content">{summary_html}</div>
|
||||
</div>
|
||||
<div class="section keypoints-section">
|
||||
<h2><span class="icon">💡</span>关键信息点</h2>
|
||||
<div class="html-content">{keypoints_html}</div>
|
||||
</div>
|
||||
<div class="section actions-section">
|
||||
<h2><span class="icon">🎯</span>行动建议</h2>
|
||||
<div class="html-content">{actions_html}</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="footer">
|
||||
<p>© {current_year} 精读 - 深度文本分析服务</p>
|
||||
</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
class Action:
|
||||
class Valves(BaseModel):
|
||||
SHOW_STATUS: bool = Field(
|
||||
default=True, description="是否在聊天界面显示操作状态更新。"
|
||||
)
|
||||
MODEL_ID: str = Field(
|
||||
default="",
|
||||
description="用于文本分析的内置LLM模型ID。如果为空,则使用当前对话的模型。",
|
||||
)
|
||||
MIN_TEXT_LENGTH: int = Field(
|
||||
default=200,
|
||||
description="进行深度分析所需的最小文本长度(字符数)。建议200字符以上。",
|
||||
)
|
||||
RECOMMENDED_MIN_LENGTH: int = Field(
|
||||
default=500, description="建议的最小文本长度,以获得最佳分析效果。"
|
||||
)
|
||||
CLEAR_PREVIOUS_HTML: bool = Field(
|
||||
default=False,
|
||||
description="是否强制清除旧的插件结果(如果为 True,则不合并,直接覆盖)。",
|
||||
)
|
||||
MESSAGE_COUNT: int = Field(
|
||||
default=1,
|
||||
description="用于生成的最近消息数量。设置为1仅使用最后一条消息,更大值可包含更多上下文。",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
self.valves = self.Valves()
|
||||
self.weekday_map = {
|
||||
"Monday": "星期一",
|
||||
"Tuesday": "星期二",
|
||||
"Wednesday": "星期三",
|
||||
"Thursday": "星期四",
|
||||
"Friday": "星期五",
|
||||
"Saturday": "星期六",
|
||||
"Sunday": "星期日",
|
||||
}
|
||||
|
||||
def _process_llm_output(self, llm_output: str) -> Dict[str, str]:
|
||||
"""
|
||||
解析LLM的Markdown输出,将其转换为HTML片段。
|
||||
"""
|
||||
summary_match = re.search(
|
||||
r"##\s*摘要\s*\n(.*?)(?=\n##|$)", llm_output, re.DOTALL
|
||||
)
|
||||
keypoints_match = re.search(
|
||||
r"##\s*关键信息点\s*\n(.*?)(?=\n##|$)", llm_output, re.DOTALL
|
||||
)
|
||||
actions_match = re.search(
|
||||
r"##\s*行动建议\s*\n(.*?)(?=\n##|$)", llm_output, re.DOTALL
|
||||
)
|
||||
|
||||
summary_md = summary_match.group(1).strip() if summary_match else ""
|
||||
keypoints_md = keypoints_match.group(1).strip() if keypoints_match else ""
|
||||
actions_md = actions_match.group(1).strip() if actions_match else ""
|
||||
|
||||
if not any([summary_md, keypoints_md, actions_md]):
|
||||
summary_md = llm_output.strip()
|
||||
logger.warning("LLM输出未遵循预期的Markdown格式。将整个输出视为摘要。")
|
||||
|
||||
# 使用 'nl2br' 扩展将换行符 \n 转换为 <br>
|
||||
md_extensions = ["nl2br"]
|
||||
summary_html = (
|
||||
markdown.markdown(summary_md, extensions=md_extensions)
|
||||
if summary_md
|
||||
else '<p class="no-content">未能提取摘要信息。</p>'
|
||||
)
|
||||
keypoints_html = (
|
||||
markdown.markdown(keypoints_md, extensions=md_extensions)
|
||||
if keypoints_md
|
||||
else '<p class="no-content">未能提取关键信息点。</p>'
|
||||
)
|
||||
actions_html = (
|
||||
markdown.markdown(actions_md, extensions=md_extensions)
|
||||
if actions_md
|
||||
else '<p class="no-content">暂无明确的行动建议。</p>'
|
||||
)
|
||||
|
||||
return {
|
||||
"summary_html": summary_html,
|
||||
"keypoints_html": keypoints_html,
|
||||
"actions_html": actions_html,
|
||||
}
|
||||
|
||||
async def _emit_status(self, emitter, description: str, done: bool = False):
|
||||
"""发送状态更新事件。"""
|
||||
if self.valves.SHOW_STATUS and emitter:
|
||||
await emitter(
|
||||
{"type": "status", "data": {"description": description, "done": done}}
|
||||
)
|
||||
|
||||
async def _emit_notification(self, emitter, content: str, ntype: str = "info"):
|
||||
"""发送通知事件 (info/success/warning/error)。"""
|
||||
if emitter:
|
||||
await emitter(
|
||||
{"type": "notification", "data": {"type": ntype, "content": content}}
|
||||
)
|
||||
|
||||
def _remove_existing_html(self, content: str) -> str:
|
||||
"""移除内容中已有的插件生成 HTML 代码块 (通过标记识别)。"""
|
||||
pattern = r"```html\s*<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?```"
|
||||
return re.sub(pattern, "", content).strip()
|
||||
|
||||
def _extract_text_content(self, content) -> str:
|
||||
"""从消息内容中提取文本,支持多模态消息格式"""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
elif isinstance(content, list):
|
||||
# 多模态消息: [{"type": "text", "text": "..."}, {"type": "image_url", ...}]
|
||||
text_parts = []
|
||||
for item in content:
|
||||
if isinstance(item, dict) and item.get("type") == "text":
|
||||
text_parts.append(item.get("text", ""))
|
||||
elif isinstance(item, str):
|
||||
text_parts.append(item)
|
||||
return "\n".join(text_parts)
|
||||
return str(content) if content else ""
|
||||
|
||||
def _merge_html(
|
||||
self,
|
||||
existing_html_code: str,
|
||||
new_content: str,
|
||||
new_styles: str = "",
|
||||
new_scripts: str = "",
|
||||
user_language: str = "zh-CN",
|
||||
) -> str:
|
||||
"""
|
||||
将新内容合并到现有的 HTML 容器中,或者创建一个新的容器。
|
||||
"""
|
||||
if (
|
||||
"<!-- OPENWEBUI_PLUGIN_OUTPUT -->" in existing_html_code
|
||||
and "<!-- CONTENT_INSERTION_POINT -->" in existing_html_code
|
||||
):
|
||||
base_html = existing_html_code
|
||||
base_html = re.sub(r"^```html\s*", "", base_html)
|
||||
base_html = re.sub(r"\s*```$", "", base_html)
|
||||
else:
|
||||
base_html = HTML_WRAPPER_TEMPLATE.replace("{user_language}", user_language)
|
||||
|
||||
wrapped_content = f'<div class="plugin-item">\n{new_content}\n</div>'
|
||||
|
||||
if new_styles:
|
||||
base_html = base_html.replace(
|
||||
"/* STYLES_INSERTION_POINT */",
|
||||
f"{new_styles}\n/* STYLES_INSERTION_POINT */",
|
||||
)
|
||||
|
||||
base_html = base_html.replace(
|
||||
"<!-- CONTENT_INSERTION_POINT -->",
|
||||
f"{wrapped_content}\n<!-- CONTENT_INSERTION_POINT -->",
|
||||
)
|
||||
|
||||
if new_scripts:
|
||||
base_html = base_html.replace(
|
||||
"<!-- SCRIPTS_INSERTION_POINT -->",
|
||||
f"{new_scripts}\n<!-- SCRIPTS_INSERTION_POINT -->",
|
||||
)
|
||||
|
||||
return base_html.strip()
|
||||
|
||||
def _build_content_html(self, context: dict) -> str:
|
||||
"""
|
||||
使用上下文数据构建内容 HTML。
|
||||
"""
|
||||
return (
|
||||
CONTENT_TEMPLATE_SUMMARY.replace(
|
||||
"{user_name}", context.get("user_name", "用户")
|
||||
)
|
||||
.replace(
|
||||
"{current_date_time_str}", context.get("current_date_time_str", "")
|
||||
)
|
||||
.replace("{current_year}", context.get("current_year", ""))
|
||||
.replace("{summary_html}", context.get("summary_html", ""))
|
||||
.replace("{keypoints_html}", context.get("keypoints_html", ""))
|
||||
.replace("{actions_html}", context.get("actions_html", ""))
|
||||
)
|
||||
|
||||
async def action(
|
||||
self,
|
||||
body: dict,
|
||||
__user__: Optional[Dict[str, Any]] = None,
|
||||
__event_emitter__: Optional[Any] = None,
|
||||
__request__: Optional[Request] = None,
|
||||
) -> Optional[dict]:
|
||||
logger.info("Action: 精读启动 (v2.0.0 - Deep Reading)")
|
||||
|
||||
if isinstance(__user__, (list, tuple)):
|
||||
user_language = (
|
||||
__user__[0].get("language", "zh-CN") if __user__ else "zh-CN"
|
||||
)
|
||||
user_name = __user__[0].get("name", "用户") if __user__[0] else "用户"
|
||||
user_id = (
|
||||
__user__[0]["id"]
|
||||
if __user__ and "id" in __user__[0]
|
||||
else "unknown_user"
|
||||
)
|
||||
elif isinstance(__user__, dict):
|
||||
user_language = __user__.get("language", "zh-CN")
|
||||
user_name = __user__.get("name", "用户")
|
||||
user_id = __user__.get("id", "unknown_user")
|
||||
|
||||
now = datetime.now()
|
||||
current_date_time_str = now.strftime("%Y年%m月%d日 %H:%M:%S")
|
||||
current_weekday_en = now.strftime("%A")
|
||||
current_weekday = self.weekday_map.get(current_weekday_en, current_weekday_en)
|
||||
current_year = now.strftime("%Y")
|
||||
current_timezone_str = "未知时区"
|
||||
|
||||
original_content = ""
|
||||
try:
|
||||
messages = body.get("messages", [])
|
||||
if not messages:
|
||||
raise ValueError("无法获取有效的用户消息内容。")
|
||||
|
||||
# Get last N messages based on MESSAGE_COUNT
|
||||
message_count = min(self.valves.MESSAGE_COUNT, len(messages))
|
||||
recent_messages = messages[-message_count:]
|
||||
|
||||
# Aggregate content from selected messages with labels
|
||||
aggregated_parts = []
|
||||
for i, msg in enumerate(recent_messages, 1):
|
||||
text_content = self._extract_text_content(msg.get("content"))
|
||||
if text_content:
|
||||
role = msg.get("role", "unknown")
|
||||
role_label = (
|
||||
"用户"
|
||||
if role == "user"
|
||||
else "助手" if role == "assistant" else role
|
||||
)
|
||||
aggregated_parts.append(f"{text_content}")
|
||||
|
||||
if not aggregated_parts:
|
||||
raise ValueError("无法获取有效的用户消息内容。")
|
||||
|
||||
original_content = "\n\n---\n\n".join(aggregated_parts)
|
||||
|
||||
if len(original_content) < self.valves.MIN_TEXT_LENGTH:
|
||||
short_text_message = f"文本内容过短({len(original_content)}字符),建议至少{self.valves.MIN_TEXT_LENGTH}字符以获得有效的深度分析。\n\n💡 提示:对于短文本,建议使用'⚡ 闪记卡'进行快速提炼。"
|
||||
await self._emit_notification(
|
||||
__event_emitter__, short_text_message, "warning"
|
||||
)
|
||||
return {
|
||||
"messages": [
|
||||
{"role": "assistant", "content": f"⚠️ {short_text_message}"}
|
||||
]
|
||||
}
|
||||
|
||||
# Recommend for longer texts
|
||||
if len(original_content) < self.valves.RECOMMENDED_MIN_LENGTH:
|
||||
await self._emit_notification(
|
||||
__event_emitter__,
|
||||
f"文本长度为{len(original_content)}字符。建议{self.valves.RECOMMENDED_MIN_LENGTH}字符以上可获得更好的分析效果。",
|
||||
"info",
|
||||
)
|
||||
|
||||
await self._emit_notification(
|
||||
__event_emitter__, "📖 精读已启动,正在进行深度分析...", "info"
|
||||
)
|
||||
await self._emit_status(
|
||||
__event_emitter__, "📖 精读: 深入分析文本,提炼精华...", False
|
||||
)
|
||||
|
||||
formatted_user_prompt = USER_PROMPT_GENERATE_SUMMARY.format(
|
||||
user_name=user_name,
|
||||
current_date_time_str=current_date_time_str,
|
||||
current_weekday=current_weekday,
|
||||
current_timezone_str=current_timezone_str,
|
||||
user_language=user_language,
|
||||
long_text_content=original_content,
|
||||
)
|
||||
|
||||
# 确定使用的模型
|
||||
target_model = self.valves.MODEL_ID
|
||||
if not target_model:
|
||||
target_model = body.get("model")
|
||||
|
||||
llm_payload = {
|
||||
"model": target_model,
|
||||
"messages": [
|
||||
{"role": "system", "content": SYSTEM_PROMPT_READING_ASSISTANT},
|
||||
{"role": "user", "content": formatted_user_prompt},
|
||||
],
|
||||
"stream": False,
|
||||
}
|
||||
|
||||
user_obj = Users.get_user_by_id(user_id)
|
||||
if not user_obj:
|
||||
raise ValueError(f"无法获取用户对象, 用户ID: {user_id}")
|
||||
|
||||
llm_response = await generate_chat_completion(
|
||||
__request__, llm_payload, user_obj
|
||||
)
|
||||
assistant_response_content = llm_response["choices"][0]["message"][
|
||||
"content"
|
||||
]
|
||||
|
||||
processed_content = self._process_llm_output(assistant_response_content)
|
||||
|
||||
context = {
|
||||
"user_language": user_language,
|
||||
"user_name": user_name,
|
||||
"current_date_time_str": current_date_time_str,
|
||||
"current_weekday": current_weekday,
|
||||
"current_year": current_year,
|
||||
**processed_content,
|
||||
}
|
||||
|
||||
content_html = self._build_content_html(context)
|
||||
|
||||
# Extract existing HTML if any
|
||||
existing_html_block = ""
|
||||
match = re.search(
|
||||
r"```html\s*(<!-- OPENWEBUI_PLUGIN_OUTPUT -->[\s\S]*?)```",
|
||||
original_content,
|
||||
)
|
||||
if match:
|
||||
existing_html_block = match.group(1)
|
||||
|
||||
if self.valves.CLEAR_PREVIOUS_HTML:
|
||||
original_content = self._remove_existing_html(original_content)
|
||||
final_html = self._merge_html(
|
||||
"", content_html, CSS_TEMPLATE_SUMMARY, "", user_language
|
||||
)
|
||||
else:
|
||||
if existing_html_block:
|
||||
original_content = self._remove_existing_html(original_content)
|
||||
final_html = self._merge_html(
|
||||
existing_html_block,
|
||||
content_html,
|
||||
CSS_TEMPLATE_SUMMARY,
|
||||
"",
|
||||
user_language,
|
||||
)
|
||||
else:
|
||||
final_html = self._merge_html(
|
||||
"", content_html, CSS_TEMPLATE_SUMMARY, "", user_language
|
||||
)
|
||||
|
||||
html_embed_tag = f"```html\n{final_html}\n```"
|
||||
body["messages"][-1]["content"] = f"{original_content}\n\n{html_embed_tag}"
|
||||
|
||||
await self._emit_status(__event_emitter__, "📖 精读: 分析完成!", True)
|
||||
await self._emit_notification(
|
||||
__event_emitter__,
|
||||
f"📖 精读完成,{user_name}!深度分析报告已生成。",
|
||||
"success",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
error_message = f"精读处理失败: {str(e)}"
|
||||
logger.error(f"精读错误: {error_message}", exc_info=True)
|
||||
user_facing_error = f"抱歉, 精读在处理时遇到错误: {str(e)}。\n请检查Open WebUI后端日志获取更多详情。"
|
||||
body["messages"][-1][
|
||||
"content"
|
||||
] = f"{original_content}\n\n❌ **错误:** {user_facing_error}"
|
||||
|
||||
await self._emit_status(__event_emitter__, "精读: 处理失败。", True)
|
||||
await self._emit_notification(
|
||||
__event_emitter__, f"精读处理失败, {user_name}!", "error"
|
||||
)
|
||||
|
||||
return body
|
||||
519
plugins/filters/markdown_normalizer/markdown_normalizer.py
Normal file
@@ -0,0 +1,519 @@
|
||||
"""
|
||||
title: Markdown Normalizer
|
||||
author: Fu-Jie
|
||||
author_url: https://github.com/Fu-Jie
|
||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||
version: 1.0.0
|
||||
description: A production-grade content normalizer filter that fixes common Markdown formatting issues in LLM outputs, such as broken code blocks, LaTeX formulas, and list formatting.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, List, Callable
|
||||
import re
|
||||
import logging
|
||||
import logging
|
||||
import asyncio
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class NormalizerConfig:
    """Feature flags selecting which normalization rules the pipeline runs."""

    enable_escape_fix: bool = True          # collapse over-escaped \n, \t, \\
    enable_thought_tag_fix: bool = True     # unify <think>/<thinking> tags
    enable_code_block_fix: bool = True      # repair ``` fence placement
    enable_latex_fix: bool = True           # \[..\] -> $$..$$, \(..\) -> $..$
    # Off by default: joining "text1. item" can be overly aggressive.
    enable_list_fix: bool = False
    enable_unclosed_block_fix: bool = True  # append closing ``` on odd fence count
    enable_fullwidth_symbol_fix: bool = False  # half-width symbols inside code blocks
    enable_mermaid_fix: bool = True         # quote unquoted Mermaid node labels
    enable_heading_fix: bool = True         # "#Header" -> "# Header"
    enable_table_fix: bool = True           # add missing trailing pipe on table rows
    enable_xml_tag_cleanup: bool = True     # strip leftover artifact XML tags

    # Extension hook: extra callables applied after the built-in rules.
    custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
||||
|
||||
|
||||
class ContentNormalizer:
    """LLM Output Content Normalizer - Production Grade Implementation.

    Applies a configurable, ordered pipeline of regex-based fixes to Markdown
    text produced by LLMs (broken code fences, LaTeX delimiters, Mermaid
    labels, headings, tables, leftover XML tags). Rule order matters: the
    escape-character repair runs first so that later line-oriented patterns
    see real newlines. On any internal error the original text is returned
    unchanged (fail-open).
    """

    # --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
    # Compiled once at class-definition time and shared by all instances.
    _PATTERNS = {
        # Code block prefix: if ``` is not at start of line or file
        "code_block_prefix": re.compile(r"(?<!^)(?<!\n)(```)", re.MULTILINE),
        # Code block suffix: ```lang followed by non-whitespace (no newline)
        "code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
        # Code block indent: whitespace at start of line + ```
        "code_block_indent": re.compile(r"^[ \t]+(```)", re.MULTILINE),
        # Thought tag: </thought> followed by optional whitespace/newlines
        "thought_end": re.compile(
            r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
        ),
        "thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
        # LaTeX block: \[ ... \]
        "latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
        # LaTeX inline: \( ... \)
        "latex_paren_inline": re.compile(r"\\\((.+?)\\\)"),
        # List item: non-newline + digit + dot + space
        "list_item": re.compile(r"([^\n])(\d+\. )"),
        # XML artifacts (e.g. Claude's)
        "xml_artifacts": re.compile(
            r"</?(?:antArtifact|antThinking|artifact)[^>]*>", re.IGNORECASE
        ),
        # Mermaid: Match various node shapes and quote unquoted labels.
        # Must precisely match shape delimiters to avoid breaking structure;
        # longer delimiters are listed first so they win the alternation.
        # Each alternative contributes 3 capture groups: (open, content, close).
        "mermaid_node": re.compile(
            r"(\w+)\s*(?:"
            r"(\(\(\()(?![\"])(.*?)(?<![\"])(\)\)\))|"  # (((...))) Double Circle
            r"(\(\()(?![\"])(.*?)(?<![\"])(\)\))|"  # ((...)) Circle
            r"(\(\[)(?![\"])(.*?)(?<![\"])(\]\))|"  # ([...]) Stadium
            r"(\[\()(?![\"])(.*?)(?<![\"])(\)\])|"  # [(...)] Cylinder
            r"(\[\[)(?![\"])(.*?)(?<![\"])(\]\])|"  # [[...]] Subroutine
            r"(\{\{)(?![\"])(.*?)(?<![\"])(\}\})|"  # {{...}} Hexagon
            r"(\[/)(?![\"])(.*?)(?<![\"])(/\])|"  # [/.../] Parallelogram
            r"(\[\\)(?![\"])(.*?)(?<![\"])(\\\])|"  # [\...\] Parallelogram Alt
            r"(\[/)(?![\"])(.*?)(?<![\"])(\\\])|"  # [/...\] Trapezoid
            r"(\[\\)(?![\"])(.*?)(?<![\"])(/\])|"  # [\.../] Trapezoid Alt
            r"(\()(?![\"])(.*?)(?<![\"])(\))|"  # (...) Round
            r"(\[)(?![\"])(.*?)(?<![\"])(\])|"  # [...] Square
            r"(\{)(?![\"])(.*?)(?<![\"])(\})|"  # {...} Rhombus
            r"(>)(?![\"])(.*?)(?<![\"])(\])"  # >...] Asymmetric
            r")"
        ),
        # Heading: #Heading -> # Heading
        "heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
        # Table: | col1 | col2 -> | col1 | col2 |
        "table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
    }

    def __init__(self, config: Optional[NormalizerConfig] = None):
        """Create a normalizer; a default NormalizerConfig is used when none is given."""
        self.config = config or NormalizerConfig()
        # Human-readable names of rules that changed the content on the last
        # normalize() call (reset at the start of each call).
        self.applied_fixes: List[str] = []

    def normalize(self, content: str) -> str:
        """Main entry point: apply all enabled normalization rules in order.

        Returns the normalized text; on any unexpected error the original
        text is returned unchanged. `self.applied_fixes` records which rules
        actually modified the content.
        """
        self.applied_fixes = []
        if not content:
            return content

        original_content = content  # Keep a copy for logging

        try:
            # 1. Escape character fix (Must be first)
            if self.config.enable_escape_fix:
                original = content
                content = self._fix_escape_characters(content)
                if content != original:
                    self.applied_fixes.append("Fix Escape Chars")

            # 2. Thought tag normalization
            if self.config.enable_thought_tag_fix:
                original = content
                content = self._fix_thought_tags(content)
                if content != original:
                    self.applied_fixes.append("Normalize Thought Tags")

            # 3. Code block formatting fix
            if self.config.enable_code_block_fix:
                original = content
                content = self._fix_code_blocks(content)
                if content != original:
                    self.applied_fixes.append("Fix Code Blocks")

            # 4. LaTeX formula normalization
            if self.config.enable_latex_fix:
                original = content
                content = self._fix_latex_formulas(content)
                if content != original:
                    self.applied_fixes.append("Normalize LaTeX")

            # 5. List formatting fix
            if self.config.enable_list_fix:
                original = content
                content = self._fix_list_formatting(content)
                if content != original:
                    self.applied_fixes.append("Fix List Format")

            # 6. Unclosed code block fix
            if self.config.enable_unclosed_block_fix:
                original = content
                content = self._fix_unclosed_code_blocks(content)
                if content != original:
                    self.applied_fixes.append("Close Code Blocks")

            # 7. Full-width symbol fix (in code blocks only)
            if self.config.enable_fullwidth_symbol_fix:
                original = content
                content = self._fix_fullwidth_symbols_in_code(content)
                if content != original:
                    self.applied_fixes.append("Fix Full-width Symbols")

            # 8. Mermaid syntax fix
            if self.config.enable_mermaid_fix:
                original = content
                content = self._fix_mermaid_syntax(content)
                if content != original:
                    self.applied_fixes.append("Fix Mermaid Syntax")

            # 9. Heading fix
            if self.config.enable_heading_fix:
                original = content
                content = self._fix_headings(content)
                if content != original:
                    self.applied_fixes.append("Fix Headings")

            # 10. Table fix
            if self.config.enable_table_fix:
                original = content
                content = self._fix_tables(content)
                if content != original:
                    self.applied_fixes.append("Fix Tables")

            # 11. XML tag cleanup
            if self.config.enable_xml_tag_cleanup:
                original = content
                content = self._cleanup_xml_tags(content)
                if content != original:
                    self.applied_fixes.append("Cleanup XML Tags")

            # 12. Custom cleaners (run last, after all built-in rules)
            for cleaner in self.config.custom_cleaners:
                original = content
                content = cleaner(content)
                if content != original:
                    self.applied_fixes.append("Custom Cleaner")

            if self.applied_fixes:
                logger.info(f"Markdown Normalizer Applied Fixes: {self.applied_fixes}")
                logger.debug(
                    f"--- Original Content ---\n{original_content}\n------------------------"
                )
                logger.debug(
                    f"--- Normalized Content ---\n{content}\n--------------------------"
                )

            return content

        except Exception as e:
            # Production safeguard: return original content on error
            logger.error(f"Content normalization failed: {e}", exc_info=True)
            return content

    def _fix_escape_characters(self, content: str) -> str:
        """Fix excessive escape characters (literal \\n / \\t / \\\\ sequences).

        NOTE(review): the "\\\\" -> "\\" replacement can also rewrite content
        that legitimately contains doubled backslashes (e.g. LaTeX row breaks
        or Windows paths inside code) — confirm this is intended.
        """
        content = content.replace("\\r\\n", "\n")
        content = content.replace("\\n", "\n")
        content = content.replace("\\t", "\t")
        content = content.replace("\\\\", "\\")
        return content

    def _fix_thought_tags(self, content: str) -> str:
        """Normalize thought tags: unify naming and fix spacing"""
        # 1. Standardize start tag: <think>, <thinking> -> <thought>
        content = self._PATTERNS["thought_start"].sub("<thought>", content)
        # 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
        return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)

    def _fix_code_blocks(self, content: str) -> str:
        """Fix code block formatting (prefixes, suffixes, indentation)"""
        # Remove indentation before code blocks
        content = self._PATTERNS["code_block_indent"].sub(r"\1", content)
        # Ensure newline before ```
        content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
        # Ensure newline after ```lang
        content = self._PATTERNS["code_block_suffix"].sub(r"\1\n\2", content)
        return content

    def _fix_latex_formulas(self, content: str) -> str:
        """Normalize LaTeX formulas: \[ -> $$ (block), \( -> $ (inline)"""
        content = self._PATTERNS["latex_bracket_block"].sub(r"$$\1$$", content)
        content = self._PATTERNS["latex_paren_inline"].sub(r"$\1$", content)
        return content

    def _fix_list_formatting(self, content: str) -> str:
        """Fix missing newlines in lists (e.g., 'text1. item' -> 'text\\n1. item')"""
        return self._PATTERNS["list_item"].sub(r"\1\n\2", content)

    def _fix_unclosed_code_blocks(self, content: str) -> str:
        """Auto-close unclosed code blocks"""
        # An odd number of ``` fences means the last block was never closed.
        if content.count("```") % 2 != 0:
            content += "\n```"
        return content

    def _fix_fullwidth_symbols_in_code(self, content: str) -> str:
        """Convert full-width symbols to half-width inside code blocks"""
        FULLWIDTH_MAP = {
            ",": ",",
            "。": ".",
            "(": "(",
            ")": ")",
            "【": "[",
            "】": "]",
            ";": ";",
            ":": ":",
            "?": "?",
            "!": "!",
            '"': '"',
            '"': '"',
            "'": "'", "'": "'",
        }

        # Splitting on ``` alternates text/code: code block content is at
        # odd indices (1, 3, 5, ...).
        parts = content.split("```")
        for i in range(1, len(parts), 2):
            for full, half in FULLWIDTH_MAP.items():
                parts[i] = parts[i].replace(full, half)

        return "```".join(parts)

    def _fix_mermaid_syntax(self, content: str) -> str:
        """Fix common Mermaid syntax errors while preserving node shapes"""

        def replacer(match):
            # Group 1 is the node ID
            id_str = match.group(1)

            # Find the matching shape alternative: groups start at index 2,
            # each shape contributes 3 groups (Open, Content, Close); exactly
            # one alternative is non-None for a given match.
            groups = match.groups()
            for i in range(1, len(groups), 3):
                if groups[i] is not None:
                    open_char = groups[i]
                    content = groups[i + 1]
                    close_char = groups[i + 2]

                    # Escape quotes in content
                    content = content.replace('"', '\\"')

                    return f'{id_str}{open_char}"{content}"{close_char}'

            # Defensive fallback: leave the match untouched.
            return match.group(0)

        parts = content.split("```")
        for i in range(1, len(parts), 2):
            # Check if it's a mermaid block (language tag on the first line)
            lang_line = parts[i].split("\n", 1)[0].strip().lower()
            if "mermaid" in lang_line:
                # Apply the comprehensive regex fix
                parts[i] = self._PATTERNS["mermaid_node"].sub(replacer, parts[i])

                # Auto-close subgraphs: every `subgraph` needs a matching `end`.
                # NOTE(review): \bend\b also matches "end" in unrelated words'
                # contexts such as link labels — confirm acceptable.
                subgraph_count = len(
                    re.findall(r"\bsubgraph\b", parts[i], re.IGNORECASE)
                )
                end_count = len(re.findall(r"\bend\b", parts[i], re.IGNORECASE))

                if subgraph_count > end_count:
                    missing_ends = subgraph_count - end_count
                    parts[i] = parts[i].rstrip() + ("\n end" * missing_ends) + "\n"

        return "```".join(parts)

    def _fix_headings(self, content: str) -> str:
        """Fix missing space in headings: #Heading -> # Heading"""
        # We only fix if it's not inside a code block.
        # But splitting by code block is expensive.
        # Given headings usually don't appear inside code blocks without space in valid code (except comments),
        # we might risk false positives in comments like `#TODO`.
        # To be safe, let's split by code blocks.

        parts = content.split("```")
        for i in range(0, len(parts), 2):  # Even indices are markdown text
            parts[i] = self._PATTERNS["heading_space"].sub(r"\1 \2", parts[i])
        return "```".join(parts)

    def _fix_tables(self, content: str) -> str:
        """Fix tables missing closing pipe"""
        # Only touch the markdown text segments (even indices), never code.
        parts = content.split("```")
        for i in range(0, len(parts), 2):
            parts[i] = self._PATTERNS["table_pipe"].sub(r"\1|", parts[i])
        return "```".join(parts)

    def _cleanup_xml_tags(self, content: str) -> str:
        """Remove leftover XML tags"""
        return self._PATTERNS["xml_artifacts"].sub("", content)
|
||||
|
||||
|
||||
class Filter:
    """Outlet filter that normalizes Markdown in assistant responses.

    Wires the Valves (user-facing switches) into a NormalizerConfig, runs
    ContentNormalizer over the last assistant message, and optionally emits
    a status line and a browser-console debug log.
    """

    class Valves(BaseModel):
        priority: int = Field(
            default=50,
            description="Priority level. Higher runs later (recommended to run after other filters).",
        )
        enable_escape_fix: bool = Field(
            default=True, description="Fix excessive escape characters (\\n, \\t, etc.)"
        )
        enable_thought_tag_fix: bool = Field(
            default=True, description="Normalize </thought> tags"
        )
        enable_code_block_fix: bool = Field(
            default=True,
            description="Fix code block formatting (indentation, newlines)",
        )
        enable_latex_fix: bool = Field(
            default=True, description="Normalize LaTeX formulas (\\[ -> $$, \\( -> $)"
        )
        enable_list_fix: bool = Field(
            default=False, description="Fix list item newlines (Experimental)"
        )
        enable_unclosed_block_fix: bool = Field(
            default=True, description="Auto-close unclosed code blocks"
        )
        enable_fullwidth_symbol_fix: bool = Field(
            default=False, description="Fix full-width symbols in code blocks"
        )
        enable_mermaid_fix: bool = Field(
            default=True,
            description="Fix common Mermaid syntax errors (e.g. unquoted labels)",
        )
        enable_heading_fix: bool = Field(
            default=True,
            description="Fix missing space in headings (#Header -> # Header)",
        )
        enable_table_fix: bool = Field(
            default=True, description="Fix missing closing pipe in tables"
        )
        enable_xml_tag_cleanup: bool = Field(
            default=True, description="Cleanup leftover XML tags"
        )
        show_status: bool = Field(
            default=True, description="Show status notification when fixes are applied"
        )
        show_debug_log: bool = Field(
            default=False, description="Print debug logs to browser console (F12)"
        )

    def __init__(self):
        self.valves = self.Valves()

    def _contains_html(self, content: str) -> bool:
        """Check if content contains HTML tags (to avoid breaking HTML output)."""
        pattern = r"<\s*/?\s*(?:html|head|body|div|span|p|br|hr|ul|ol|li|table|thead|tbody|tfoot|tr|td|th|img|a|b|i|strong|em|code|pre|blockquote|h[1-6]|script|style|form|input|button|label|select|option|iframe|link|meta|title)\b"
        return bool(re.search(pattern, content, re.IGNORECASE))

    async def _emit_status(self, __event_emitter__, applied_fixes: List[str]):
        """Emit a status notification listing the fixes that were applied.

        No-op when the status valve is off or nothing was fixed.
        """
        if not self.valves.show_status or not applied_fixes:
            return

        description = f"✓ Markdown Normalized: {', '.join(applied_fixes)}"

        try:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {
                        "description": description,
                        "done": True,
                    },
                }
            )
        except Exception as e:
            # Fix: use the module logger instead of print() per the project
            # logging standard; print output is lost in production.
            logger.error(f"Error emitting status: {e}")

    async def _emit_debug_log(
        self, __event_call__, applied_fixes: List[str], original: str, normalized: str
    ):
        """Emit a debug log to the browser console (F12) via JS execution.

        No-op when the debug valve is off or no __event_call__ is available.
        """
        if not self.valves.show_debug_log or not __event_call__:
            return

        try:
            # Construct JS code; json.dumps safely embeds the strings as
            # JS literals (quotes/newlines escaped).
            js_code = f"""
            (async function() {{
                console.group("🛠️ Markdown Normalizer Debug");
                console.log("Applied Fixes:", {json.dumps(applied_fixes, ensure_ascii=False)});
                console.log("Original Content:", {json.dumps(original, ensure_ascii=False)});
                console.log("Normalized Content:", {json.dumps(normalized, ensure_ascii=False)});
                console.groupEnd();
            }})();
            """

            await __event_call__(
                {
                    "type": "execute",
                    "data": {"code": js_code},
                }
            )
        except Exception as e:
            # Fix: use the module logger instead of print() per the project
            # logging standard.
            logger.error(f"Error emitting debug log: {e}")

    async def outlet(
        self,
        body: dict,
        __user__: Optional[dict] = None,
        __event_emitter__=None,
        __event_call__=None,
        __metadata__: Optional[dict] = None,
    ) -> dict:
        """
        Process the response body to normalize Markdown content.

        Only the last message is touched, and only when it is an assistant
        message with string content that does not look like HTML.
        """
        if "messages" in body and body["messages"]:
            last = body["messages"][-1]
            content = last.get("content", "") or ""

            if last.get("role") == "assistant" and isinstance(content, str):
                # Skip if content looks like HTML to avoid breaking it
                if self._contains_html(content):
                    return body

                # Configure normalizer based on valves
                config = NormalizerConfig(
                    enable_escape_fix=self.valves.enable_escape_fix,
                    enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
                    enable_code_block_fix=self.valves.enable_code_block_fix,
                    enable_latex_fix=self.valves.enable_latex_fix,
                    enable_list_fix=self.valves.enable_list_fix,
                    enable_unclosed_block_fix=self.valves.enable_unclosed_block_fix,
                    enable_fullwidth_symbol_fix=self.valves.enable_fullwidth_symbol_fix,
                    enable_mermaid_fix=self.valves.enable_mermaid_fix,
                    enable_heading_fix=self.valves.enable_heading_fix,
                    enable_table_fix=self.valves.enable_table_fix,
                    enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
                )

                normalizer = ContentNormalizer(config)

                # Execute normalization
                new_content = normalizer.normalize(content)

                # Update content and report only when something changed
                if new_content != content:
                    last["content"] = new_content

                    if __event_emitter__:
                        await self._emit_status(
                            __event_emitter__, normalizer.applied_fixes
                        )
                        await self._emit_debug_log(
                            __event_call__,
                            normalizer.applied_fixes,
                            content,
                            new_content,
                        )

        return body
|
||||
544
plugins/filters/markdown_normalizer/markdown_normalizer_cn.py
Normal file
@@ -0,0 +1,544 @@
|
||||
"""
|
||||
title: Markdown 格式修复器 (Markdown Normalizer)
|
||||
author: Fu-Jie
|
||||
author_url: https://github.com/Fu-Jie
|
||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||
version: 1.0.0
|
||||
description: 生产级内容规范化过滤器,修复 LLM 输出中常见的 Markdown 格式问题,如损坏的代码块、LaTeX 公式、Mermaid 图表和列表格式。
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, List, Callable
|
||||
import re
|
||||
import logging
|
||||
import asyncio
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class NormalizerConfig:
    """Configuration flags enabling/disabling individual normalization rules."""

    enable_escape_fix: bool = True  # fix excessive escape characters
    enable_thought_tag_fix: bool = True  # normalize chain-of-thought tags
    enable_code_block_fix: bool = True  # fix code-block formatting
    enable_latex_fix: bool = True  # fix LaTeX formula formatting
    enable_list_fix: bool = False  # fix list-item newlines (off: can be too aggressive)
    enable_unclosed_block_fix: bool = True  # auto-close unclosed code blocks
    enable_fullwidth_symbol_fix: bool = False  # fix full-width symbols in code blocks
    enable_mermaid_fix: bool = True  # fix common Mermaid syntax errors
    enable_heading_fix: bool = True  # fix missing heading space (#Header -> # Header)
    enable_table_fix: bool = True  # fix missing closing pipe in tables
    enable_xml_tag_cleanup: bool = True  # clean up leftover XML tags

    # Custom cleaner callables for advanced extension.
    custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
||||
|
||||
|
||||
class ContentNormalizer:
|
||||
"""LLM Output Content Normalizer - Production Grade Implementation"""
|
||||
|
||||
# --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
|
||||
_PATTERNS = {
|
||||
# Code block prefix: if ``` is not at start of line or file
|
||||
"code_block_prefix": re.compile(r"(?<!^)(?<!\n)(```)", re.MULTILINE),
|
||||
# Code block suffix: ```lang followed by non-whitespace (no newline)
|
||||
"code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
|
||||
# Code block indent: whitespace at start of line + ```
|
||||
"code_block_indent": re.compile(r"^[ \t]+(```)", re.MULTILINE),
|
||||
# Thought tag: </thought> followed by optional whitespace/newlines
|
||||
"thought_end": re.compile(
|
||||
r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
|
||||
),
|
||||
"thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
|
||||
# LaTeX block: \[ ... \]
|
||||
"latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
|
||||
# LaTeX inline: \( ... \)
|
||||
"latex_paren_inline": re.compile(r"\\\((.+?)\\\)"),
|
||||
# List item: non-newline + digit + dot + space
|
||||
"list_item": re.compile(r"([^\n])(\d+\. )"),
|
||||
# XML artifacts (e.g. Claude's)
|
||||
"xml_artifacts": re.compile(
|
||||
r"</?(?:antArtifact|antThinking|artifact)[^>]*>", re.IGNORECASE
|
||||
),
|
||||
# Mermaid: 匹配各种形状的节点并为未加引号的标签添加引号
|
||||
# 修复"反向优化"问题:必须精确匹配各种形状的定界符,避免破坏形状结构
|
||||
# 优先级:长定界符优先匹配
|
||||
"mermaid_node": re.compile(
|
||||
r"(\w+)\s*(?:"
|
||||
r"(\(\(\()(?![\"])(.*?)(?<![\"])(\)\)\))|" # (((...))) Double Circle
|
||||
r"(\(\()(?![\"])(.*?)(?<![\"])(\)\))|" # ((...)) Circle
|
||||
r"(\(\[)(?![\"])(.*?)(?<![\"])(\]\))|" # ([...]) Stadium
|
||||
r"(\[\()(?![\"])(.*?)(?<![\"])(\)\])|" # [(...)] Cylinder
|
||||
r"(\[\[)(?![\"])(.*?)(?<![\"])(\]\])|" # [[...]] Subroutine
|
||||
r"(\{\{)(?![\"])(.*?)(?<![\"])(\}\})|" # {{...}} Hexagon
|
||||
r"(\[/)(?![\"])(.*?)(?<![\"])(/\])|" # [/.../] Parallelogram
|
||||
r"(\[\\)(?![\"])(.*?)(?<![\"])(\\\])|" # [\...\] Parallelogram Alt
|
||||
r"(\[/)(?![\"])(.*?)(?<![\"])(\\\])|" # [/...\] Trapezoid
|
||||
r"(\[\\)(?![\"])(.*?)(?<![\"])(/\])|" # [\.../] Trapezoid Alt
|
||||
r"(\()(?![\"])(.*?)(?<![\"])(\))|" # (...) Round
|
||||
r"(\[)(?![\"])(.*?)(?<![\"])(\])|" # [...] Square
|
||||
r"(\{)(?![\"])(.*?)(?<![\"])(\})|" # {...} Rhombus
|
||||
r"(>)(?![\"])(.*?)(?<![\"])(\])" # >...] Asymmetric
|
||||
r")"
|
||||
),
|
||||
# Heading: #Heading -> # Heading
|
||||
"heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
|
||||
# Table: | col1 | col2 -> | col1 | col2 |
|
||||
"table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
|
||||
}
|
||||
|
||||
def __init__(self, config: Optional[NormalizerConfig] = None):
|
||||
self.config = config or NormalizerConfig()
|
||||
self.applied_fixes = []
|
||||
|
||||
def normalize(self, content: str) -> str:
|
||||
"""Main entry point: apply all normalization rules in order"""
|
||||
self.applied_fixes = []
|
||||
if not content:
|
||||
return content
|
||||
|
||||
original_content = content # Keep a copy for logging
|
||||
|
||||
try:
|
||||
# 1. Escape character fix (Must be first)
|
||||
if self.config.enable_escape_fix:
|
||||
original = content
|
||||
content = self._fix_escape_characters(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Escape Chars")
|
||||
|
||||
# 2. Thought tag normalization
|
||||
if self.config.enable_thought_tag_fix:
|
||||
original = content
|
||||
content = self._fix_thought_tags(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Normalize Thought Tags")
|
||||
|
||||
# 3. Code block formatting fix
|
||||
if self.config.enable_code_block_fix:
|
||||
original = content
|
||||
content = self._fix_code_blocks(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Code Blocks")
|
||||
|
||||
# 4. LaTeX formula normalization
|
||||
if self.config.enable_latex_fix:
|
||||
original = content
|
||||
content = self._fix_latex_formulas(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Normalize LaTeX")
|
||||
|
||||
# 5. List formatting fix
|
||||
if self.config.enable_list_fix:
|
||||
original = content
|
||||
content = self._fix_list_formatting(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix List Format")
|
||||
|
||||
# 6. Unclosed code block fix
|
||||
if self.config.enable_unclosed_block_fix:
|
||||
original = content
|
||||
content = self._fix_unclosed_code_blocks(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Close Code Blocks")
|
||||
|
||||
# 7. Full-width symbol fix (in code blocks only)
|
||||
if self.config.enable_fullwidth_symbol_fix:
|
||||
original = content
|
||||
content = self._fix_fullwidth_symbols_in_code(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Full-width Symbols")
|
||||
|
||||
# 8. Mermaid syntax fix
|
||||
if self.config.enable_mermaid_fix:
|
||||
original = content
|
||||
content = self._fix_mermaid_syntax(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Mermaid Syntax")
|
||||
|
||||
# 9. Heading fix
|
||||
if self.config.enable_heading_fix:
|
||||
original = content
|
||||
content = self._fix_headings(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Headings")
|
||||
|
||||
# 10. Table fix
|
||||
if self.config.enable_table_fix:
|
||||
original = content
|
||||
content = self._fix_tables(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Fix Tables")
|
||||
|
||||
# 11. XML tag cleanup
|
||||
if self.config.enable_xml_tag_cleanup:
|
||||
original = content
|
||||
content = self._cleanup_xml_tags(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Cleanup XML Tags")
|
||||
|
||||
# 9. Custom cleaners
|
||||
for cleaner in self.config.custom_cleaners:
|
||||
original = content
|
||||
content = cleaner(content)
|
||||
if content != original:
|
||||
self.applied_fixes.append("Custom Cleaner")
|
||||
|
||||
if self.applied_fixes:
|
||||
print(f"[Markdown Normalizer] Applied fixes: {self.applied_fixes}")
|
||||
print(
|
||||
f"[Markdown Normalizer] --- Original Content ---\n{original_content}\n------------------------"
|
||||
)
|
||||
print(
|
||||
f"[Markdown Normalizer] --- Normalized Content ---\n{content}\n--------------------------"
|
||||
)
|
||||
|
||||
return content
|
||||
|
||||
except Exception as e:
|
||||
# Production safeguard: return original content on error
|
||||
logger.error(f"Content normalization failed: {e}", exc_info=True)
|
||||
return content
|
||||
|
||||
def _fix_escape_characters(self, content: str) -> str:
|
||||
"""Fix excessive escape characters"""
|
||||
content = content.replace("\\r\\n", "\n")
|
||||
content = content.replace("\\n", "\n")
|
||||
content = content.replace("\\t", "\t")
|
||||
content = content.replace("\\\\", "\\")
|
||||
return content
|
||||
|
||||
def _fix_thought_tags(self, content: str) -> str:
|
||||
"""Normalize thought tags: unify naming and fix spacing"""
|
||||
# 1. Standardize start tag: <think>, <thinking> -> <thought>
|
||||
content = self._PATTERNS["thought_start"].sub("<thought>", content)
|
||||
# 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
|
||||
return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)
|
||||
|
||||
def _fix_code_blocks(self, content: str) -> str:
|
||||
"""Fix code block formatting (prefixes, suffixes, indentation)"""
|
||||
# Remove indentation before code blocks
|
||||
content = self._PATTERNS["code_block_indent"].sub(r"\1", content)
|
||||
# Ensure newline before ```
|
||||
content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
|
||||
# Ensure newline after ```lang
|
||||
content = self._PATTERNS["code_block_suffix"].sub(r"\1\n\2", content)
|
||||
return content
|
||||
|
||||
def _fix_latex_formulas(self, content: str) -> str:
|
||||
"""Normalize LaTeX formulas: \[ -> $$ (block), \( -> $ (inline)"""
|
||||
content = self._PATTERNS["latex_bracket_block"].sub(r"$$\1$$", content)
|
||||
content = self._PATTERNS["latex_paren_inline"].sub(r"$\1$", content)
|
||||
return content
|
||||
|
||||
def _fix_list_formatting(self, content: str) -> str:
|
||||
"""Fix missing newlines in lists (e.g., 'text1. item' -> 'text\\n1. item')"""
|
||||
return self._PATTERNS["list_item"].sub(r"\1\n\2", content)
|
||||
|
||||
def _fix_unclosed_code_blocks(self, content: str) -> str:
|
||||
"""Auto-close unclosed code blocks"""
|
||||
if content.count("```") % 2 != 0:
|
||||
content += "\n```"
|
||||
return content
|
||||
|
||||
def _fix_fullwidth_symbols_in_code(self, content: str) -> str:
|
||||
"""Convert full-width symbols to half-width inside code blocks"""
|
||||
FULLWIDTH_MAP = {
|
||||
",": ",",
|
||||
"。": ".",
|
||||
"(": "(",
|
||||
")": ")",
|
||||
"【": "[",
|
||||
"】": "]",
|
||||
";": ";",
|
||||
":": ":",
|
||||
"?": "?",
|
||||
"!": "!",
|
||||
'"': '"',
|
||||
'"': '"',
|
||||
""": "'", """: "'",
|
||||
}
|
||||
|
||||
parts = content.split("```")
|
||||
# Code block content is at odd indices: 1, 3, 5...
|
||||
for i in range(1, len(parts), 2):
|
||||
for full, half in FULLWIDTH_MAP.items():
|
||||
parts[i] = parts[i].replace(full, half)
|
||||
|
||||
return "```".join(parts)
|
||||
|
||||
    def _fix_mermaid_syntax(self, content: str) -> str:
        """Fix common Mermaid syntax errors while preserving node shapes.

        Inside ```mermaid fences: wraps unquoted node labels in double quotes
        (so labels containing brackets/parens render correctly) and appends
        missing ``end`` keywords for unclosed ``subgraph`` sections.
        """

        def replacer(match):
            # Group 1 is the node ID; shape groups start at index 2, with
            # three groups per shape: (open delimiter, label, close delimiter).
            id_str = match.group(1)

            # Find the shape alternative that actually matched (non-None).
            groups = match.groups()
            for i in range(1, len(groups), 3):
                if groups[i] is not None:
                    open_char = groups[i]
                    content = groups[i + 1]
                    close_char = groups[i + 2]

                    # Escape quotes already present inside the label.
                    content = content.replace('"', '\\"')

                    return f'{id_str}{open_char}"{content}"{close_char}'

            return match.group(0)

        parts = content.split("```")
        for i in range(1, len(parts), 2):
            # Only touch fenced blocks whose info string mentions mermaid.
            lang_line = parts[i].split("\n", 1)[0].strip().lower()
            if "mermaid" in lang_line:
                # Quote unquoted labels, shape-aware (see regex priorities).
                parts[i] = self._PATTERNS["mermaid_node"].sub(replacer, parts[i])

                # Auto-close subgraphs: heuristic keyword count over the whole
                # block. Since labels were just quoted above, stray "end"
                # words inside labels are unlikely to skew the count, and a
                # quote-stripping pass would be disproportionately expensive.
                subgraph_count = len(
                    re.findall(r"\bsubgraph\b", parts[i], re.IGNORECASE)
                )
                end_count = len(re.findall(r"\bend\b", parts[i], re.IGNORECASE))

                if subgraph_count > end_count:
                    missing_ends = subgraph_count - end_count
                    parts[i] = parts[i].rstrip() + ("\n end" * missing_ends) + "\n"

        return "```".join(parts)
|
||||
|
||||
def _fix_headings(self, content: str) -> str:
|
||||
"""Fix missing space in headings: #Heading -> # Heading"""
|
||||
# We only fix if it's not inside a code block.
|
||||
# But splitting by code block is expensive.
|
||||
# Given headings usually don't appear inside code blocks without space in valid code (except comments),
|
||||
# we might risk false positives in comments like `#TODO`.
|
||||
# To be safe, let's split by code blocks.
|
||||
|
||||
parts = content.split("```")
|
||||
for i in range(0, len(parts), 2): # Even indices are markdown text
|
||||
parts[i] = self._PATTERNS["heading_space"].sub(r"\1 \2", parts[i])
|
||||
return "```".join(parts)
|
||||
|
||||
def _fix_tables(self, content: str) -> str:
|
||||
"""Fix tables missing closing pipe"""
|
||||
parts = content.split("```")
|
||||
for i in range(0, len(parts), 2):
|
||||
parts[i] = self._PATTERNS["table_pipe"].sub(r"\1|", parts[i])
|
||||
return "```".join(parts)
|
||||
|
||||
def _cleanup_xml_tags(self, content: str) -> str:
|
||||
"""Remove leftover XML tags"""
|
||||
return self._PATTERNS["xml_artifacts"].sub("", content)
|
||||
|
||||
|
||||
class Filter:
|
||||
    class Valves(BaseModel):
        """User-tunable switches for the Markdown normalizer filter.

        Field descriptions are user-facing strings rendered in the OpenWebUI
        admin panel and are intentionally kept in Chinese.
        """

        # Filter ordering: higher values run later in the outlet chain.
        priority: int = Field(
            default=50,
            description="优先级。数值越高运行越晚 (建议在其他过滤器之后运行)。",
        )
        # Per-rule toggles, mirrored into NormalizerConfig in outlet().
        enable_escape_fix: bool = Field(
            default=True, description="修复过度的转义字符 (\\n, \\t 等)"
        )
        enable_thought_tag_fix: bool = Field(
            default=True, description="规范化思维链标签 (<think> -> <thought>)"
        )
        enable_code_block_fix: bool = Field(
            default=True,
            description="修复代码块格式 (缩进、换行)",
        )
        enable_latex_fix: bool = Field(
            default=True, description="规范化 LaTeX 公式 (\\[ -> $$, \\( -> $)"
        )
        # Experimental: off by default (may split legitimate inline numbers).
        enable_list_fix: bool = Field(
            default=False, description="修复列表项换行 (实验性)"
        )
        enable_unclosed_block_fix: bool = Field(
            default=True, description="自动闭合未闭合的代码块"
        )
        enable_fullwidth_symbol_fix: bool = Field(
            default=False, description="修复代码块中的全角符号"
        )
        enable_mermaid_fix: bool = Field(
            default=True,
            description="修复常见的 Mermaid 语法错误 (如未加引号的标签)",
        )
        enable_heading_fix: bool = Field(
            default=True,
            description="修复标题中缺失的空格 (#Header -> # Header)",
        )
        enable_table_fix: bool = Field(
            default=True, description="修复表格中缺失的闭合管道符"
        )
        enable_xml_tag_cleanup: bool = Field(
            default=True, description="清理残留的 XML 标签"
        )
        # UI feedback switches.
        show_status: bool = Field(default=True, description="应用修复时显示状态通知")
        show_debug_log: bool = Field(
            default=False, description="在浏览器控制台打印调试日志 (F12)"
        )
|
||||
|
||||
    def __init__(self):
        # Instantiate valves with their declared defaults; OpenWebUI
        # overwrites these with admin-configured values at runtime.
        self.valves = self.Valves()
|
||||
|
||||
def _contains_html(self, content: str) -> bool:
|
||||
"""Check if content contains HTML tags (to avoid breaking HTML output)"""
|
||||
pattern = r"<\s*/?\s*(?:html|head|body|div|span|p|br|hr|ul|ol|li|table|thead|tbody|tfoot|tr|td|th|img|a|b|i|strong|em|code|pre|blockquote|h[1-6]|script|style|form|input|button|label|select|option|iframe|link|meta|title)\b"
|
||||
return bool(re.search(pattern, content, re.IGNORECASE))
|
||||
|
||||
async def _emit_status(self, __event_emitter__, applied_fixes: List[str]):
|
||||
"""Emit status notification"""
|
||||
if not self.valves.show_status or not applied_fixes:
|
||||
return
|
||||
|
||||
description = "✓ Markdown 已修复"
|
||||
if applied_fixes:
|
||||
# Translate fix names for status display
|
||||
fix_map = {
|
||||
"Fix Escape Chars": "转义字符",
|
||||
"Normalize Thought Tags": "思维标签",
|
||||
"Fix Code Blocks": "代码块",
|
||||
"Normalize LaTeX": "LaTeX公式",
|
||||
"Fix List Format": "列表格式",
|
||||
"Close Code Blocks": "闭合代码块",
|
||||
"Fix Full-width Symbols": "全角符号",
|
||||
"Fix Mermaid Syntax": "Mermaid语法",
|
||||
"Fix Headings": "标题格式",
|
||||
"Fix Tables": "表格格式",
|
||||
"Cleanup XML Tags": "XML清理",
|
||||
"Custom Cleaner": "自定义清理",
|
||||
}
|
||||
translated_fixes = [fix_map.get(fix, fix) for fix in applied_fixes]
|
||||
description += f": {', '.join(translated_fixes)}"
|
||||
|
||||
try:
|
||||
await __event_emitter__(
|
||||
{
|
||||
"type": "status",
|
||||
"data": {
|
||||
"description": description,
|
||||
"done": True,
|
||||
},
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error emitting status: {e}")
|
||||
|
||||
async def _emit_debug_log(
|
||||
self,
|
||||
__event_emitter__,
|
||||
applied_fixes: List[str],
|
||||
original: str,
|
||||
normalized: str,
|
||||
):
|
||||
"""Emit debug log to browser console via JS execution"""
|
||||
|
||||
async def _emit_debug_log(
|
||||
self, __event_call__, applied_fixes: List[str], original: str, normalized: str
|
||||
):
|
||||
"""Emit debug log to browser console via JS execution"""
|
||||
if not self.valves.show_debug_log or not __event_call__:
|
||||
return
|
||||
|
||||
try:
|
||||
# Prepare data for JS
|
||||
log_data = {
|
||||
"fixes": applied_fixes,
|
||||
"original": original,
|
||||
"normalized": normalized,
|
||||
}
|
||||
|
||||
# Construct JS code
|
||||
js_code = f"""
|
||||
(async function() {{
|
||||
console.group("🛠️ Markdown Normalizer Debug");
|
||||
console.log("Applied Fixes:", {json.dumps(applied_fixes, ensure_ascii=False)});
|
||||
console.log("Original Content:", {json.dumps(original, ensure_ascii=False)});
|
||||
console.log("Normalized Content:", {json.dumps(normalized, ensure_ascii=False)});
|
||||
console.groupEnd();
|
||||
}})();
|
||||
"""
|
||||
await __event_call__(
|
||||
{
|
||||
"type": "execute",
|
||||
"data": {"code": js_code},
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error emitting debug log: {e}")
|
||||
|
||||
    async def outlet(
        self,
        body: dict,
        __user__: Optional[dict] = None,
        __event_emitter__=None,
        __event_call__=None,
        __metadata__: Optional[dict] = None,
    ) -> dict:
        """
        Process the response body to normalize Markdown content.

        Only the last message is considered, and only when it is an
        assistant message with string content. HTML-looking content is left
        untouched so embedded markup is not corrupted. When fixes were
        applied, a status toast and a browser-console debug log are emitted.
        """
        if "messages" in body and body["messages"]:
            last = body["messages"][-1]
            content = last.get("content", "") or ""

            if last.get("role") == "assistant" and isinstance(content, str):
                # Skip if content looks like HTML to avoid breaking it.
                if self._contains_html(content):
                    return body

                # Mirror the valve switches into the normalizer config.
                config = NormalizerConfig(
                    enable_escape_fix=self.valves.enable_escape_fix,
                    enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
                    enable_code_block_fix=self.valves.enable_code_block_fix,
                    enable_latex_fix=self.valves.enable_latex_fix,
                    enable_list_fix=self.valves.enable_list_fix,
                    enable_unclosed_block_fix=self.valves.enable_unclosed_block_fix,
                    enable_fullwidth_symbol_fix=self.valves.enable_fullwidth_symbol_fix,
                    enable_mermaid_fix=self.valves.enable_mermaid_fix,
                    enable_heading_fix=self.valves.enable_heading_fix,
                    enable_table_fix=self.valves.enable_table_fix,
                    enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
                )

                normalizer = ContentNormalizer(config)

                # Execute normalization.
                new_content = normalizer.normalize(content)

                # Update the message only when something actually changed.
                if new_content != content:
                    last["content"] = new_content

                    # NOTE(review): the debug log call needs __event_call__,
                    # yet both emits are gated only on __event_emitter__ —
                    # confirm this gating is intentional.
                    if __event_emitter__:
                        await self._emit_status(
                            __event_emitter__, normalizer.applied_fixes
                        )
                        await self._emit_debug_log(
                            __event_call__,
                            normalizer.applied_fixes,
                            content,
                            new_content,
                        )

        return body
|
||||
191
plugins/filters/markdown_normalizer/test_markdown_normalizer.py
Normal file
@@ -0,0 +1,191 @@
|
||||
import unittest
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add the current directory to sys.path to import the module
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append(current_dir)
|
||||
|
||||
from markdown_normalizer import ContentNormalizer, NormalizerConfig
|
||||
|
||||
|
||||
class TestMarkdownNormalizer(unittest.TestCase):
    """Unit tests for ContentNormalizer's individual fixes."""

    def setUp(self):
        # Enable every rule so each test exercises its fix end to end.
        self.config = NormalizerConfig(
            enable_escape_fix=True,
            enable_thought_tag_fix=True,
            enable_code_block_fix=True,
            enable_latex_fix=True,
            enable_list_fix=True,
            enable_unclosed_block_fix=True,
            enable_fullwidth_symbol_fix=True,
            enable_mermaid_fix=True,
            enable_xml_tag_cleanup=True,
        )
        self.normalizer = ContentNormalizer(self.config)

    def test_escape_fix(self):
        input_text = "Line 1\\nLine 2\\tTabbed"
        expected = "Line 1\nLine 2\tTabbed"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_thought_tag_fix(self):
        # Case 1: Standard tag spacing
        input_text = "Thinking...</thought>Result"
        expected = "Thinking...</thought>\n\nResult"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

        # Case 2: Tag standardization (<think> -> <thought>)
        input_text_deepseek = "<think>Deep thinking...</think>Result"
        expected_deepseek = "<thought>Deep thinking...</thought>\n\nResult"
        self.assertEqual(
            self.normalizer.normalize(input_text_deepseek), expected_deepseek
        )

    def test_code_block_fix(self):
        # Case 1: Indentation
        self.assertEqual(self.normalizer._fix_code_blocks("  ```python"), "```python")

        # Case 2: Prefix (newline before block)
        self.assertEqual(
            self.normalizer._fix_code_blocks("Text```python"), "Text\n```python"
        )

        # Case 3: Suffix (newline after lang)
        self.assertEqual(
            self.normalizer._fix_code_blocks("```python print('hi')"),
            "```python\nprint('hi')",
        )

    def test_latex_fix(self):
        input_text = "Block: \\[ x^2 \\] Inline: \\( E=mc^2 \\)"
        expected = "Block: $$ x^2 $$ Inline: $ E=mc^2 $"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_list_fix(self):
        # A numbered item glued to the preceding text gets its own line.
        input_text_bad = "Header1. Item 1"
        expected = "Header\n1. Item 1"
        self.assertEqual(self.normalizer.normalize(input_text_bad), expected)

    def test_unclosed_code_block_fix(self):
        input_text = "```python\nprint('hello')"
        expected = "```python\nprint('hello')\n```"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_fullwidth_symbol_fix(self):
        # Full-width symbols are converted inside the fence only; prose
        # outside keeps its full-width punctuation.
        input_text = "Outside:Fullwidth ```python\nprint('hello')```"

        normalized = self.normalizer.normalize(input_text)
        self.assertIn("print('hello')", normalized)
        self.assertIn("Outside:Fullwidth", normalized)
        self.assertNotIn("(", normalized)
        self.assertNotIn(")", normalized)

    def test_mermaid_fix(self):
        # Unquoted labels are wrapped in quotes. The regex-based fix handles
        # mixed brackets well (e.g. [] inside ()) but cannot perfectly handle
        # same-type nesting (e.g. {} inside {}) without a parser.
        input_text = """
```mermaid
graph TD
    A[Label with (parens)] --> B(Label with [brackets])
    C{Label with [brackets]}
```
"""
        normalized = self.normalizer.normalize(input_text)

        self.assertIn('A["Label with (parens)"]', normalized)
        self.assertIn('B("Label with [brackets]")', normalized)
        self.assertIn('C{"Label with [brackets]"}', normalized)

    def test_mermaid_shapes_regression(self):
        # Regression test for the "reverse optimization" where ((...)) was
        # broken into ("(...)") instead of (("...")).
        input_text = """
```mermaid
graph TD
    Start((开始)) --> Input[[输入]]
    Input --> Verify{验证}
    Verify --> End(((结束)))
```
"""
        normalized = self.normalizer.normalize(input_text)
        self.assertIn('Start(("开始"))', normalized)
        self.assertIn('Input[["输入"]]', normalized)
        self.assertIn('Verify{"验证"}', normalized)
        self.assertIn('End((("结束")))', normalized)

    def test_xml_cleanup(self):
        input_text = "Some text <antArtifact>hidden</antArtifact> visible"
        expected = "Some text hidden visible"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_heading_fix(self):
        input_text = "#Heading 1\n##Heading 2\n### Valid Heading"
        expected = "# Heading 1\n## Heading 2\n### Valid Heading"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_table_fix(self):
        input_text = "| Col 1 | Col 2\n| Val 1 | Val 2"
        expected = "| Col 1 | Col 2|\n| Val 1 | Val 2|"
        self.assertEqual(self.normalizer.normalize(input_text), expected)

    def test_mermaid_subgraph_autoclose(self):
        """Test auto-closing of Mermaid subgraphs."""
        # Case 1: Simple unclosed subgraph. Simple IDs like A and B are not
        # matched by the mermaid_node regex (it requires a shape delimiter),
        # so they remain unquoted.
        original = """
```mermaid
graph TD
    subgraph One
    A --> B
```
"""
        expected = """
```mermaid
graph TD
    subgraph One
    A --> B
 end
```
"""
        normalized = self.normalizer.normalize(original)
        self.assertIn("end", normalized)
        self.assertEqual(normalized.strip(), expected.strip())

        # Case 2: Nested unclosed subgraphs get one "end" each.
        original_nested = """
```mermaid
graph TD
    subgraph Outer
    subgraph Inner
    C --> D
```
"""
        normalized_nested = self.normalizer.normalize(original_nested)
        self.assertEqual(normalized_nested.count("end"), 2)
|
||||
|
||||
|
||||
# Allow running this test module directly: `python test_markdown_normalizer.py`.
if __name__ == "__main__":
    unittest.main()
|
||||
133
scripts/download_plugin_images.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""
|
||||
Download plugin images from OpenWebUI Community
|
||||
下载远程插件图片到本地目录
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import requests
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Add current directory to path
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from openwebui_community_client import get_client
|
||||
|
||||
|
||||
def find_local_plugin_by_id(plugins_dir: str, post_id: str) -> str | None:
|
||||
"""根据 post_id 查找本地插件文件"""
|
||||
for root, _, files in os.walk(plugins_dir):
|
||||
for file in files:
|
||||
if file.endswith(".py"):
|
||||
file_path = os.path.join(root, file)
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read(2000)
|
||||
|
||||
id_match = re.search(
|
||||
r"(?:openwebui_id|post_id):\s*([a-z0-9-]+)", content
|
||||
)
|
||||
if id_match and id_match.group(1).strip() == post_id:
|
||||
return file_path
|
||||
return None
|
||||
|
||||
|
||||
def download_image(url: str, save_path: str) -> bool:
    """Download *url* to *save_path*; return True on success, False otherwise."""
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        with open(save_path, "wb") as fh:
            fh.write(response.content)
    except Exception as e:
        # Best-effort: report and let the caller count the failure.
        print(f"  Error downloading: {e}")
        return False
    return True
|
||||
|
||||
|
||||
def get_image_extension(url: str) -> str:
    """Derive the image file extension from a URL path (lowercased).

    Falls back to ``.png`` for unknown or missing extensions.
    """
    known_extensions = (".png", ".jpg", ".jpeg", ".gif", ".webp")
    ext = os.path.splitext(urlparse(url).path)[1].lower()
    return ext if ext in known_extensions else ".png"
|
||||
|
||||
|
||||
def main():
    """Mirror the first community image of each published plugin locally.

    For every remote post: match it to its local plugin file by ID, then
    save the post's first image next to the plugin (skipping images that
    already exist). Prints a per-post progress log and a final summary.
    """
    try:
        client = get_client()
    except ValueError as e:
        # Typically a missing/invalid API key from the environment.
        print(f"Error: {e}")
        sys.exit(1)

    # Repository layout: <repo>/scripts/this_file -> plugins live at <repo>/plugins.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    plugins_dir = os.path.join(base_dir, "plugins")

    print("Fetching remote posts from OpenWebUI Community...")
    posts = client.get_all_posts()
    print(f"Found {len(posts)} remote posts.\n")

    downloaded = 0
    skipped = 0
    not_found = 0

    for post in posts:
        post_id = post.get("id")
        title = post.get("title", "Unknown")
        media = post.get("media", [])

        if not media:
            continue

        # Only the first image is mirrored.
        first_media = media[0] if isinstance(media, list) else media

        # Entries may be dicts ({'url': ..., 'type': 'image'}) or bare URL strings.
        if isinstance(first_media, dict):
            image_url = first_media.get("url")
        else:
            image_url = first_media

        if not image_url:
            continue

        print(f"Processing: {title}")
        print(f" Image URL: {image_url}")

        # Match the post to its local plugin file by ID.
        local_plugin = find_local_plugin_by_id(plugins_dir, post_id)
        if not local_plugin:
            print(f" ⚠️ No local plugin found for ID: {post_id}")
            not_found += 1
            continue

        # Save next to the plugin, named after the plugin file.
        plugin_dir = os.path.dirname(local_plugin)
        plugin_name = os.path.splitext(os.path.basename(local_plugin))[0]
        ext = get_image_extension(image_url)
        save_path = os.path.join(plugin_dir, plugin_name + ext)

        # Skip images mirrored on a previous run.
        if os.path.exists(save_path):
            print(f" ⏭️ Image already exists: {os.path.basename(save_path)}")
            skipped += 1
            continue

        print(f" Downloading to: {save_path}")
        if download_image(image_url, save_path):
            print(f" ✅ Downloaded: {os.path.basename(save_path)}")
            downloaded += 1
        else:
            print(f" ❌ Failed to download")

    print(f"\n{'='*50}")
    print(
        f"Finished: {downloaded} downloaded, {skipped} skipped, {not_found} not found locally"
    )
|
||||
@@ -1,3 +1,8 @@
|
||||
"""
|
||||
Fetch remote plugin versions from OpenWebUI Community
|
||||
获取远程插件版本信息
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
@@ -5,22 +10,17 @@ import sys
|
||||
# Add current directory to path
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
try:
|
||||
from openwebui_stats import OpenWebUIStats
|
||||
except ImportError:
|
||||
print("Error: openwebui_stats.py not found.")
|
||||
sys.exit(1)
|
||||
from openwebui_community_client import get_client
|
||||
|
||||
|
||||
def main():
|
||||
# Try to get token from env
|
||||
token = os.environ.get("OPENWEBUI_API_KEY")
|
||||
if not token:
|
||||
print("Error: OPENWEBUI_API_KEY environment variable not set.")
|
||||
try:
|
||||
client = get_client()
|
||||
except ValueError as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
print("Fetching remote plugins from OpenWebUI...")
|
||||
client = OpenWebUIStats(token)
|
||||
print("Fetching remote plugins from OpenWebUI Community...")
|
||||
try:
|
||||
posts = client.get_all_posts()
|
||||
except Exception as e:
|
||||
@@ -29,9 +29,6 @@ def main():
|
||||
|
||||
formatted_plugins = []
|
||||
for post in posts:
|
||||
# Save the full raw post object to ensure we have "compliant update json data"
|
||||
# We inject a 'type' field just for the comparison script to know it's remote,
|
||||
# but otherwise keep the structure identical to the API response.
|
||||
post["type"] = "remote_plugin"
|
||||
formatted_plugins.append(post)
|
||||
|
||||
|
||||
659
scripts/openwebui_community_client.py
Normal file
@@ -0,0 +1,659 @@
|
||||
"""
|
||||
OpenWebUI Community Client
|
||||
统一封装所有与 OpenWebUI 官方社区 (openwebui.com) 的 API 交互。
|
||||
|
||||
功能:
|
||||
- 获取用户发布的插件/帖子
|
||||
- 更新插件内容和元数据
|
||||
- 版本比较
|
||||
- 同步插件 ID
|
||||
|
||||
使用方法:
|
||||
from openwebui_community_client import OpenWebUICommunityClient
|
||||
|
||||
client = OpenWebUICommunityClient(api_key="your_api_key")
|
||||
posts = client.get_all_posts()
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import base64
|
||||
import requests
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Optional, Dict, List, Any, Tuple
|
||||
|
||||
# 北京时区 (UTC+8)
|
||||
BEIJING_TZ = timezone(timedelta(hours=8))
|
||||
|
||||
|
||||
class OpenWebUICommunityClient:
|
||||
"""OpenWebUI 官方社区 API 客户端"""
|
||||
|
||||
BASE_URL = "https://api.openwebui.com/api/v1"
|
||||
|
||||
    def __init__(self, api_key: str, user_id: Optional[str] = None):
        """Initialize the client.

        Args:
            api_key: OpenWebUI API key (JWT token or "sk-" style key).
            user_id: User ID; when None it is parsed from the JWT payload.
        """
        self.api_key = api_key
        # Prefer an explicit ID; otherwise try to decode it from the JWT.
        self.user_id = user_id or self._parse_user_id_from_token(api_key)
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        # "sk-" API keys carry no embedded ID — fall back to asking the API.
        if not self.user_id:
            self.user_id = self._get_user_id_from_api()
|
||||
|
||||
def _parse_user_id_from_token(self, token: str) -> Optional[str]:
|
||||
"""从 JWT Token 中解析用户 ID"""
|
||||
# sk- 开头的是 API Key,无法解析用户 ID
|
||||
if token.startswith("sk-"):
|
||||
return None
|
||||
try:
|
||||
parts = token.split(".")
|
||||
if len(parts) >= 2:
|
||||
payload = parts[1]
|
||||
# 添加 padding
|
||||
padding = 4 - len(payload) % 4
|
||||
if padding != 4:
|
||||
payload += "=" * padding
|
||||
decoded = base64.urlsafe_b64decode(payload)
|
||||
data = json.loads(decoded)
|
||||
return data.get("id") or data.get("sub")
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
    def _get_user_id_from_api(self) -> Optional[str]:
        """Fetch the current user's ID via the /auths/ endpoint.

        Returns None on any failure (network error, bad token, unexpected
        payload) — callers treat a missing ID as "unknown user".
        """
        try:
            url = f"{self.BASE_URL}/auths/"
            # NOTE(review): no timeout — a stalled connection blocks forever;
            # consider requests.get(..., timeout=...).
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
            data = response.json()
            return data.get("id")
        except Exception:
            return None
|
||||
|
||||
# ========== 帖子/插件获取 ==========
|
||||
|
||||
def get_user_posts(self, sort: str = "new", page: int = 1) -> List[Dict]:
|
||||
"""
|
||||
获取用户发布的帖子列表
|
||||
|
||||
Args:
|
||||
sort: 排序方式 (new/top/hot)
|
||||
page: 页码
|
||||
|
||||
Returns:
|
||||
帖子列表
|
||||
"""
|
||||
url = f"{self.BASE_URL}/posts/users/{self.user_id}?sort={sort}&page={page}"
|
||||
response = requests.get(url, headers=self.headers)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def get_all_posts(self, sort: str = "new") -> List[Dict]:
|
||||
"""获取所有帖子(自动分页)"""
|
||||
all_posts = []
|
||||
page = 1
|
||||
while True:
|
||||
posts = self.get_user_posts(sort=sort, page=page)
|
||||
if not posts:
|
||||
break
|
||||
all_posts.extend(posts)
|
||||
page += 1
|
||||
return all_posts
|
||||
|
||||
def get_post(self, post_id: str) -> Optional[Dict]:
    """Fetch a single post by ID.

    Args:
        post_id: The post's ID.

    Returns:
        The post dict, or ``None`` when the post does not exist (HTTP 404).

    Raises:
        requests.HTTPError: For any error status other than 404.
    """
    try:
        resp = requests.get(
            f"{self.BASE_URL}/posts/{post_id}", headers=self.headers
        )
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.HTTPError as err:
        # A missing post is an expected outcome, not an error.
        if err.response.status_code == 404:
            return None
        raise
|
||||
|
||||
# ========== 帖子/插件创建 ==========
|
||||
|
||||
def create_post(
    self,
    title: str,
    content: str,
    post_type: str = "function",
    data: Optional[Dict] = None,
    media: Optional[List[str]] = None,
) -> Optional[Dict]:
    """Create a new community post.

    Args:
        title: Post title.
        content: Post body (README / description).
        post_type: Post type (function/tool/filter/pipeline).
        data: Plugin data structure.
        media: Image URLs (plain strings) or pre-built media dicts.

    Returns:
        The created post dict on success, ``None`` on failure.
    """
    try:
        # The API expects media entries as {"url": ...} dicts; accept bare
        # URL strings for convenience and wrap them here.
        media_list = [
            {"url": item} if isinstance(item, str) else item
            for item in (media or [])
            if isinstance(item, (str, dict))
        ]

        payload = {
            "title": title,
            "content": content,
            "type": post_type,
            "data": data or {},
            "media": media_list,
        }
        print(f"  [DEBUG] Payload keys: {list(payload.keys())}")
        print(
            f"  [DEBUG] media format: {media_list[:1] if media_list else 'empty'}"
        )
        response = requests.post(
            f"{self.BASE_URL}/posts/create", headers=self.headers, json=payload
        )
        if response.status_code != 200:
            # Dump a truncated body to help diagnose API rejections.
            print(f"  [DEBUG] Response status: {response.status_code}")
            print(f"  [DEBUG] Response body: {response.text[:500]}")
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"  Error creating post: {e}")
        return None
|
||||
|
||||
def create_plugin(
    self,
    title: str,
    source_code: str,
    readme_content: Optional[str] = None,
    metadata: Optional[Dict] = None,
    media_urls: Optional[List[str]] = None,
    plugin_type: str = "action",
) -> Optional[str]:
    """Create a new plugin post.

    Args:
        title: Plugin title.
        source_code: Plugin source code.
        readme_content: README body shown on the community page.
        metadata: Plugin metadata (description, version, ...).
        media_urls: Image URLs.
        plugin_type: Plugin type (action/filter/pipe).

    Returns:
        The new post's ID on success, ``None`` on failure.
    """
    description = metadata.get("description", "") if metadata else ""

    # Build the function payload the server expects; the ID is assigned
    # server-side.
    function_data = {
        "id": "",
        "name": title,
        "type": plugin_type,
        "content": source_code,
        "meta": {
            "description": description,
            "manifest": metadata or {},
        },
    }

    # BUG FIX: the previous expression
    #   readme_content or metadata.get("description", "") if metadata else ""
    # parsed as `(readme_content or ...) if metadata else ""`, so a provided
    # README was silently discarded whenever metadata was None.  The intent
    # is: prefer the README, fall back to the metadata description.
    result = self.create_post(
        title=title,
        content=readme_content or description,
        post_type="function",
        data={"function": function_data},
        media=media_urls,
    )

    return result.get("id") if result else None
|
||||
|
||||
# ========== 帖子/插件更新 ==========
|
||||
|
||||
def update_post(self, post_id: str, post_data: Dict) -> bool:
    """Push a complete post payload to the update endpoint.

    Args:
        post_id: Post ID.
        post_data: The full post dict to store.

    Returns:
        ``True`` on success.

    Raises:
        requests.HTTPError: If the API responds with an error status.
    """
    resp = requests.post(
        f"{self.BASE_URL}/posts/{post_id}/update",
        headers=self.headers,
        json=post_data,
    )
    resp.raise_for_status()
    return True
|
||||
|
||||
def update_plugin(
    self,
    post_id: str,
    source_code: str,
    readme_content: Optional[str] = None,
    metadata: Optional[Dict] = None,
    media_urls: Optional[List[str]] = None,
) -> bool:
    """Update a plugin post (code + README + metadata + images).

    Args:
        post_id: Post ID.
        source_code: New plugin source code.
        readme_content: README body shown on the community page.
        metadata: Plugin metadata (title, version, description, ...).
        media_urls: Image URLs.

    Returns:
        ``True`` on success, ``False`` when the post cannot be fetched.
    """
    post_data = self.get_post(post_id)
    if not post_data:
        return False

    # Make sure data.function.meta.manifest all exist before writing.
    if "data" not in post_data:
        post_data["data"] = {}
    if "function" not in post_data["data"]:
        post_data["data"]["function"] = {}
    function = post_data["data"]["function"]
    if "meta" not in function:
        function["meta"] = {}
    meta = function["meta"]
    if "manifest" not in meta:
        meta["manifest"] = {}

    # Replace the stored source code.
    function["content"] = source_code

    # Replace the community-page body when a README was provided.
    if readme_content:
        post_data["content"] = readme_content

    if metadata:
        meta["manifest"].update(metadata)
        if "title" in metadata:
            # Keep the post title and function name in sync.
            post_data["title"] = metadata["title"]
            function["name"] = metadata["title"]
        if "description" in metadata:
            meta["description"] = metadata["description"]

    if media_urls:
        post_data["media"] = media_urls

    return self.update_post(post_id, post_data)
|
||||
|
||||
# ========== 图片上传 ==========
|
||||
|
||||
def upload_image(self, file_path: str) -> Optional[str]:
    """Upload an image file to the OpenWebUI community.

    Args:
        file_path: Path to the image on disk.

    Returns:
        The uploaded image's URL, or ``None`` when the file is missing or
        the upload fails.
    """
    if not os.path.exists(file_path):
        return None

    filename = os.path.basename(file_path)

    # Map the extension to a MIME type; unknown extensions fall back to a
    # generic binary type.
    mime_by_ext = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".gif": "image/gif",
        ".webp": "image/webp",
    }
    extension = os.path.splitext(filename)[1].lower()
    content_type = mime_by_ext.get(extension, "application/octet-stream")

    try:
        with open(file_path, "rb") as handle:
            # Multipart upload: do NOT send the JSON Content-Type used by
            # the other endpoints; requests sets the boundary itself.
            upload_headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Accept": "application/json",
            }
            response = requests.post(
                f"{self.BASE_URL}/files/",
                headers=upload_headers,
                files={"file": (filename, handle, content_type)},
            )
        response.raise_for_status()
        return response.json().get("url")
    except Exception as e:
        print(f"  Warning: Failed to upload image: {e}")
        return None
|
||||
|
||||
# ========== 版本比较 ==========
|
||||
|
||||
def get_remote_version(self, post_id: str) -> Optional[str]:
    """Read the published version of a plugin from its remote post.

    Args:
        post_id: Post ID.

    Returns:
        The version string from the post's manifest, or ``None`` when the
        post (or the version field) is missing.
    """
    post_data = self.get_post(post_id)
    if not post_data:
        return None
    manifest = (
        post_data.get("data", {})
        .get("function", {})
        .get("meta", {})
        .get("manifest", {})
    )
    return manifest.get("version")
|
||||
|
||||
def version_needs_update(self, post_id: str, local_version: str) -> bool:
    """Decide whether the local plugin should be pushed to the remote.

    Args:
        post_id: Post ID.
        local_version: Local version string.

    Returns:
        ``True`` when the remote has no recorded version or a different
        one; ``False`` when local and remote match.
    """
    remote_version = self.get_remote_version(post_id)
    if not remote_version:
        # Nothing published remotely yet -> always publish.
        return True
    return local_version != remote_version
|
||||
|
||||
# ========== 插件发布 ==========
|
||||
|
||||
def publish_plugin_from_file(
    self, file_path: str, force: bool = False, auto_create: bool = True
) -> Tuple[bool, str]:
    """Publish a plugin file, creating or updating its community post.

    Args:
        file_path: Path to the plugin source file.
        force: Skip the version comparison and always push.
        auto_create: Create a new post when no ``openwebui_id`` exists.

    Returns:
        A ``(success, message)`` tuple.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        content = f.read()

    metadata = self._parse_frontmatter(content)
    if not metadata:
        return False, "No frontmatter found"

    title = metadata.get("title")
    if not title:
        return False, "No title in frontmatter"

    post_id = metadata.get("openwebui_id") or metadata.get("post_id")
    local_version = metadata.get("version")

    # Companion README shown on the community page, if any.
    readme_content = self._find_readme(file_path)

    # Upload a sibling cover image (same basename) when one exists.
    media_urls = None
    image_path = self._find_image(file_path)
    if image_path:
        print(f"  Found image: {os.path.basename(image_path)}")
        image_url = self.upload_image(image_path)
        if image_url:
            print(f"  Uploaded image: {image_url}")
            media_urls = [image_url]

    # First-time publish: no post ID recorded locally yet.
    if not post_id:
        if not auto_create:
            return False, "No openwebui_id found and auto_create is disabled"

        print(f"  Creating new post for: {title}")
        new_post_id = self.create_plugin(
            title=title,
            source_code=content,
            readme_content=readme_content or metadata.get("description", ""),
            metadata=metadata,
            media_urls=media_urls,
        )
        if not new_post_id:
            return False, "Failed to create new post"
        # Persist the new ID into the local frontmatter for next time.
        self._inject_id_to_file(file_path, new_post_id)
        return True, f"Created new post (ID: {new_post_id})"

    # Existing post: skip when the remote version already matches
    # (unless forced or the local file declares no version).
    if (
        not force
        and local_version
        and not self.version_needs_update(post_id, local_version)
    ):
        return True, f"Skipped: version {local_version} matches remote"

    if self.update_plugin(
        post_id=post_id,
        source_code=content,
        readme_content=readme_content or metadata.get("description", ""),
        metadata=metadata,
        media_urls=media_urls,
    ):
        return True, f"Updated to version {local_version}"
    return False, "Update failed"
|
||||
|
||||
def _parse_frontmatter(self, content: str) -> Dict[str, str]:
|
||||
"""解析插件文件的 frontmatter"""
|
||||
match = re.search(r'^\s*"""\n(.*?)\n"""', content, re.DOTALL)
|
||||
if not match:
|
||||
match = re.search(r'"""\n(.*?)\n"""', content, re.DOTALL)
|
||||
if not match:
|
||||
return {}
|
||||
|
||||
frontmatter = match.group(1)
|
||||
meta = {}
|
||||
for line in frontmatter.split("\n"):
|
||||
if ":" in line:
|
||||
key, value = line.split(":", 1)
|
||||
meta[key.strip()] = value.strip()
|
||||
return meta
|
||||
|
||||
def _find_readme(self, plugin_file_path: str) -> Optional[str]:
|
||||
"""查找插件对应的 README 文件"""
|
||||
plugin_dir = os.path.dirname(plugin_file_path)
|
||||
base_name = os.path.basename(plugin_file_path).lower()
|
||||
|
||||
# 确定优先顺序
|
||||
if base_name.endswith("_cn.py"):
|
||||
readme_files = ["README_CN.md", "README.md"]
|
||||
else:
|
||||
readme_files = ["README.md", "README_CN.md"]
|
||||
|
||||
for readme_name in readme_files:
|
||||
readme_path = os.path.join(plugin_dir, readme_name)
|
||||
if os.path.exists(readme_path):
|
||||
with open(readme_path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
return None
|
||||
|
||||
def _find_image(self, plugin_file_path: str) -> Optional[str]:
|
||||
"""
|
||||
查找插件对应的图片文件
|
||||
图片名称需要和插件文件名一致(不含扩展名)
|
||||
|
||||
例如:
|
||||
export_to_word.py -> export_to_word.png / export_to_word.jpg
|
||||
"""
|
||||
plugin_dir = os.path.dirname(plugin_file_path)
|
||||
plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
|
||||
|
||||
# 支持的图片格式
|
||||
image_extensions = [".png", ".jpg", ".jpeg", ".gif", ".webp"]
|
||||
|
||||
for ext in image_extensions:
|
||||
image_path = os.path.join(plugin_dir, plugin_name + ext)
|
||||
if os.path.exists(image_path):
|
||||
return image_path
|
||||
return None
|
||||
|
||||
def _inject_id_to_file(self, file_path: str, post_id: str) -> bool:
|
||||
"""
|
||||
将新创建的帖子 ID 写回本地插件文件的 frontmatter
|
||||
|
||||
Args:
|
||||
file_path: 插件文件路径
|
||||
post_id: 新创建的帖子 ID
|
||||
|
||||
Returns:
|
||||
是否成功
|
||||
"""
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
lines = f.readlines()
|
||||
|
||||
new_lines = []
|
||||
inserted = False
|
||||
in_frontmatter = False
|
||||
|
||||
for line in lines:
|
||||
# Check for start/end of frontmatter
|
||||
if line.strip() == '"""':
|
||||
if not in_frontmatter:
|
||||
in_frontmatter = True
|
||||
else:
|
||||
in_frontmatter = False
|
||||
|
||||
new_lines.append(line)
|
||||
|
||||
# Insert after version line
|
||||
if (
|
||||
in_frontmatter
|
||||
and not inserted
|
||||
and line.strip().startswith("version:")
|
||||
):
|
||||
new_lines.append(f"openwebui_id: {post_id}\n")
|
||||
inserted = True
|
||||
print(f" Injected openwebui_id: {post_id}")
|
||||
|
||||
if inserted:
|
||||
with open(file_path, "w", encoding="utf-8") as f:
|
||||
f.writelines(new_lines)
|
||||
return True
|
||||
|
||||
print(f" Warning: Could not inject ID (no version line found)")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f" Error injecting ID to file: {e}")
|
||||
return False
|
||||
|
||||
# ========== 统计功能 ==========
|
||||
|
||||
def generate_stats(self, posts: List[Dict]) -> Dict:
    """Aggregate download/like totals and per-post details.

    Args:
        posts: Post dicts as returned by the API.

    Returns:
        A stats dict with totals, a per-type histogram, and a per-post
        detail list sorted by download count (descending).
    """
    stats = {
        "total_posts": len(posts),
        "total_downloads": 0,
        "total_likes": 0,
        "posts_by_type": {},
        "posts_detail": [],
        # Timestamp in Beijing time (module-level BEIJING_TZ).
        "generated_at": datetime.now(BEIJING_TZ).isoformat(),
    }

    by_type = stats["posts_by_type"]
    details = stats["posts_detail"]

    for post in posts:
        download_count = post.get("downloadCount", 0)
        like_count = post.get("likeCount", 0)
        kind = post.get("type", "unknown")

        stats["total_downloads"] += download_count
        stats["total_likes"] += like_count
        by_type[kind] = by_type.get(kind, 0) + 1

        details.append(
            {
                "id": post.get("id"),
                "title": post.get("title"),
                "type": kind,
                "downloads": download_count,
                "likes": like_count,
                "created_at": post.get("createdAt"),
                "updated_at": post.get("updatedAt"),
            }
        )

    # Most-downloaded first.
    details.sort(key=lambda item: item["downloads"], reverse=True)

    return stats
|
||||
|
||||
|
||||
# 便捷函数
|
||||
def get_client(api_key: Optional[str] = None) -> OpenWebUICommunityClient:
    """Build an :class:`OpenWebUICommunityClient`.

    Args:
        api_key: Explicit API key; falls back to the ``OPENWEBUI_API_KEY``
            environment variable when omitted.

    Returns:
        A ready-to-use client instance.

    Raises:
        ValueError: If no API key is available from either source.
    """
    key = api_key or os.environ.get("OPENWEBUI_API_KEY")
    if not key:
        raise ValueError("OPENWEBUI_API_KEY not set")
    return OpenWebUICommunityClient(key)
|
||||
@@ -331,6 +331,67 @@ class OpenWebUIStats:
|
||||
json.dump(stats, f, ensure_ascii=False, indent=2)
|
||||
print(f"✅ JSON 数据已保存到: {filepath}")
|
||||
|
||||
def generate_shields_endpoints(self, stats: dict, output_dir: str = "docs/badges"):
|
||||
"""
|
||||
生成 Shields.io endpoint JSON 文件
|
||||
|
||||
Args:
|
||||
stats: 统计数据
|
||||
output_dir: 输出目录
|
||||
"""
|
||||
Path(output_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def format_number(n: int) -> str:
|
||||
"""格式化数字为易读格式"""
|
||||
if n >= 1000000:
|
||||
return f"{n/1000000:.1f}M"
|
||||
elif n >= 1000:
|
||||
return f"{n/1000:.1f}k"
|
||||
return str(n)
|
||||
|
||||
# 各种徽章数据
|
||||
badges = {
|
||||
"downloads": {
|
||||
"schemaVersion": 1,
|
||||
"label": "downloads",
|
||||
"message": format_number(stats["total_downloads"]),
|
||||
"color": "blue",
|
||||
"namedLogo": "openwebui",
|
||||
},
|
||||
"plugins": {
|
||||
"schemaVersion": 1,
|
||||
"label": "plugins",
|
||||
"message": str(stats["total_posts"]),
|
||||
"color": "green",
|
||||
},
|
||||
"followers": {
|
||||
"schemaVersion": 1,
|
||||
"label": "followers",
|
||||
"message": format_number(stats.get("user", {}).get("followers", 0)),
|
||||
"color": "blue",
|
||||
},
|
||||
"points": {
|
||||
"schemaVersion": 1,
|
||||
"label": "points",
|
||||
"message": format_number(stats.get("user", {}).get("total_points", 0)),
|
||||
"color": "orange",
|
||||
},
|
||||
"upvotes": {
|
||||
"schemaVersion": 1,
|
||||
"label": "upvotes",
|
||||
"message": format_number(stats["total_upvotes"]),
|
||||
"color": "brightgreen",
|
||||
},
|
||||
}
|
||||
|
||||
for name, data in badges.items():
|
||||
filepath = Path(output_dir) / f"{name}.json"
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
print(f" 📊 Generated badge: {name}.json")
|
||||
|
||||
print(f"✅ Shields.io endpoints saved to: {output_dir}/")
|
||||
|
||||
def generate_readme_stats(self, stats: dict, lang: str = "zh") -> str:
|
||||
"""
|
||||
生成 README 统计徽章区域
|
||||
@@ -537,6 +598,10 @@ def main():
|
||||
json_path = script_dir / "docs" / "community-stats.json"
|
||||
stats_client.save_json(stats, str(json_path))
|
||||
|
||||
# 生成 Shields.io endpoint JSON (用于动态徽章)
|
||||
badges_dir = script_dir / "docs" / "badges"
|
||||
stats_client.generate_shields_endpoints(stats, str(badges_dir))
|
||||
|
||||
# 更新 README 文件
|
||||
readme_path = script_dir / "README.md"
|
||||
readme_cn_path = script_dir / "README_CN.md"
|
||||
|
||||
@@ -1,261 +1,231 @@
|
||||
"""
|
||||
Publish plugins to OpenWebUI Community
|
||||
使用 OpenWebUICommunityClient 发布插件到官方社区
|
||||
|
||||
用法:
|
||||
python scripts/publish_plugin.py # 更新已发布的插件(版本变化时)
|
||||
python scripts/publish_plugin.py --force # 强制更新所有已发布的插件
|
||||
python scripts/publish_plugin.py --new plugins/actions/xxx # 首次发布指定目录的新插件
|
||||
python scripts/publish_plugin.py --new plugins/actions/xxx --force # 强制发布新插件
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import requests
|
||||
import re
|
||||
import argparse
|
||||
|
||||
# Add current directory to path
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from openwebui_community_client import get_client
|
||||
|
||||
|
||||
def parse_frontmatter(content):
|
||||
"""Extracts metadata from the python file docstring."""
|
||||
# Allow leading whitespace and handle potential shebangs
|
||||
match = re.search(r'^\s*"""\n(.*?)\n"""', content, re.DOTALL)
|
||||
if not match:
|
||||
# Fallback for files starting with comments or shebangs
|
||||
match = re.search(r'"""\n(.*?)\n"""', content, re.DOTALL)
|
||||
if not match:
|
||||
return {}
|
||||
def find_existing_plugins(plugins_dir: str) -> list:
|
||||
"""查找所有已发布的插件文件(有 openwebui_id 的)"""
|
||||
plugins = []
|
||||
for root, _, files in os.walk(plugins_dir):
|
||||
for file in files:
|
||||
if file.endswith(".py") and not file.startswith("__"):
|
||||
file_path = os.path.join(root, file)
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read(2000)
|
||||
|
||||
frontmatter = match.group(1)
|
||||
meta = {}
|
||||
for line in frontmatter.split("\n"):
|
||||
if ":" in line:
|
||||
key, value = line.split(":", 1)
|
||||
meta[key.strip()] = value.strip()
|
||||
return meta
|
||||
id_match = re.search(
|
||||
r"(?:openwebui_id|post_id):\s*([a-z0-9-]+)", content
|
||||
)
|
||||
if id_match:
|
||||
plugins.append(
|
||||
{
|
||||
"file_path": file_path,
|
||||
"post_id": id_match.group(1).strip(),
|
||||
}
|
||||
)
|
||||
return plugins
|
||||
|
||||
|
||||
def sync_frontmatter(file_path, content, meta, post_data):
|
||||
"""Syncs remote metadata back to local file frontmatter."""
|
||||
changed = False
|
||||
new_meta = meta.copy()
|
||||
def find_new_plugins_in_dir(target_dir: str) -> list:
|
||||
"""查找指定目录中没有 openwebui_id 的新插件"""
|
||||
plugins = []
|
||||
|
||||
# 1. Sync ID
|
||||
if "openwebui_id" not in new_meta and "post_id" not in new_meta:
|
||||
new_meta["openwebui_id"] = post_data.get("id")
|
||||
changed = True
|
||||
if not os.path.isdir(target_dir):
|
||||
print(f"Error: {target_dir} is not a directory")
|
||||
return plugins
|
||||
|
||||
# 2. Sync Icon URL (often set in UI)
|
||||
manifest = (
|
||||
post_data.get("data", {})
|
||||
.get("function", {})
|
||||
.get("meta", {})
|
||||
.get("manifest", {})
|
||||
)
|
||||
if "icon_url" not in new_meta and manifest.get("icon_url"):
|
||||
new_meta["icon_url"] = manifest.get("icon_url")
|
||||
changed = True
|
||||
for file in os.listdir(target_dir):
|
||||
if file.endswith(".py") and not file.startswith("__"):
|
||||
file_path = os.path.join(target_dir, file)
|
||||
if not os.path.isfile(file_path):
|
||||
continue
|
||||
|
||||
# 3. Sync other fields if missing locally
|
||||
for field in ["author", "author_url", "funding_url"]:
|
||||
if field not in new_meta and manifest.get(field):
|
||||
new_meta[field] = manifest.get(field)
|
||||
changed = True
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read(2000)
|
||||
|
||||
if changed:
|
||||
print(f" Syncing metadata back to {os.path.basename(file_path)}...")
|
||||
# Reconstruct frontmatter
|
||||
# We need to replace the content inside the first """ ... """
|
||||
# This is a bit fragile with regex but sufficient for standard files
|
||||
# 检查是否有 frontmatter (title)
|
||||
title_match = re.search(r"title:\s*(.+)", content)
|
||||
if not title_match:
|
||||
continue
|
||||
|
||||
def replacement(match):
|
||||
lines = []
|
||||
# Keep existing description or comments if we can't parse them easily?
|
||||
# Actually, let's just reconstruct the key-values we know
|
||||
# and try to preserve the description if it was at the end
|
||||
# 检查是否已有 ID
|
||||
id_match = re.search(r"(?:openwebui_id|post_id):\s*([a-z0-9-]+)", content)
|
||||
if id_match:
|
||||
print(f" ⚠️ {file} already has ID, will update instead")
|
||||
plugins.append(
|
||||
{
|
||||
"file_path": file_path,
|
||||
"title": title_match.group(1).strip(),
|
||||
"post_id": id_match.group(1).strip(),
|
||||
"is_new": False,
|
||||
}
|
||||
)
|
||||
else:
|
||||
plugins.append(
|
||||
{
|
||||
"file_path": file_path,
|
||||
"title": title_match.group(1).strip(),
|
||||
"post_id": None,
|
||||
"is_new": True,
|
||||
}
|
||||
)
|
||||
|
||||
# Simple approach: Rebuild the whole block based on new_meta
|
||||
# This might lose comments inside the frontmatter, but standard format is simple keys
|
||||
|
||||
# Try to preserve order: title, author, ..., version, ..., description
|
||||
ordered_keys = [
|
||||
"title",
|
||||
"author",
|
||||
"author_url",
|
||||
"funding_url",
|
||||
"version",
|
||||
"openwebui_id",
|
||||
"icon_url",
|
||||
"requirements",
|
||||
"description",
|
||||
]
|
||||
|
||||
block = ['"""']
|
||||
|
||||
# Add known keys in order
|
||||
for k in ordered_keys:
|
||||
if k in new_meta:
|
||||
block.append(f"{k}: {new_meta[k]}")
|
||||
|
||||
# Add any other custom keys
|
||||
for k, v in new_meta.items():
|
||||
if k not in ordered_keys:
|
||||
block.append(f"{k}: {v}")
|
||||
|
||||
block.append('"""')
|
||||
return "\n".join(block)
|
||||
|
||||
new_content = re.sub(
|
||||
r'^"""\n(.*?)\n"""', replacement, content, count=1, flags=re.DOTALL
|
||||
)
|
||||
|
||||
# If regex didn't match (e.g. leading whitespace), try with whitespace
|
||||
if new_content == content:
|
||||
new_content = re.sub(
|
||||
r'^\s*"""\n(.*?)\n"""', replacement, content, count=1, flags=re.DOTALL
|
||||
)
|
||||
|
||||
if new_content != content:
|
||||
with open(file_path, "w", encoding="utf-8") as f:
|
||||
f.write(new_content)
|
||||
return new_content # Return updated content
|
||||
|
||||
return content
|
||||
|
||||
|
||||
def update_plugin(file_path, post_id, token):
|
||||
print(f"Processing {os.path.basename(file_path)} (ID: {post_id})...")
|
||||
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
|
||||
meta = parse_frontmatter(content)
|
||||
if not meta:
|
||||
print(f" Skipping: No frontmatter found.")
|
||||
return False
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {token}",
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
}
|
||||
|
||||
# 1. Fetch existing post
|
||||
try:
|
||||
response = requests.get(
|
||||
f"https://api.openwebui.com/api/v1/posts/{post_id}", headers=headers
|
||||
)
|
||||
response.raise_for_status()
|
||||
post_data = response.json()
|
||||
except Exception as e:
|
||||
print(f" Error fetching post: {e}")
|
||||
return False
|
||||
|
||||
# 1.5 Sync Metadata back to local file
|
||||
try:
|
||||
content = sync_frontmatter(file_path, content, meta, post_data)
|
||||
# Re-parse meta in case it changed
|
||||
meta = parse_frontmatter(content)
|
||||
except Exception as e:
|
||||
print(f" Warning: Failed to sync local metadata: {e}")
|
||||
|
||||
# 2. Update ONLY Content and Manifest
|
||||
try:
|
||||
# Ensure structure exists before populating nested fields
|
||||
if "data" not in post_data:
|
||||
post_data["data"] = {}
|
||||
if "function" not in post_data["data"]:
|
||||
post_data["data"]["function"] = {}
|
||||
if "meta" not in post_data["data"]["function"]:
|
||||
post_data["data"]["function"]["meta"] = {}
|
||||
if "manifest" not in post_data["data"]["function"]["meta"]:
|
||||
post_data["data"]["function"]["meta"]["manifest"] = {}
|
||||
|
||||
# Update 1: The Source Code (Inner Content)
|
||||
post_data["data"]["function"]["content"] = content
|
||||
|
||||
# Update 2: The Post Body/README (Outer Content)
|
||||
# Try to find a matching README file
|
||||
plugin_dir = os.path.dirname(file_path)
|
||||
base_name = os.path.basename(file_path).lower()
|
||||
readme_content = None
|
||||
|
||||
# Determine preferred README filename
|
||||
readme_files = []
|
||||
if base_name.endswith("_cn.py"):
|
||||
readme_files = ["README_CN.md", "README.md"]
|
||||
else:
|
||||
readme_files = ["README.md", "README_CN.md"]
|
||||
|
||||
for readme_name in readme_files:
|
||||
readme_path = os.path.join(plugin_dir, readme_name)
|
||||
if os.path.exists(readme_path):
|
||||
try:
|
||||
with open(readme_path, "r", encoding="utf-8") as f:
|
||||
readme_content = f.read()
|
||||
print(f" Using README: {readme_name}")
|
||||
break
|
||||
except Exception as e:
|
||||
print(f" Error reading {readme_name}: {e}")
|
||||
|
||||
if readme_content:
|
||||
post_data["content"] = readme_content
|
||||
elif "description" in meta:
|
||||
post_data["content"] = meta["description"]
|
||||
else:
|
||||
post_data["content"] = ""
|
||||
|
||||
# Update Manifest (Metadata)
|
||||
post_data["data"]["function"]["meta"]["manifest"].update(meta)
|
||||
|
||||
# Sync top-level fields for consistency
|
||||
if "title" in meta:
|
||||
post_data["title"] = meta["title"]
|
||||
post_data["data"]["function"]["name"] = meta["title"]
|
||||
if "description" in meta:
|
||||
post_data["data"]["function"]["meta"]["description"] = meta["description"]
|
||||
|
||||
except Exception as e:
|
||||
print(f" Error preparing update: {e}")
|
||||
return False
|
||||
|
||||
# 3. Submit Update
|
||||
try:
|
||||
response = requests.post(
|
||||
f"https://api.openwebui.com/api/v1/posts/{post_id}/update",
|
||||
headers=headers,
|
||||
json=post_data,
|
||||
)
|
||||
response.raise_for_status()
|
||||
print(f" ✅ Success!")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f" ❌ Failed: {e}")
|
||||
return False
|
||||
return plugins
|
||||
|
||||
|
||||
def main():
|
||||
token = os.environ.get("OPENWEBUI_API_KEY")
|
||||
if not token:
|
||||
print("Error: OPENWEBUI_API_KEY not set.")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Publish plugins to OpenWebUI Market",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
# Update existing plugins (with version check)
|
||||
python scripts/publish_plugin.py
|
||||
|
||||
# Force update all existing plugins
|
||||
python scripts/publish_plugin.py --force
|
||||
|
||||
# Publish new plugins from a specific directory
|
||||
python scripts/publish_plugin.py --new plugins/actions/summary
|
||||
|
||||
# Preview what would be done
|
||||
python scripts/publish_plugin.py --new plugins/actions/summary --dry-run
|
||||
""",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force", action="store_true", help="Force update even if version matches"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--new",
|
||||
metavar="DIR",
|
||||
help="Publish new plugins from the specified directory (required for first-time publishing)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dry-run",
|
||||
action="store_true",
|
||||
help="Show what would be done without actually publishing",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
client = get_client()
|
||||
except ValueError as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
plugins_dir = os.path.join(base_dir, "plugins")
|
||||
|
||||
count = 0
|
||||
# Walk through plugins directory
|
||||
for root, _, files in os.walk(plugins_dir):
|
||||
for file in files:
|
||||
if file.endswith(".py"):
|
||||
file_path = os.path.join(root, file)
|
||||
updated = 0
|
||||
created = 0
|
||||
skipped = 0
|
||||
failed = 0
|
||||
|
||||
# Check for ID in file content without full parse first
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read(
|
||||
2000
|
||||
) # Read first 2000 chars is enough for frontmatter
|
||||
# 处理新插件发布
|
||||
if args.new:
|
||||
target_dir = args.new
|
||||
if not os.path.isabs(target_dir):
|
||||
target_dir = os.path.join(base_dir, target_dir)
|
||||
|
||||
# Simple regex to find ID
|
||||
id_match = re.search(
|
||||
r"(?:openwebui_id|post_id):\s*([a-z0-9-]+)", content
|
||||
)
|
||||
print(f"🆕 Publishing new plugins from: {target_dir}\n")
|
||||
new_plugins = find_new_plugins_in_dir(target_dir)
|
||||
|
||||
if id_match:
|
||||
post_id = id_match.group(1).strip()
|
||||
update_plugin(file_path, post_id, token)
|
||||
count += 1
|
||||
if not new_plugins:
|
||||
print("No plugins found in the specified directory.")
|
||||
return
|
||||
|
||||
print(f"\nFinished. Updated {count} plugins.")
|
||||
for plugin in new_plugins:
|
||||
file_path = plugin["file_path"]
|
||||
file_name = os.path.basename(file_path)
|
||||
title = plugin["title"]
|
||||
is_new = plugin.get("is_new", True)
|
||||
|
||||
if is_new:
|
||||
print(f"🆕 Creating: {file_name} ({title})")
|
||||
else:
|
||||
print(f"📦 Updating: {file_name} (ID: {plugin['post_id'][:8]}...)")
|
||||
|
||||
if args.dry_run:
|
||||
print(f" [DRY-RUN] Would {'create' if is_new else 'update'}")
|
||||
continue
|
||||
|
||||
success, message = client.publish_plugin_from_file(
|
||||
file_path, force=args.force, auto_create=True
|
||||
)
|
||||
|
||||
if success:
|
||||
if "Created" in message:
|
||||
print(f" 🎉 {message}")
|
||||
created += 1
|
||||
elif "Skipped" in message:
|
||||
print(f" ⏭️ {message}")
|
||||
skipped += 1
|
||||
else:
|
||||
print(f" ✅ {message}")
|
||||
updated += 1
|
||||
else:
|
||||
print(f" ❌ {message}")
|
||||
failed += 1
|
||||
|
||||
# 处理已有插件更新
|
||||
else:
|
||||
existing_plugins = find_existing_plugins(plugins_dir)
|
||||
print(f"Found {len(existing_plugins)} existing plugins with OpenWebUI ID.\n")
|
||||
|
||||
if not existing_plugins:
|
||||
print("No existing plugins to update.")
|
||||
print(
|
||||
"\n💡 Tip: Use --new <dir> to publish new plugins from a specific directory"
|
||||
)
|
||||
return
|
||||
|
||||
for plugin in existing_plugins:
|
||||
file_path = plugin["file_path"]
|
||||
file_name = os.path.basename(file_path)
|
||||
post_id = plugin["post_id"]
|
||||
|
||||
print(f"📦 {file_name} (ID: {post_id[:8]}...)")
|
||||
|
||||
if args.dry_run:
|
||||
print(f" [DRY-RUN] Would update")
|
||||
continue
|
||||
|
||||
success, message = client.publish_plugin_from_file(
|
||||
file_path, force=args.force, auto_create=False # 不自动创建,只更新
|
||||
)
|
||||
|
||||
if success:
|
||||
if "Skipped" in message:
|
||||
print(f" ⏭️ {message}")
|
||||
skipped += 1
|
||||
else:
|
||||
print(f" ✅ {message}")
|
||||
updated += 1
|
||||
else:
|
||||
print(f" ❌ {message}")
|
||||
failed += 1
|
||||
|
||||
print(f"\n{'='*50}")
|
||||
print(
|
||||
f"Finished: {created} created, {updated} updated, {skipped} skipped, {failed} failed"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
"""
|
||||
Sync OpenWebUI Post IDs to local plugin files
|
||||
同步远程插件 ID 到本地文件
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
@@ -6,11 +11,12 @@ import difflib
|
||||
# Add current directory to path
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from openwebui_community_client import get_client
|
||||
|
||||
try:
|
||||
from openwebui_stats import OpenWebUIStats
|
||||
from extract_plugin_versions import scan_plugins_directory
|
||||
except ImportError:
|
||||
print("Error: Helper scripts not found.")
|
||||
print("Error: extract_plugin_versions.py not found.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@@ -60,13 +66,13 @@ def insert_id_into_file(file_path, post_id):
|
||||
|
||||
|
||||
def main():
|
||||
token = os.environ.get("OPENWEBUI_API_KEY")
|
||||
if not token:
|
||||
print("Error: OPENWEBUI_API_KEY environment variable not set.")
|
||||
try:
|
||||
client = get_client()
|
||||
except ValueError as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
print("Fetching remote posts...")
|
||||
client = OpenWebUIStats(token)
|
||||
print("Fetching remote posts from OpenWebUI Community...")
|
||||
remote_posts = client.get_all_posts()
|
||||
print(f"Fetched {len(remote_posts)} remote posts.")
|
||||
|
||||
|
||||