Compare commits
47 Commits
v2026.01.1
...
v2026.01.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0d8c4e048e | ||
|
|
014a5a9d1f | ||
|
|
a6dd970859 | ||
|
|
aac730f5b1 | ||
|
|
ff95d9328e | ||
|
|
afe1d8cf52 | ||
|
|
67b819f3de | ||
|
|
9b6acb6b95 | ||
|
|
a9a59e1e34 | ||
|
|
5b05397356 | ||
|
|
7a7dbc0cfa | ||
|
|
6ac0ba6efe | ||
|
|
d3d008efb4 | ||
|
|
4f1528128a | ||
|
|
93c4326206 | ||
|
|
0fca7fe524 | ||
|
|
afdcab10c6 | ||
|
|
f8cc5eabe6 | ||
|
|
f304eb7633 | ||
|
|
827204e082 | ||
|
|
641d7ee8c8 | ||
|
|
3b11537b5e | ||
|
|
e51d87ae80 | ||
|
|
f16e7c996c | ||
|
|
55eb295c12 | ||
|
|
4767351c5e | ||
|
|
1d2502eb3f | ||
|
|
94540cc131 | ||
|
|
71bef146c8 | ||
|
|
87e47fd4b2 | ||
|
|
2da600838c | ||
|
|
4ee34c1dc6 | ||
|
|
9a854c33d3 | ||
|
|
ae19653a8f | ||
|
|
caf0acf2e1 | ||
|
|
b503ad6fd2 | ||
|
|
357e869a15 | ||
|
|
3035c79d91 | ||
|
|
a5e5e178a0 | ||
|
|
d20081d3ed | ||
|
|
e2d94ba5b5 | ||
|
|
49a19242a4 | ||
|
|
c26d3b30e5 | ||
|
|
60e681042d | ||
|
|
842d65b887 | ||
|
|
ff5cecca1c | ||
|
|
b447143a50 |
30
.agent/rules/plugin_standards.md
Normal file
30
.agent/rules/plugin_standards.md
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
description: Standards for OpenWebUI Plugin Development, specifically README formatting.
|
||||||
|
globs: plugins/**
|
||||||
|
always_on: true
|
||||||
|
---
|
||||||
|
# Plugin Development Standards
|
||||||
|
|
||||||
|
## README Documentation
|
||||||
|
|
||||||
|
All plugins MUST follow the standard README template.
|
||||||
|
|
||||||
|
**Reference Template**: @docs/PLUGIN_README_TEMPLATE.md
|
||||||
|
|
||||||
|
### Language Requirements
|
||||||
|
- **English Version (`README.md`)**: The primary documentation source. Must follow the template strictly.
|
||||||
|
- **Chinese Version (`README_CN.md`)**: MUST be translated based on the English version (`README.md`) to ensure consistency in structure and content.
|
||||||
|
|
||||||
|
### Metadata Requirements
|
||||||
|
The metadata line must follow this format:
|
||||||
|
`**Author:** [Name](Link) | **Version:** [X.Y.Z] | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT`
|
||||||
|
|
||||||
|
### Structure Checklist
|
||||||
|
1. **Title & Description**
|
||||||
|
2. **Metadata Line** (Author, Version, Project, License)
|
||||||
|
3. **Preview** (Screenshots/GIFs)
|
||||||
|
4. **What's New** (Keep last 3 versions)
|
||||||
|
5. **Key Features**
|
||||||
|
6. **How to Use**
|
||||||
|
7. **Configuration (Valves)**
|
||||||
|
8. **Troubleshooting** (Must include link to GitHub Issues)
|
||||||
@@ -36,6 +36,15 @@
|
|||||||
"bug",
|
"bug",
|
||||||
"ideas"
|
"ideas"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"login": "nahoj",
|
||||||
|
"name": "Johan Grande",
|
||||||
|
"avatar_url": "https://avatars.githubusercontent.com/u/469017?v=4",
|
||||||
|
"profile": "https://perso.crans.org/grande/",
|
||||||
|
"contributions": [
|
||||||
|
"ideas"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"contributorsPerLine": 7,
|
"contributorsPerLine": 7,
|
||||||
|
|||||||
89
.github/workflows/community-stats.yml
vendored
89
.github/workflows/community-stats.yml
vendored
@@ -1,5 +1,9 @@
|
|||||||
# OpenWebUI 社区统计报告自动生成
|
# OpenWebUI 社区统计报告自动生成
|
||||||
# 只在统计数据变化时 commit,避免频繁提交
|
# 智能检测:只在有意义的变更时才 commit
|
||||||
|
# - 新增插件 (total_posts)
|
||||||
|
# - 插件版本变更 (version)
|
||||||
|
# - 积分增加 (total_points)
|
||||||
|
# - 粉丝增加 (followers)
|
||||||
|
|
||||||
name: Community Stats
|
name: Community Stats
|
||||||
|
|
||||||
@@ -31,9 +35,23 @@ jobs:
|
|||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
pip install requests python-dotenv
|
pip install requests python-dotenv
|
||||||
|
|
||||||
|
|
||||||
|
- name: Capture existing stats (before update)
|
||||||
|
id: old_stats
|
||||||
|
run: |
|
||||||
|
if [ -f docs/community-stats.json ]; then
|
||||||
|
echo "total_posts=$(jq -r '.total_posts // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
echo "total_points=$(jq -r '.user.total_points // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
echo "followers=$(jq -r '.user.followers // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
# 提取所有插件的版本号,生成一个排序后的字符串用于比较
|
||||||
|
echo "versions=$(jq -r '[.posts[].version] | sort | join(",")' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "total_posts=0" >> $GITHUB_OUTPUT
|
||||||
|
echo "total_points=0" >> $GITHUB_OUTPUT
|
||||||
|
echo "followers=0" >> $GITHUB_OUTPUT
|
||||||
|
echo "versions=" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Generate stats report
|
- name: Generate stats report
|
||||||
env:
|
env:
|
||||||
OPENWEBUI_API_KEY: ${{ secrets.OPENWEBUI_API_KEY }}
|
OPENWEBUI_API_KEY: ${{ secrets.OPENWEBUI_API_KEY }}
|
||||||
@@ -41,10 +59,71 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
python scripts/openwebui_stats.py
|
python scripts/openwebui_stats.py
|
||||||
|
|
||||||
|
- name: Capture new stats (after update)
|
||||||
|
id: new_stats
|
||||||
|
run: |
|
||||||
|
echo "total_posts=$(jq -r '.total_posts // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
echo "total_points=$(jq -r '.user.total_points // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
echo "followers=$(jq -r '.user.followers // 0' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
echo "versions=$(jq -r '[.posts[].version] | sort | join(",")' docs/community-stats.json)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Check for significant changes
|
||||||
|
id: check_changes
|
||||||
|
run: |
|
||||||
|
OLD_POSTS="${{ steps.old_stats.outputs.total_posts }}"
|
||||||
|
NEW_POSTS="${{ steps.new_stats.outputs.total_posts }}"
|
||||||
|
OLD_POINTS="${{ steps.old_stats.outputs.total_points }}"
|
||||||
|
NEW_POINTS="${{ steps.new_stats.outputs.total_points }}"
|
||||||
|
OLD_FOLLOWERS="${{ steps.old_stats.outputs.followers }}"
|
||||||
|
NEW_FOLLOWERS="${{ steps.new_stats.outputs.followers }}"
|
||||||
|
OLD_VERSIONS="${{ steps.old_stats.outputs.versions }}"
|
||||||
|
NEW_VERSIONS="${{ steps.new_stats.outputs.versions }}"
|
||||||
|
|
||||||
|
SHOULD_COMMIT="false"
|
||||||
|
CHANGE_REASON=""
|
||||||
|
|
||||||
|
# 检查新增插件
|
||||||
|
if [ "$NEW_POSTS" -gt "$OLD_POSTS" ]; then
|
||||||
|
SHOULD_COMMIT="true"
|
||||||
|
CHANGE_REASON="new plugin added ($OLD_POSTS -> $NEW_POSTS)"
|
||||||
|
echo "📦 New plugin detected: $OLD_POSTS -> $NEW_POSTS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 检查版本变更
|
||||||
|
if [ "$OLD_VERSIONS" != "$NEW_VERSIONS" ]; then
|
||||||
|
SHOULD_COMMIT="true"
|
||||||
|
CHANGE_REASON="${CHANGE_REASON:+$CHANGE_REASON, }plugin version updated"
|
||||||
|
echo "🔄 Plugin version changed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 检查积分增加
|
||||||
|
if [ "$NEW_POINTS" -gt "$OLD_POINTS" ]; then
|
||||||
|
SHOULD_COMMIT="true"
|
||||||
|
CHANGE_REASON="${CHANGE_REASON:+$CHANGE_REASON, }points increased ($OLD_POINTS -> $NEW_POINTS)"
|
||||||
|
echo "⭐ Points increased: $OLD_POINTS -> $NEW_POINTS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 检查粉丝增加
|
||||||
|
if [ "$NEW_FOLLOWERS" -gt "$OLD_FOLLOWERS" ]; then
|
||||||
|
SHOULD_COMMIT="true"
|
||||||
|
CHANGE_REASON="${CHANGE_REASON:+$CHANGE_REASON, }followers increased ($OLD_FOLLOWERS -> $NEW_FOLLOWERS)"
|
||||||
|
echo "👥 Followers increased: $OLD_FOLLOWERS -> $NEW_FOLLOWERS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "should_commit=$SHOULD_COMMIT" >> $GITHUB_OUTPUT
|
||||||
|
echo "change_reason=$CHANGE_REASON" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
if [ "$SHOULD_COMMIT" = "false" ]; then
|
||||||
|
echo "ℹ️ No significant changes detected, skipping commit"
|
||||||
|
else
|
||||||
|
echo "✅ Significant changes detected: $CHANGE_REASON"
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Commit and push changes
|
- name: Commit and push changes
|
||||||
|
if: steps.check_changes.outputs.should_commit == 'true'
|
||||||
run: |
|
run: |
|
||||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
||||||
git config --local user.name "github-actions[bot]"
|
git config --local user.name "github-actions[bot]"
|
||||||
git add docs/community-stats.zh.md docs/community-stats.md docs/community-stats.json README.md README_CN.md
|
git add docs/community-stats.zh.md docs/community-stats.md docs/community-stats.json docs/badges README.md README_CN.md
|
||||||
git diff --staged --quiet || git commit -m "chore: update community stats $(date +'%Y-%m-%d')"
|
git diff --staged --quiet || git commit -m "chore: update community stats - ${{ steps.check_changes.outputs.change_reason }}"
|
||||||
git push
|
git push
|
||||||
|
|||||||
30
README.md
30
README.md
@@ -1,6 +1,6 @@
|
|||||||
# OpenWebUI Extras
|
# OpenWebUI Extras
|
||||||
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
|
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
|
||||||
[](#contributors-)
|
[](#contributors-)
|
||||||
<!-- ALL-CONTRIBUTORS-BADGE:END -->
|
<!-- ALL-CONTRIBUTORS-BADGE:END -->
|
||||||
|
|
||||||
English | [中文](./README_CN.md)
|
English | [中文](./README_CN.md)
|
||||||
@@ -10,28 +10,28 @@ A collection of enhancements, plugins, and prompts for [OpenWebUI](https://githu
|
|||||||
<!-- STATS_START -->
|
<!-- STATS_START -->
|
||||||
## 📊 Community Stats
|
## 📊 Community Stats
|
||||||
|
|
||||||
> 🕐 Auto-updated: 2026-01-15 00:11
|
> 🕐 Auto-updated: 2026-01-19 18:11
|
||||||
|
|
||||||
| 👤 Author | 👥 Followers | ⭐ Points | 🏆 Contributions |
|
| 👤 Author | 👥 Followers | ⭐ Points | 🏆 Contributions |
|
||||||
|:---:|:---:|:---:|:---:|
|
|:---:|:---:|:---:|:---:|
|
||||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **104** | **104** | **25** |
|
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **133** | **134** | **25** |
|
||||||
|
|
||||||
| 📝 Posts | ⬇️ Downloads | 👁️ Views | 👍 Upvotes | 💾 Saves |
|
| 📝 Posts | ⬇️ Downloads | 👁️ Views | 👍 Upvotes | 💾 Saves |
|
||||||
|:---:|:---:|:---:|:---:|:---:|
|
|:---:|:---:|:---:|:---:|:---:|
|
||||||
| **16** | **1451** | **16966** | **91** | **108** |
|
| **16** | **1792** | **21276** | **120** | **135** |
|
||||||
|
|
||||||
### 🔥 Top 6 Popular Plugins
|
### 🔥 Top 6 Popular Plugins
|
||||||
|
|
||||||
> 🕐 Auto-updated: 2026-01-15 00:11
|
> 🕐 Auto-updated: 2026-01-19 18:11
|
||||||
|
|
||||||
| Rank | Plugin | Version | Downloads | Views | Updated |
|
| Rank | Plugin | Version | Downloads | Views | Updated |
|
||||||
|:---:|------|:---:|:---:|:---:|:---:|
|
|:---:|------|:---:|:---:|:---:|:---:|
|
||||||
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 0.9.1 | 451 | 4028 | 2026-01-07 |
|
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 0.9.1 | 532 | 4822 | 2026-01-17 |
|
||||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 0.3.7 | 194 | 671 | 2026-01-07 |
|
| 🥈 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 1.4.9 | 260 | 2514 | 2026-01-18 |
|
||||||
| 🥉 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 1.4.9 | 185 | 1906 | 2026-01-11 |
|
| 🥉 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 0.3.7 | 209 | 800 | 2026-01-07 |
|
||||||
| 4️⃣ | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 1.1.3 | 156 | 1743 | 2026-01-11 |
|
| 4️⃣ | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 1.1.3 | 180 | 1975 | 2026-01-17 |
|
||||||
| 5️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 0.4.3 | 122 | 1084 | 2026-01-07 |
|
| 5️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 0.4.3 | 158 | 1377 | 2026-01-17 |
|
||||||
| 6️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 0.2.4 | 116 | 2059 | 2026-01-07 |
|
| 6️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 0.2.4 | 138 | 2329 | 2026-01-17 |
|
||||||
|
|
||||||
*See full stats in [Community Stats Report](./docs/community-stats.md)*
|
*See full stats in [Community Stats Report](./docs/community-stats.md)*
|
||||||
<!-- STATS_END -->
|
<!-- STATS_END -->
|
||||||
@@ -53,14 +53,7 @@ Located in the `plugins/` directory, containing Python-based enhancements:
|
|||||||
#### Filters
|
#### Filters
|
||||||
- **Async Context Compression** (`async-context-compression`): Optimizes token usage via context compression.
|
- **Async Context Compression** (`async-context-compression`): Optimizes token usage via context compression.
|
||||||
- **Context Enhancement** (`context_enhancement_filter`): Enhances chat context.
|
- **Context Enhancement** (`context_enhancement_filter`): Enhances chat context.
|
||||||
- **Gemini Manifold Companion** (`gemini_manifold_companion`): Companion filter for Gemini Manifold.
|
|
||||||
- **Gemini Multimodal Filter** (`web_gemini_multimodel_filter`): Provides multimodal capabilities (PDF, Office, Video) for any model via Gemini.
|
|
||||||
- **Markdown Normalizer** (`markdown_normalizer`): Fixes common Markdown formatting issues in LLM outputs.
|
- **Markdown Normalizer** (`markdown_normalizer`): Fixes common Markdown formatting issues in LLM outputs.
|
||||||
- **Multi-Model Context Merger** (`multi_model_context_merger`): Automatically merges and injects context from multiple model responses.
|
|
||||||
|
|
||||||
|
|
||||||
#### Pipes
|
|
||||||
- **Gemini Manifold** (`gemini_mainfold`): Pipeline for Gemini model integration.
|
|
||||||
|
|
||||||
#### Pipelines
|
#### Pipelines
|
||||||
- **MoE Prompt Refiner** (`moe_prompt_refiner`): Refines prompts for Mixture of Experts (MoE) summary requests to generate high-quality comprehensive reports.
|
- **MoE Prompt Refiner** (`moe_prompt_refiner`): Refines prompts for Mixture of Experts (MoE) summary requests to generate high-quality comprehensive reports.
|
||||||
@@ -126,6 +119,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
|
|||||||
<td align="center" valign="top" width="14.28%"><a href="https://github.com/rbb-dev"><img src="https://avatars.githubusercontent.com/u/37469229?v=4?s=100" width="100px;" alt="rbb-dev"/><br /><sub><b>rbb-dev</b></sub></a><br /><a href="#ideas-rbb-dev" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/Fu-Jie/awesome-openwebui/commits?author=rbb-dev" title="Code">💻</a></td>
|
<td align="center" valign="top" width="14.28%"><a href="https://github.com/rbb-dev"><img src="https://avatars.githubusercontent.com/u/37469229?v=4?s=100" width="100px;" alt="rbb-dev"/><br /><sub><b>rbb-dev</b></sub></a><br /><a href="#ideas-rbb-dev" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/Fu-Jie/awesome-openwebui/commits?author=rbb-dev" title="Code">💻</a></td>
|
||||||
<td align="center" valign="top" width="14.28%"><a href="https://trade.xyz/?ref=BZ1RJRXWO"><img src="https://avatars.githubusercontent.com/u/7317522?v=4?s=100" width="100px;" alt="Raxxoor"/><br /><sub><b>Raxxoor</b></sub></a><br /><a href="https://github.com/Fu-Jie/awesome-openwebui/issues?q=author%3Adhaern" title="Bug reports">🐛</a> <a href="#ideas-dhaern" title="Ideas, Planning, & Feedback">🤔</a></td>
|
<td align="center" valign="top" width="14.28%"><a href="https://trade.xyz/?ref=BZ1RJRXWO"><img src="https://avatars.githubusercontent.com/u/7317522?v=4?s=100" width="100px;" alt="Raxxoor"/><br /><sub><b>Raxxoor</b></sub></a><br /><a href="https://github.com/Fu-Jie/awesome-openwebui/issues?q=author%3Adhaern" title="Bug reports">🐛</a> <a href="#ideas-dhaern" title="Ideas, Planning, & Feedback">🤔</a></td>
|
||||||
<td align="center" valign="top" width="14.28%"><a href="https://github.com/i-iooi-i"><img src="https://avatars.githubusercontent.com/u/1827701?v=4?s=100" width="100px;" alt="ZOLO"/><br /><sub><b>ZOLO</b></sub></a><br /><a href="https://github.com/Fu-Jie/awesome-openwebui/issues?q=author%3Ai-iooi-i" title="Bug reports">🐛</a> <a href="#ideas-i-iooi-i" title="Ideas, Planning, & Feedback">🤔</a></td>
|
<td align="center" valign="top" width="14.28%"><a href="https://github.com/i-iooi-i"><img src="https://avatars.githubusercontent.com/u/1827701?v=4?s=100" width="100px;" alt="ZOLO"/><br /><sub><b>ZOLO</b></sub></a><br /><a href="https://github.com/Fu-Jie/awesome-openwebui/issues?q=author%3Ai-iooi-i" title="Bug reports">🐛</a> <a href="#ideas-i-iooi-i" title="Ideas, Planning, & Feedback">🤔</a></td>
|
||||||
|
<td align="center" valign="top" width="14.28%"><a href="https://perso.crans.org/grande/"><img src="https://avatars.githubusercontent.com/u/469017?v=4?s=100" width="100px;" alt="Johan Grande"/><br /><sub><b>Johan Grande</b></sub></a><br /><a href="#ideas-nahoj" title="Ideas, Planning, & Feedback">🤔</a></td>
|
||||||
</tr>
|
</tr>
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
|
|||||||
20
README_CN.md
20
README_CN.md
@@ -7,28 +7,28 @@ OpenWebUI 增强功能集合。包含个人开发与收集的插件、提示词
|
|||||||
<!-- STATS_START -->
|
<!-- STATS_START -->
|
||||||
## 📊 社区统计
|
## 📊 社区统计
|
||||||
|
|
||||||
> 🕐 自动更新于 2026-01-15 00:11
|
> 🕐 自动更新于 2026-01-19 18:11
|
||||||
|
|
||||||
| 👤 作者 | 👥 粉丝 | ⭐ 积分 | 🏆 贡献 |
|
| 👤 作者 | 👥 粉丝 | ⭐ 积分 | 🏆 贡献 |
|
||||||
|:---:|:---:|:---:|:---:|
|
|:---:|:---:|:---:|:---:|
|
||||||
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **104** | **104** | **25** |
|
| [Fu-Jie](https://openwebui.com/u/Fu-Jie) | **133** | **134** | **25** |
|
||||||
|
|
||||||
| 📝 发布 | ⬇️ 下载 | 👁️ 浏览 | 👍 点赞 | 💾 收藏 |
|
| 📝 发布 | ⬇️ 下载 | 👁️ 浏览 | 👍 点赞 | 💾 收藏 |
|
||||||
|:---:|:---:|:---:|:---:|:---:|
|
|:---:|:---:|:---:|:---:|:---:|
|
||||||
| **16** | **1451** | **16966** | **91** | **108** |
|
| **16** | **1792** | **21276** | **120** | **135** |
|
||||||
|
|
||||||
### 🔥 热门插件 Top 6
|
### 🔥 热门插件 Top 6
|
||||||
|
|
||||||
> 🕐 自动更新于 2026-01-15 00:11
|
> 🕐 自动更新于 2026-01-19 18:11
|
||||||
|
|
||||||
| 排名 | 插件 | 版本 | 下载 | 浏览 | 更新日期 |
|
| 排名 | 插件 | 版本 | 下载 | 浏览 | 更新日期 |
|
||||||
|:---:|------|:---:|:---:|:---:|:---:|
|
|:---:|------|:---:|:---:|:---:|:---:|
|
||||||
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 0.9.1 | 451 | 4028 | 2026-01-07 |
|
| 🥇 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | 0.9.1 | 532 | 4822 | 2026-01-17 |
|
||||||
| 🥈 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 0.3.7 | 194 | 671 | 2026-01-07 |
|
| 🥈 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 1.4.9 | 260 | 2514 | 2026-01-18 |
|
||||||
| 🥉 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | 1.4.9 | 185 | 1906 | 2026-01-11 |
|
| 🥉 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | 0.3.7 | 209 | 800 | 2026-01-07 |
|
||||||
| 4️⃣ | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 1.1.3 | 156 | 1743 | 2026-01-11 |
|
| 4️⃣ | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | 1.1.3 | 180 | 1975 | 2026-01-17 |
|
||||||
| 5️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 0.4.3 | 122 | 1084 | 2026-01-07 |
|
| 5️⃣ | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | 0.4.3 | 158 | 1377 | 2026-01-17 |
|
||||||
| 6️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 0.2.4 | 116 | 2059 | 2026-01-07 |
|
| 6️⃣ | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | 0.2.4 | 138 | 2329 | 2026-01-17 |
|
||||||
|
|
||||||
*完整统计请查看 [社区统计报告](./docs/community-stats.zh.md)*
|
*完整统计请查看 [社区统计报告](./docs/community-stats.zh.md)*
|
||||||
<!-- STATS_END -->
|
<!-- STATS_END -->
|
||||||
|
|||||||
44
docs/PLUGIN_README_TEMPLATE.md
Normal file
44
docs/PLUGIN_README_TEMPLATE.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
<!--
|
||||||
|
NOTE: This template is for the English version (README.md).
|
||||||
|
The Chinese version (README_CN.md) MUST be translated based on this English version to ensure consistency in structure and content.
|
||||||
|
-->
|
||||||
|
# [Plugin Name] [Optional Emoji]
|
||||||
|
|
||||||
|
[Brief description of what the plugin does. Keep it concise and engaging.]
|
||||||
|
|
||||||
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie) | **Version:** 1.0.0 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
|
## What's New
|
||||||
|
|
||||||
|
<!-- Keep the changelog for the last 3 versions here. Remove this section for the initial release. -->
|
||||||
|
|
||||||
|
### v1.0.0
|
||||||
|
- **Initial Release**: Released the first version of the plugin.
|
||||||
|
- **[Feature Name]**: [Brief description of the feature].
|
||||||
|
|
||||||
|
## Key Features 🔑
|
||||||
|
|
||||||
|
- **[Feature 1]**: [Description of feature 1].
|
||||||
|
- **[Feature 2]**: [Description of feature 2].
|
||||||
|
- **[Feature 3]**: [Description of feature 3].
|
||||||
|
|
||||||
|
## How to Use 🛠️
|
||||||
|
|
||||||
|
1. **Install**: Add the plugin to your OpenWebUI instance.
|
||||||
|
2. **Configure**: Adjust settings in the Valves menu (optional).
|
||||||
|
3. **[Action Step]**: Describe how to trigger or use the plugin.
|
||||||
|
4. **[Result Step]**: Describe the expected outcome.
|
||||||
|
|
||||||
|
## Configuration (Valves) ⚙️
|
||||||
|
|
||||||
|
| Valve | Default | Description |
|
||||||
|
|-------|---------|-------------|
|
||||||
|
| `VALVE_NAME` | `Default Value` | Description of what this setting does. |
|
||||||
|
| `ANOTHER_VALVE` | `True` | Another setting description. |
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
- **Plugin not working?**: Check if the filter/action is enabled in the model settings.
|
||||||
|
- **Debug Logs**: Enable `SHOW_DEBUG_LOG` in Valves and check the browser console (F12) for detailed logs.
|
||||||
|
- **Error Messages**: If you see an error, please copy the full error message and report it.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
7
docs/badges/downloads.json
Normal file
7
docs/badges/downloads.json
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"label": "downloads",
|
||||||
|
"message": "1.8k",
|
||||||
|
"color": "blue",
|
||||||
|
"namedLogo": "openwebui"
|
||||||
|
}
|
||||||
6
docs/badges/followers.json
Normal file
6
docs/badges/followers.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"label": "followers",
|
||||||
|
"message": "133",
|
||||||
|
"color": "blue"
|
||||||
|
}
|
||||||
6
docs/badges/plugins.json
Normal file
6
docs/badges/plugins.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"label": "plugins",
|
||||||
|
"message": "16",
|
||||||
|
"color": "green"
|
||||||
|
}
|
||||||
6
docs/badges/points.json
Normal file
6
docs/badges/points.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"label": "points",
|
||||||
|
"message": "134",
|
||||||
|
"color": "orange"
|
||||||
|
}
|
||||||
6
docs/badges/upvotes.json
Normal file
6
docs/badges/upvotes.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"label": "upvotes",
|
||||||
|
"message": "120",
|
||||||
|
"color": "brightgreen"
|
||||||
|
}
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
{
|
{
|
||||||
"total_posts": 16,
|
"total_posts": 16,
|
||||||
"total_downloads": 1451,
|
"total_downloads": 1792,
|
||||||
"total_views": 16966,
|
"total_views": 21276,
|
||||||
"total_upvotes": 91,
|
"total_upvotes": 120,
|
||||||
"total_downvotes": 2,
|
"total_downvotes": 2,
|
||||||
"total_saves": 108,
|
"total_saves": 135,
|
||||||
"total_comments": 23,
|
"total_comments": 24,
|
||||||
"by_type": {
|
"by_type": {
|
||||||
"unknown": 2,
|
"action": 14,
|
||||||
"action": 14
|
"unknown": 2
|
||||||
},
|
},
|
||||||
"posts": [
|
"posts": [
|
||||||
{
|
{
|
||||||
@@ -18,31 +18,15 @@
|
|||||||
"version": "0.9.1",
|
"version": "0.9.1",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "Intelligently analyzes text content and generates interactive mind maps to help users structure and visualize knowledge.",
|
"description": "Intelligently analyzes text content and generates interactive mind maps to help users structure and visualize knowledge.",
|
||||||
"downloads": 451,
|
"downloads": 532,
|
||||||
"views": 4028,
|
"views": 4822,
|
||||||
"upvotes": 12,
|
"upvotes": 15,
|
||||||
"saves": 26,
|
"saves": 28,
|
||||||
"comments": 11,
|
"comments": 11,
|
||||||
"created_at": "2025-12-30",
|
"created_at": "2025-12-30",
|
||||||
"updated_at": "2026-01-07",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a"
|
"url": "https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"title": "Export to Excel",
|
|
||||||
"slug": "export_mulit_table_to_excel_244b8f9d",
|
|
||||||
"type": "action",
|
|
||||||
"version": "0.3.7",
|
|
||||||
"author": "Fu-Jie",
|
|
||||||
"description": "Extracts tables from chat messages and exports them to Excel (.xlsx) files with smart formatting.",
|
|
||||||
"downloads": 194,
|
|
||||||
"views": 671,
|
|
||||||
"upvotes": 3,
|
|
||||||
"saves": 4,
|
|
||||||
"comments": 0,
|
|
||||||
"created_at": "2025-05-30",
|
|
||||||
"updated_at": "2026-01-07",
|
|
||||||
"url": "https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"title": "📊 Smart Infographic (AntV)",
|
"title": "📊 Smart Infographic (AntV)",
|
||||||
"slug": "smart_infographic_ad6f0c7f",
|
"slug": "smart_infographic_ad6f0c7f",
|
||||||
@@ -50,15 +34,31 @@
|
|||||||
"version": "1.4.9",
|
"version": "1.4.9",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "AI-powered infographic generator based on AntV Infographic. Supports professional templates, auto-icon matching, and SVG/PNG downloads.",
|
"description": "AI-powered infographic generator based on AntV Infographic. Supports professional templates, auto-icon matching, and SVG/PNG downloads.",
|
||||||
"downloads": 185,
|
"downloads": 260,
|
||||||
"views": 1906,
|
"views": 2514,
|
||||||
"upvotes": 9,
|
"upvotes": 14,
|
||||||
"saves": 13,
|
"saves": 20,
|
||||||
"comments": 2,
|
"comments": 3,
|
||||||
"created_at": "2025-12-28",
|
"created_at": "2025-12-28",
|
||||||
"updated_at": "2026-01-11",
|
"updated_at": "2026-01-18",
|
||||||
"url": "https://openwebui.com/posts/smart_infographic_ad6f0c7f"
|
"url": "https://openwebui.com/posts/smart_infographic_ad6f0c7f"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"title": "Export to Excel",
|
||||||
|
"slug": "export_mulit_table_to_excel_244b8f9d",
|
||||||
|
"type": "action",
|
||||||
|
"version": "0.3.7",
|
||||||
|
"author": "Fu-Jie",
|
||||||
|
"description": "Extracts tables from chat messages and exports them to Excel (.xlsx) files with smart formatting.",
|
||||||
|
"downloads": 209,
|
||||||
|
"views": 800,
|
||||||
|
"upvotes": 4,
|
||||||
|
"saves": 5,
|
||||||
|
"comments": 0,
|
||||||
|
"created_at": "2025-05-30",
|
||||||
|
"updated_at": "2026-01-07",
|
||||||
|
"url": "https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"title": "Async Context Compression",
|
"title": "Async Context Compression",
|
||||||
"slug": "async_context_compression_b1655bc8",
|
"slug": "async_context_compression_b1655bc8",
|
||||||
@@ -66,13 +66,13 @@
|
|||||||
"version": "1.1.3",
|
"version": "1.1.3",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "Reduces token consumption in long conversations while maintaining coherence through intelligent summarization and message compression.",
|
"description": "Reduces token consumption in long conversations while maintaining coherence through intelligent summarization and message compression.",
|
||||||
"downloads": 156,
|
"downloads": 180,
|
||||||
"views": 1743,
|
"views": 1975,
|
||||||
"upvotes": 7,
|
"upvotes": 9,
|
||||||
"saves": 15,
|
"saves": 19,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2025-11-08",
|
"created_at": "2025-11-08",
|
||||||
"updated_at": "2026-01-11",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/async_context_compression_b1655bc8"
|
"url": "https://openwebui.com/posts/async_context_compression_b1655bc8"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -82,13 +82,13 @@
|
|||||||
"version": "0.4.3",
|
"version": "0.4.3",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "Export current conversation from Markdown to Word (.docx) with Mermaid diagrams rendered client-side (Mermaid.js, SVG+PNG), LaTeX math, real hyperlinks, improved tables, syntax highlighting, and blockquote support.",
|
"description": "Export current conversation from Markdown to Word (.docx) with Mermaid diagrams rendered client-side (Mermaid.js, SVG+PNG), LaTeX math, real hyperlinks, improved tables, syntax highlighting, and blockquote support.",
|
||||||
"downloads": 122,
|
"downloads": 158,
|
||||||
"views": 1084,
|
"views": 1377,
|
||||||
"upvotes": 6,
|
"upvotes": 8,
|
||||||
"saves": 11,
|
"saves": 16,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2026-01-03",
|
"created_at": "2026-01-03",
|
||||||
"updated_at": "2026-01-07",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315"
|
"url": "https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -98,15 +98,31 @@
|
|||||||
"version": "0.2.4",
|
"version": "0.2.4",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "Quickly generates beautiful flashcards from text, extracting key points and categories.",
|
"description": "Quickly generates beautiful flashcards from text, extracting key points and categories.",
|
||||||
"downloads": 116,
|
"downloads": 138,
|
||||||
"views": 2059,
|
"views": 2329,
|
||||||
"upvotes": 8,
|
"upvotes": 10,
|
||||||
"saves": 10,
|
"saves": 10,
|
||||||
"comments": 2,
|
"comments": 2,
|
||||||
"created_at": "2025-12-30",
|
"created_at": "2025-12-30",
|
||||||
"updated_at": "2026-01-07",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/flash_card_65a2ea8f"
|
"url": "https://openwebui.com/posts/flash_card_65a2ea8f"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"title": "Markdown Normalizer",
|
||||||
|
"slug": "markdown_normalizer_baaa8732",
|
||||||
|
"type": "action",
|
||||||
|
"version": "1.2.3",
|
||||||
|
"author": "Fu-Jie",
|
||||||
|
"description": "A content normalizer filter that fixes common Markdown formatting issues in LLM outputs, such as broken code blocks, LaTeX formulas, and list formatting.",
|
||||||
|
"downloads": 84,
|
||||||
|
"views": 2100,
|
||||||
|
"upvotes": 10,
|
||||||
|
"saves": 17,
|
||||||
|
"comments": 5,
|
||||||
|
"created_at": "2026-01-12",
|
||||||
|
"updated_at": "2026-01-17",
|
||||||
|
"url": "https://openwebui.com/posts/markdown_normalizer_baaa8732"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"title": "Deep Dive",
|
"title": "Deep Dive",
|
||||||
"slug": "deep_dive_c0b846e4",
|
"slug": "deep_dive_c0b846e4",
|
||||||
@@ -114,10 +130,10 @@
|
|||||||
"version": "1.0.0",
|
"version": "1.0.0",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.",
|
"description": "A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.",
|
||||||
"downloads": 54,
|
"downloads": 68,
|
||||||
"views": 523,
|
"views": 663,
|
||||||
"upvotes": 3,
|
"upvotes": 4,
|
||||||
"saves": 4,
|
"saves": 6,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2026-01-08",
|
"created_at": "2026-01-08",
|
||||||
"updated_at": "2026-01-08",
|
"updated_at": "2026-01-08",
|
||||||
@@ -130,13 +146,13 @@
|
|||||||
"version": "0.4.3",
|
"version": "0.4.3",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "将对话导出为 Word (.docx),支持 Mermaid 图表 (客户端渲染 SVG+PNG)、LaTeX 数学公式、真实超链接、增强表格格式、代码高亮和引用块。",
|
"description": "将对话导出为 Word (.docx),支持 Mermaid 图表 (客户端渲染 SVG+PNG)、LaTeX 数学公式、真实超链接、增强表格格式、代码高亮和引用块。",
|
||||||
"downloads": 49,
|
"downloads": 63,
|
||||||
"views": 1155,
|
"views": 1305,
|
||||||
"upvotes": 9,
|
"upvotes": 11,
|
||||||
"saves": 3,
|
"saves": 3,
|
||||||
"comments": 1,
|
"comments": 1,
|
||||||
"created_at": "2026-01-04",
|
"created_at": "2026-01-04",
|
||||||
"updated_at": "2026-01-07",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0"
|
"url": "https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -146,31 +162,15 @@
|
|||||||
"version": "1.4.9",
|
"version": "1.4.9",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "基于 AntV Infographic 的智能信息图生成插件。支持多种专业模板,自动图标匹配,并提供 SVG/PNG 下载功能。",
|
"description": "基于 AntV Infographic 的智能信息图生成插件。支持多种专业模板,自动图标匹配,并提供 SVG/PNG 下载功能。",
|
||||||
"downloads": 41,
|
"downloads": 42,
|
||||||
"views": 603,
|
"views": 683,
|
||||||
"upvotes": 4,
|
"upvotes": 6,
|
||||||
"saves": 0,
|
"saves": 0,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2025-12-28",
|
"created_at": "2025-12-28",
|
||||||
"updated_at": "2026-01-11",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/智能信息图_e04a48ff"
|
"url": "https://openwebui.com/posts/智能信息图_e04a48ff"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"title": "Markdown Normalizer",
|
|
||||||
"slug": "markdown_normalizer_baaa8732",
|
|
||||||
"type": "action",
|
|
||||||
"version": "1.1.2",
|
|
||||||
"author": "Fu-Jie",
|
|
||||||
"description": "A content normalizer filter that fixes common Markdown formatting issues in LLM outputs, such as broken code blocks, LaTeX formulas, and list formatting.",
|
|
||||||
"downloads": 30,
|
|
||||||
"views": 1095,
|
|
||||||
"upvotes": 7,
|
|
||||||
"saves": 11,
|
|
||||||
"comments": 5,
|
|
||||||
"created_at": "2026-01-12",
|
|
||||||
"updated_at": "2026-01-13",
|
|
||||||
"url": "https://openwebui.com/posts/markdown_normalizer_baaa8732"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"title": "思维导图",
|
"title": "思维导图",
|
||||||
"slug": "智能生成交互式思维导图帮助用户可视化知识_8d4b097b",
|
"slug": "智能生成交互式思维导图帮助用户可视化知识_8d4b097b",
|
||||||
@@ -178,15 +178,31 @@
|
|||||||
"version": "0.9.1",
|
"version": "0.9.1",
|
||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "智能分析文本内容,生成交互式思维导图,帮助用户结构化和可视化知识。",
|
"description": "智能分析文本内容,生成交互式思维导图,帮助用户结构化和可视化知识。",
|
||||||
"downloads": 21,
|
"downloads": 22,
|
||||||
"views": 369,
|
"views": 398,
|
||||||
"upvotes": 2,
|
"upvotes": 3,
|
||||||
"saves": 1,
|
"saves": 1,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2025-12-31",
|
"created_at": "2025-12-31",
|
||||||
"updated_at": "2026-01-07",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b"
|
"url": "https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"title": "闪记卡 (Flash Card)",
|
||||||
|
"slug": "闪记卡生成插件_4a31eac3",
|
||||||
|
"type": "action",
|
||||||
|
"version": "0.2.4",
|
||||||
|
"author": "Fu-Jie",
|
||||||
|
"description": "快速将文本提炼为精美的学习记忆卡片,支持核心要点提取与分类。",
|
||||||
|
"downloads": 16,
|
||||||
|
"views": 443,
|
||||||
|
"upvotes": 5,
|
||||||
|
"saves": 1,
|
||||||
|
"comments": 0,
|
||||||
|
"created_at": "2025-12-30",
|
||||||
|
"updated_at": "2026-01-17",
|
||||||
|
"url": "https://openwebui.com/posts/闪记卡生成插件_4a31eac3"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"title": "异步上下文压缩",
|
"title": "异步上下文压缩",
|
||||||
"slug": "异步上下文压缩_5c0617cb",
|
"slug": "异步上下文压缩_5c0617cb",
|
||||||
@@ -195,30 +211,14 @@
|
|||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "通过智能摘要和消息压缩,降低长对话的 token 消耗,同时保持对话连贯性。",
|
"description": "通过智能摘要和消息压缩,降低长对话的 token 消耗,同时保持对话连贯性。",
|
||||||
"downloads": 14,
|
"downloads": 14,
|
||||||
"views": 315,
|
"views": 351,
|
||||||
"upvotes": 4,
|
"upvotes": 5,
|
||||||
"saves": 1,
|
"saves": 1,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2025-11-08",
|
"created_at": "2025-11-08",
|
||||||
"updated_at": "2026-01-11",
|
"updated_at": "2026-01-17",
|
||||||
"url": "https://openwebui.com/posts/异步上下文压缩_5c0617cb"
|
"url": "https://openwebui.com/posts/异步上下文压缩_5c0617cb"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"title": "闪记卡 (Flash Card)",
|
|
||||||
"slug": "闪记卡生成插件_4a31eac3",
|
|
||||||
"type": "action",
|
|
||||||
"version": "0.2.4",
|
|
||||||
"author": "Fu-Jie",
|
|
||||||
"description": "快速将文本提炼为精美的学习记忆卡片,支持核心要点提取与分类。",
|
|
||||||
"downloads": 12,
|
|
||||||
"views": 405,
|
|
||||||
"upvotes": 4,
|
|
||||||
"saves": 1,
|
|
||||||
"comments": 0,
|
|
||||||
"created_at": "2025-12-30",
|
|
||||||
"updated_at": "2026-01-07",
|
|
||||||
"url": "https://openwebui.com/posts/闪记卡生成插件_4a31eac3"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"title": "精读",
|
"title": "精读",
|
||||||
"slug": "精读_99830b0f",
|
"slug": "精读_99830b0f",
|
||||||
@@ -227,8 +227,8 @@
|
|||||||
"author": "Fu-Jie",
|
"author": "Fu-Jie",
|
||||||
"description": "全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。",
|
"description": "全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。",
|
||||||
"downloads": 6,
|
"downloads": 6,
|
||||||
"views": 214,
|
"views": 259,
|
||||||
"upvotes": 2,
|
"upvotes": 3,
|
||||||
"saves": 1,
|
"saves": 1,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2026-01-08",
|
"created_at": "2026-01-08",
|
||||||
@@ -243,8 +243,8 @@
|
|||||||
"author": "",
|
"author": "",
|
||||||
"description": "",
|
"description": "",
|
||||||
"downloads": 0,
|
"downloads": 0,
|
||||||
"views": 5,
|
"views": 59,
|
||||||
"upvotes": 0,
|
"upvotes": 1,
|
||||||
"saves": 0,
|
"saves": 0,
|
||||||
"comments": 0,
|
"comments": 0,
|
||||||
"created_at": "2026-01-14",
|
"created_at": "2026-01-14",
|
||||||
@@ -259,8 +259,8 @@
|
|||||||
"author": "",
|
"author": "",
|
||||||
"description": "",
|
"description": "",
|
||||||
"downloads": 0,
|
"downloads": 0,
|
||||||
"views": 791,
|
"views": 1198,
|
||||||
"upvotes": 11,
|
"upvotes": 12,
|
||||||
"saves": 7,
|
"saves": 7,
|
||||||
"comments": 2,
|
"comments": 2,
|
||||||
"created_at": "2026-01-10",
|
"created_at": "2026-01-10",
|
||||||
@@ -273,11 +273,11 @@
|
|||||||
"name": "Fu-Jie",
|
"name": "Fu-Jie",
|
||||||
"profile_url": "https://openwebui.com/u/Fu-Jie",
|
"profile_url": "https://openwebui.com/u/Fu-Jie",
|
||||||
"profile_image": "https://community.s3.openwebui.com/uploads/users/b15d1348-4347-42b4-b815-e053342d6cb0/profile_d9510745-4bd4-4f8f-a997-4a21847d9300.webp",
|
"profile_image": "https://community.s3.openwebui.com/uploads/users/b15d1348-4347-42b4-b815-e053342d6cb0/profile_d9510745-4bd4-4f8f-a997-4a21847d9300.webp",
|
||||||
"followers": 104,
|
"followers": 133,
|
||||||
"following": 2,
|
"following": 2,
|
||||||
"total_points": 104,
|
"total_points": 134,
|
||||||
"post_points": 89,
|
"post_points": 118,
|
||||||
"comment_points": 15,
|
"comment_points": 16,
|
||||||
"contributions": 25
|
"contributions": 25
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1,40 +1,40 @@
|
|||||||
# 📊 OpenWebUI Community Stats Report
|
# 📊 OpenWebUI Community Stats Report
|
||||||
|
|
||||||
> 📅 Updated: 2026-01-15 00:11
|
> 📅 Updated: 2026-01-19 18:11
|
||||||
|
|
||||||
## 📈 Overview
|
## 📈 Overview
|
||||||
|
|
||||||
| Metric | Value |
|
| Metric | Value |
|
||||||
|------|------|
|
|------|------|
|
||||||
| 📝 Total Posts | 16 |
|
| 📝 Total Posts | 16 |
|
||||||
| ⬇️ Total Downloads | 1451 |
|
| ⬇️ Total Downloads | 1792 |
|
||||||
| 👁️ Total Views | 16966 |
|
| 👁️ Total Views | 21276 |
|
||||||
| 👍 Total Upvotes | 91 |
|
| 👍 Total Upvotes | 120 |
|
||||||
| 💾 Total Saves | 108 |
|
| 💾 Total Saves | 135 |
|
||||||
| 💬 Total Comments | 23 |
|
| 💬 Total Comments | 24 |
|
||||||
|
|
||||||
## 📂 By Type
|
## 📂 By Type
|
||||||
|
|
||||||
- **unknown**: 2
|
|
||||||
- **action**: 14
|
- **action**: 14
|
||||||
|
- **unknown**: 2
|
||||||
|
|
||||||
## 📋 Posts List
|
## 📋 Posts List
|
||||||
|
|
||||||
| Rank | Title | Type | Version | Downloads | Views | Upvotes | Saves | Updated |
|
| Rank | Title | Type | Version | Downloads | Views | Upvotes | Saves | Updated |
|
||||||
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||||
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 451 | 4028 | 12 | 26 | 2026-01-07 |
|
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 532 | 4822 | 15 | 28 | 2026-01-17 |
|
||||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 194 | 671 | 3 | 4 | 2026-01-07 |
|
| 2 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.9 | 260 | 2514 | 14 | 20 | 2026-01-18 |
|
||||||
| 3 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.9 | 185 | 1906 | 9 | 13 | 2026-01-11 |
|
| 3 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 209 | 800 | 4 | 5 | 2026-01-07 |
|
||||||
| 4 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | action | 1.1.3 | 156 | 1743 | 7 | 15 | 2026-01-11 |
|
| 4 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | action | 1.1.3 | 180 | 1975 | 9 | 19 | 2026-01-17 |
|
||||||
| 5 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 122 | 1084 | 6 | 11 | 2026-01-07 |
|
| 5 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 158 | 1377 | 8 | 16 | 2026-01-17 |
|
||||||
| 6 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 116 | 2059 | 8 | 10 | 2026-01-07 |
|
| 6 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 138 | 2329 | 10 | 10 | 2026-01-17 |
|
||||||
| 7 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 54 | 523 | 3 | 4 | 2026-01-08 |
|
| 7 | [Markdown Normalizer](https://openwebui.com/posts/markdown_normalizer_baaa8732) | action | 1.2.3 | 84 | 2100 | 10 | 17 | 2026-01-17 |
|
||||||
| 8 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 49 | 1155 | 9 | 3 | 2026-01-07 |
|
| 8 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 68 | 663 | 4 | 6 | 2026-01-08 |
|
||||||
| 9 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.9 | 41 | 603 | 4 | 0 | 2026-01-11 |
|
| 9 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 63 | 1305 | 11 | 3 | 2026-01-17 |
|
||||||
| 10 | [Markdown Normalizer](https://openwebui.com/posts/markdown_normalizer_baaa8732) | action | 1.1.2 | 30 | 1095 | 7 | 11 | 2026-01-13 |
|
| 10 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.9 | 42 | 683 | 6 | 0 | 2026-01-17 |
|
||||||
| 11 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 21 | 369 | 2 | 1 | 2026-01-07 |
|
| 11 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 22 | 398 | 3 | 1 | 2026-01-17 |
|
||||||
| 12 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | action | 1.1.3 | 14 | 315 | 4 | 1 | 2026-01-11 |
|
| 12 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 16 | 443 | 5 | 1 | 2026-01-17 |
|
||||||
| 13 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 12 | 405 | 4 | 1 | 2026-01-07 |
|
| 13 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | action | 1.1.3 | 14 | 351 | 5 | 1 | 2026-01-17 |
|
||||||
| 14 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 6 | 214 | 2 | 1 | 2026-01-08 |
|
| 14 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 6 | 259 | 3 | 1 | 2026-01-08 |
|
||||||
| 15 | [Review of Claude Haiku 4.5](https://openwebui.com/posts/review_of_claude_haiku_45_41b0db39) | unknown | | 0 | 5 | 0 | 0 | 2026-01-14 |
|
| 15 | [Review of Claude Haiku 4.5](https://openwebui.com/posts/review_of_claude_haiku_45_41b0db39) | unknown | | 0 | 59 | 1 | 0 | 2026-01-14 |
|
||||||
| 16 | [ 🛠️ Debug Open WebUI Plugins in Your Browser](https://openwebui.com/posts/debug_open_webui_plugins_in_your_browser_81bf7960) | unknown | | 0 | 791 | 11 | 7 | 2026-01-10 |
|
| 16 | [ 🛠️ Debug Open WebUI Plugins in Your Browser](https://openwebui.com/posts/debug_open_webui_plugins_in_your_browser_81bf7960) | unknown | | 0 | 1198 | 12 | 7 | 2026-01-10 |
|
||||||
|
|||||||
@@ -1,40 +1,40 @@
|
|||||||
# 📊 OpenWebUI 社区统计报告
|
# 📊 OpenWebUI 社区统计报告
|
||||||
|
|
||||||
> 📅 更新时间: 2026-01-15 00:11
|
> 📅 更新时间: 2026-01-19 18:11
|
||||||
|
|
||||||
## 📈 总览
|
## 📈 总览
|
||||||
|
|
||||||
| 指标 | 数值 |
|
| 指标 | 数值 |
|
||||||
|------|------|
|
|------|------|
|
||||||
| 📝 发布数量 | 16 |
|
| 📝 发布数量 | 16 |
|
||||||
| ⬇️ 总下载量 | 1451 |
|
| ⬇️ 总下载量 | 1792 |
|
||||||
| 👁️ 总浏览量 | 16966 |
|
| 👁️ 总浏览量 | 21276 |
|
||||||
| 👍 总点赞数 | 91 |
|
| 👍 总点赞数 | 120 |
|
||||||
| 💾 总收藏数 | 108 |
|
| 💾 总收藏数 | 135 |
|
||||||
| 💬 总评论数 | 23 |
|
| 💬 总评论数 | 24 |
|
||||||
|
|
||||||
## 📂 按类型分类
|
## 📂 按类型分类
|
||||||
|
|
||||||
- **unknown**: 2
|
|
||||||
- **action**: 14
|
- **action**: 14
|
||||||
|
- **unknown**: 2
|
||||||
|
|
||||||
## 📋 发布列表
|
## 📋 发布列表
|
||||||
|
|
||||||
| 排名 | 标题 | 类型 | 版本 | 下载 | 浏览 | 点赞 | 收藏 | 更新日期 |
|
| 排名 | 标题 | 类型 | 版本 | 下载 | 浏览 | 点赞 | 收藏 | 更新日期 |
|
||||||
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
|:---:|------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||||
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 451 | 4028 | 12 | 26 | 2026-01-07 |
|
| 1 | [Smart Mind Map](https://openwebui.com/posts/turn_any_text_into_beautiful_mind_maps_3094c59a) | action | 0.9.1 | 532 | 4822 | 15 | 28 | 2026-01-17 |
|
||||||
| 2 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 194 | 671 | 3 | 4 | 2026-01-07 |
|
| 2 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.9 | 260 | 2514 | 14 | 20 | 2026-01-18 |
|
||||||
| 3 | [📊 Smart Infographic (AntV)](https://openwebui.com/posts/smart_infographic_ad6f0c7f) | action | 1.4.9 | 185 | 1906 | 9 | 13 | 2026-01-11 |
|
| 3 | [Export to Excel](https://openwebui.com/posts/export_mulit_table_to_excel_244b8f9d) | action | 0.3.7 | 209 | 800 | 4 | 5 | 2026-01-07 |
|
||||||
| 4 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | action | 1.1.3 | 156 | 1743 | 7 | 15 | 2026-01-11 |
|
| 4 | [Async Context Compression](https://openwebui.com/posts/async_context_compression_b1655bc8) | action | 1.1.3 | 180 | 1975 | 9 | 19 | 2026-01-17 |
|
||||||
| 5 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 122 | 1084 | 6 | 11 | 2026-01-07 |
|
| 5 | [Export to Word (Enhanced)](https://openwebui.com/posts/export_to_word_enhanced_formatting_fca6a315) | action | 0.4.3 | 158 | 1377 | 8 | 16 | 2026-01-17 |
|
||||||
| 6 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 116 | 2059 | 8 | 10 | 2026-01-07 |
|
| 6 | [Flash Card](https://openwebui.com/posts/flash_card_65a2ea8f) | action | 0.2.4 | 138 | 2329 | 10 | 10 | 2026-01-17 |
|
||||||
| 7 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 54 | 523 | 3 | 4 | 2026-01-08 |
|
| 7 | [Markdown Normalizer](https://openwebui.com/posts/markdown_normalizer_baaa8732) | action | 1.2.3 | 84 | 2100 | 10 | 17 | 2026-01-17 |
|
||||||
| 8 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 49 | 1155 | 9 | 3 | 2026-01-07 |
|
| 8 | [Deep Dive](https://openwebui.com/posts/deep_dive_c0b846e4) | action | 1.0.0 | 68 | 663 | 4 | 6 | 2026-01-08 |
|
||||||
| 9 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.9 | 41 | 603 | 4 | 0 | 2026-01-11 |
|
| 9 | [导出为 Word (增强版)](https://openwebui.com/posts/导出为_word_支持公式流程图表格和代码块_8a6306c0) | action | 0.4.3 | 63 | 1305 | 11 | 3 | 2026-01-17 |
|
||||||
| 10 | [Markdown Normalizer](https://openwebui.com/posts/markdown_normalizer_baaa8732) | action | 1.1.2 | 30 | 1095 | 7 | 11 | 2026-01-13 |
|
| 10 | [📊 智能信息图 (AntV Infographic)](https://openwebui.com/posts/智能信息图_e04a48ff) | action | 1.4.9 | 42 | 683 | 6 | 0 | 2026-01-17 |
|
||||||
| 11 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 21 | 369 | 2 | 1 | 2026-01-07 |
|
| 11 | [思维导图](https://openwebui.com/posts/智能生成交互式思维导图帮助用户可视化知识_8d4b097b) | action | 0.9.1 | 22 | 398 | 3 | 1 | 2026-01-17 |
|
||||||
| 12 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | action | 1.1.3 | 14 | 315 | 4 | 1 | 2026-01-11 |
|
| 12 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 16 | 443 | 5 | 1 | 2026-01-17 |
|
||||||
| 13 | [闪记卡 (Flash Card)](https://openwebui.com/posts/闪记卡生成插件_4a31eac3) | action | 0.2.4 | 12 | 405 | 4 | 1 | 2026-01-07 |
|
| 13 | [异步上下文压缩](https://openwebui.com/posts/异步上下文压缩_5c0617cb) | action | 1.1.3 | 14 | 351 | 5 | 1 | 2026-01-17 |
|
||||||
| 14 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 6 | 214 | 2 | 1 | 2026-01-08 |
|
| 14 | [精读](https://openwebui.com/posts/精读_99830b0f) | action | 1.0.0 | 6 | 259 | 3 | 1 | 2026-01-08 |
|
||||||
| 15 | [Review of Claude Haiku 4.5](https://openwebui.com/posts/review_of_claude_haiku_45_41b0db39) | unknown | | 0 | 5 | 0 | 0 | 2026-01-14 |
|
| 15 | [Review of Claude Haiku 4.5](https://openwebui.com/posts/review_of_claude_haiku_45_41b0db39) | unknown | | 0 | 59 | 1 | 0 | 2026-01-14 |
|
||||||
| 16 | [ 🛠️ Debug Open WebUI Plugins in Your Browser](https://openwebui.com/posts/debug_open_webui_plugins_in_your_browser_81bf7960) | unknown | | 0 | 791 | 11 | 7 | 2026-01-10 |
|
| 16 | [ 🛠️ Debug Open WebUI Plugins in Your Browser](https://openwebui.com/posts/debug_open_webui_plugins_in_your_browser_81bf7960) | unknown | | 0 | 1198 | 12 | 7 | 2026-01-10 |
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# Async Context Compression
|
# Async Context Compression
|
||||||
|
|
||||||
<span class="category-badge filter">Filter</span>
|
<span class="category-badge filter">Filter</span>
|
||||||
<span class="version-badge">v1.1.3</span>
|
<span class="version-badge">v1.2.0</span>
|
||||||
|
|
||||||
Reduces token consumption in long conversations through intelligent summarization while maintaining conversational coherence.
|
Reduces token consumption in long conversations through intelligent summarization while maintaining conversational coherence.
|
||||||
|
|
||||||
@@ -34,6 +34,10 @@ This is especially useful for:
|
|||||||
- :material-check-all: **Open WebUI v0.7.x Compatibility**: Dynamic DB session handling
|
- :material-check-all: **Open WebUI v0.7.x Compatibility**: Dynamic DB session handling
|
||||||
- :material-account-convert: **Improved Compatibility**: Summary role changed to `assistant`
|
- :material-account-convert: **Improved Compatibility**: Summary role changed to `assistant`
|
||||||
- :material-shield-check: **Enhanced Stability**: Resolved race conditions in state management
|
- :material-shield-check: **Enhanced Stability**: Resolved race conditions in state management
|
||||||
|
- :material-ruler: **Preflight Context Check**: Validates context fit before sending
|
||||||
|
- :material-format-align-justify: **Structure-Aware Trimming**: Preserves document structure
|
||||||
|
- :material-content-cut: **Native Tool Output Trimming**: Trims verbose tool outputs (Note: Non-native tool outputs are not fully injected into context)
|
||||||
|
- :material-chart-bar: **Detailed Token Logging**: Granular token breakdown
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -64,10 +68,13 @@ graph TD
|
|||||||
|
|
||||||
| Option | Type | Default | Description |
|
| Option | Type | Default | Description |
|
||||||
|--------|------|---------|-------------|
|
|--------|------|---------|-------------|
|
||||||
| `token_threshold` | integer | `4000` | Trigger compression above this token count |
|
| `compression_threshold_tokens` | integer | `64000` | Trigger compression above this token count |
|
||||||
| `preserve_recent` | integer | `5` | Number of recent messages to keep uncompressed |
|
| `max_context_tokens` | integer | `128000` | Hard limit for context |
|
||||||
| `summary_model` | string | `"auto"` | Model to use for summarization |
|
| `keep_first` | integer | `1` | Always keep the first N messages |
|
||||||
| `compression_ratio` | float | `0.3` | Target compression ratio |
|
| `keep_last` | integer | `6` | Always keep the last N messages |
|
||||||
|
| `summary_model` | string | `None` | Model to use for summarization |
|
||||||
|
| `max_summary_tokens` | integer | `16384` | Maximum tokens for the summary |
|
||||||
|
| `enable_tool_output_trimming` | boolean | `false` | Enable trimming of large tool outputs |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# Async Context Compression(异步上下文压缩)
|
# Async Context Compression(异步上下文压缩)
|
||||||
|
|
||||||
<span class="category-badge filter">Filter</span>
|
<span class="category-badge filter">Filter</span>
|
||||||
<span class="version-badge">v1.1.3</span>
|
<span class="version-badge">v1.2.0</span>
|
||||||
|
|
||||||
通过智能摘要减少长对话的 token 消耗,同时保持对话连贯。
|
通过智能摘要减少长对话的 token 消耗,同时保持对话连贯。
|
||||||
|
|
||||||
@@ -34,6 +34,10 @@ Async Context Compression 过滤器通过以下方式帮助管理长对话的 to
|
|||||||
- :material-check-all: **Open WebUI v0.7.x 兼容性**:动态数据库会话处理
|
- :material-check-all: **Open WebUI v0.7.x 兼容性**:动态数据库会话处理
|
||||||
- :material-account-convert: **兼容性提升**:摘要角色改为 `assistant`
|
- :material-account-convert: **兼容性提升**:摘要角色改为 `assistant`
|
||||||
- :material-shield-check: **稳定性增强**:解决状态管理竞态条件
|
- :material-shield-check: **稳定性增强**:解决状态管理竞态条件
|
||||||
|
- :material-ruler: **预检上下文检查**:发送前验证上下文是否超限
|
||||||
|
- :material-format-align-justify: **结构感知裁剪**:保留文档结构的智能裁剪
|
||||||
|
- :material-content-cut: **原生工具输出裁剪**:自动裁剪冗长的工具输出(注意:非原生工具调用输出不会完整注入上下文)
|
||||||
|
- :material-chart-bar: **详细 Token 日志**:提供细粒度的 Token 统计
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -64,10 +68,13 @@ graph TD
|
|||||||
|
|
||||||
| 选项 | 类型 | 默认值 | 说明 |
|
| 选项 | 类型 | 默认值 | 说明 |
|
||||||
|--------|------|---------|-------------|
|
|--------|------|---------|-------------|
|
||||||
| `token_threshold` | integer | `4000` | 超过该 token 数触发压缩 |
|
| `compression_threshold_tokens` | integer | `64000` | 超过该 token 数触发压缩 |
|
||||||
| `preserve_recent` | integer | `5` | 保留不压缩的最近消息数量 |
|
| `max_context_tokens` | integer | `128000` | 上下文硬性上限 |
|
||||||
| `summary_model` | string | `"auto"` | 用于摘要的模型 |
|
| `keep_first` | integer | `1` | 始终保留的前 N 条消息 |
|
||||||
| `compression_ratio` | float | `0.3` | 目标压缩比例 |
|
| `keep_last` | integer | `6` | 始终保留的后 N 条消息 |
|
||||||
|
| `summary_model` | string | `None` | 用于摘要的模型 |
|
||||||
|
| `max_summary_tokens` | integer | `16384` | 摘要的最大 token 数 |
|
||||||
|
| `enable_tool_output_trimming` | boolean | `false` | 启用长工具输出裁剪 |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ Filters act as middleware in the message pipeline:
|
|||||||
|
|
||||||
Fixes common Markdown formatting issues in LLM outputs, including Mermaid syntax, code blocks, and LaTeX formulas.
|
Fixes common Markdown formatting issues in LLM outputs, including Mermaid syntax, code blocks, and LaTeX formulas.
|
||||||
|
|
||||||
**Version:** 1.1.2
|
**Version:** 1.2.4
|
||||||
|
|
||||||
[:octicons-arrow-right-24: Documentation](markdown_normalizer.md)
|
[:octicons-arrow-right-24: Documentation](markdown_normalizer.md)
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ Filter 充当消息管线中的中间件:
|
|||||||
|
|
||||||
修复 LLM 输出中常见的 Markdown 格式问题,包括 Mermaid 语法、代码块和 LaTeX 公式。
|
修复 LLM 输出中常见的 Markdown 格式问题,包括 Mermaid 语法、代码块和 LaTeX 公式。
|
||||||
|
|
||||||
**版本:** 1.0.1
|
**版本:** 1.2.4
|
||||||
|
|
||||||
[:octicons-arrow-right-24: 查看文档](markdown_normalizer.zh.md)
|
[:octicons-arrow-right-24: 查看文档](markdown_normalizer.zh.md)
|
||||||
|
|
||||||
|
|||||||
@@ -1,45 +1,97 @@
|
|||||||
# Markdown Normalizer Filter
|
# Markdown Normalizer Filter
|
||||||
|
|
||||||
A production-grade content normalizer filter for Open WebUI that fixes common Markdown formatting issues in LLM outputs. It ensures that code blocks, LaTeX formulas, Mermaid diagrams, and other Markdown elements are rendered correctly.
|
A content normalizer filter for Open WebUI that fixes common Markdown formatting issues in LLM outputs. It ensures that code blocks, LaTeX formulas, Mermaid diagrams, and other Markdown elements are rendered correctly.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
* **Mermaid Syntax Fix**: Automatically fixes common Mermaid syntax errors, such as unquoted node labels (including multi-line labels and citations) and unclosed subgraphs, ensuring diagrams render correctly.
|
* **Details Tag Normalization**: Ensures proper spacing for `<details>` tags (used for thought chains). Adds a blank line after `</details>` and ensures a newline after self-closing `<details />` tags to prevent rendering issues.
|
||||||
* **Frontend Console Debugging**: Supports printing structured debug logs directly to the browser console (F12) for easier troubleshooting.
|
* **Emphasis Spacing Fix**: Fixes extra spaces inside emphasis markers (e.g., `** text **` -> `**text**`) which can cause rendering failures. Includes safeguards to protect math expressions (e.g., `2 * 3 * 4`) and list variables.
|
||||||
* **Code Block Formatting**: Fixes broken code block prefixes, suffixes, and indentation.
|
* **Mermaid Syntax Fix**: Automatically fixes common Mermaid syntax errors, such as unquoted node labels (including multi-line labels and citations) and unclosed subgraphs. **New in v1.1.2**: Comprehensive protection for edge labels (text on connecting lines) across all link types (solid, dotted, thick).
|
||||||
* **LaTeX Normalization**: Standardizes LaTeX formula delimiters (`\[` -> `$$`, `\(` -> `$`).
|
* **Frontend Console Debugging**: Supports printing structured debug logs directly to the browser console (F12) for easier troubleshooting.
|
||||||
* **Thought Tag Normalization**: Unifies thought tags (`<think>`, `<thinking>` -> `<thought>`).
|
* **Code Block Formatting**: Fixes broken code block prefixes, suffixes, and indentation.
|
||||||
* **Escape Character Fix**: Cleans up excessive escape characters (`\\n`, `\\t`).
|
* **LaTeX Normalization**: Standardizes LaTeX formula delimiters (`\[` -> `$$`, `\(` -> `$`).
|
||||||
* **List Formatting**: Ensures proper newlines in list items.
|
* **Thought Tag Normalization**: Unifies thought tags (`<think>`, `<thinking>` -> `<thought>`).
|
||||||
* **Heading Fix**: Adds missing spaces in headings (`#Heading` -> `# Heading`).
|
* **Escape Character Fix**: Cleans up excessive escape characters (`\\n`, `\\t`).
|
||||||
* **Table Fix**: Adds missing closing pipes in tables.
|
* **List Formatting**: Ensures proper newlines in list items.
|
||||||
* **XML Cleanup**: Removes leftover XML artifacts.
|
* **Heading Fix**: Adds missing spaces in headings (`#Heading` -> `# Heading`).
|
||||||
|
* **Table Fix**: Adds missing closing pipes in tables.
|
||||||
|
* **XML Cleanup**: Removes leftover XML artifacts.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
1. Install the plugin in Open WebUI.
|
1. Install the plugin in Open WebUI.
|
||||||
2. Enable the filter globally or for specific models.
|
2. Enable the filter globally or for specific models.
|
||||||
3. Configure the enabled fixes in the **Valves** settings.
|
3. Configure the enabled fixes in the **Valves** settings.
|
||||||
4. (Optional) **Show Debug Log** is enabled by default in Valves. This prints structured logs to the browser console (F12).
|
4. (Optional) **Show Debug Log** is enabled by default in Valves. This prints structured logs to the browser console (F12).
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> As this is an initial version, some "negative fixes" might occur (e.g., breaking valid Markdown). If you encounter issues, please check the console logs, copy the "Original" vs "Normalized" content, and submit an issue.
|
> As this is an initial version, some "negative fixes" might occur (e.g., breaking valid Markdown). If you encounter issues, please check the console logs, copy the "Original" vs "Normalized" content, and submit an issue.
|
||||||
|
|
||||||
## Configuration (Valves)
|
## Configuration (Valves)
|
||||||
|
|
||||||
* `priority`: Filter priority (default: 50).
|
* `priority`: Filter priority (default: 50).
|
||||||
* `enable_escape_fix`: Fix excessive escape characters.
|
* `enable_escape_fix`: Fix excessive escape characters.
|
||||||
* `enable_thought_tag_fix`: Normalize thought tags.
|
* `enable_thought_tag_fix`: Normalize thought tags.
|
||||||
* `enable_code_block_fix`: Fix code block formatting.
|
* `enable_details_tag_fix`: Normalize details tags (default: True).
|
||||||
* `enable_latex_fix`: Normalize LaTeX formulas.
|
* `enable_code_block_fix`: Fix code block formatting.
|
||||||
* `enable_list_fix`: Fix list item newlines (Experimental).
|
* `enable_latex_fix`: Normalize LaTeX formulas.
|
||||||
* `enable_unclosed_block_fix`: Auto-close unclosed code blocks.
|
* `enable_list_fix`: Fix list item newlines (Experimental).
|
||||||
* `enable_fullwidth_symbol_fix`: Fix full-width symbols in code blocks.
|
* `enable_unclosed_block_fix`: Auto-close unclosed code blocks.
|
||||||
* `enable_mermaid_fix`: Fix Mermaid syntax errors.
|
* `enable_fullwidth_symbol_fix`: Fix full-width symbols in code blocks.
|
||||||
* `enable_heading_fix`: Fix missing space in headings.
|
* `enable_mermaid_fix`: Fix Mermaid syntax errors.
|
||||||
* `enable_table_fix`: Fix missing closing pipe in tables.
|
* `enable_heading_fix`: Fix missing space in headings.
|
||||||
* `enable_xml_tag_cleanup`: Cleanup leftover XML tags.
|
* `enable_table_fix`: Fix missing closing pipe in tables.
|
||||||
* `show_status`: Show status notification when fixes are applied.
|
* `enable_xml_tag_cleanup`: Cleanup leftover XML tags.
|
||||||
* `show_debug_log`: Print debug logs to browser console.
|
* `enable_emphasis_spacing_fix`: Fix extra spaces in emphasis (default: True).
|
||||||
|
* `show_status`: Show status notification when fixes are applied.
|
||||||
|
* `show_debug_log`: Print debug logs to browser console.
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
* **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
### v1.2.4
|
||||||
|
|
||||||
|
* **Documentation Updates**: Synchronized version numbers across all documentation and code files.
|
||||||
|
|
||||||
|
### v1.2.3
|
||||||
|
|
||||||
|
* **List Marker Protection Enhancement**: Fixed a bug where list markers (`*`) followed by plain text and emphasis were having their spaces incorrectly stripped (e.g., `* U16 forward` became `*U16 forward`).
|
||||||
|
* **Placeholder Support**: Confirmed that 4 or more underscores (e.g., `____`) are correctly treated as placeholders and not modified by the emphasis fix.
|
||||||
|
|
||||||
|
### v1.2.2
|
||||||
|
|
||||||
|
* **Code Block Indentation Fix**: Fixed an issue where code blocks nested inside lists were having their indentation incorrectly stripped. Now preserves proper indentation for nested code blocks.
|
||||||
|
* **Underscore Emphasis Support**: Extended emphasis spacing fix to support `__` (double underscore for bold) and `___` (triple underscore for bold+italic) syntax.
|
||||||
|
* **List Marker Protection**: Fixed a bug where list markers (`*`) followed by emphasis markers (`**`) were incorrectly merged (e.g., `* **Yes**` became `***Yes**`). Added safeguard to prevent this.
|
||||||
|
* **Test Suite**: Added comprehensive pytest test suite with 56 test cases covering all major features.
|
||||||
|
|
||||||
|
### v1.2.1
|
||||||
|
|
||||||
|
* **Emphasis Spacing Fix**: Added a new fix for extra spaces inside emphasis markers (e.g., `** text **` -> `**text**`).
|
||||||
|
* Uses a recursive approach to handle nested emphasis (e.g., `**bold _italic _**`).
|
||||||
|
* Includes safeguards to prevent modifying math expressions (e.g., `2 * 3 * 4`) or list variables.
|
||||||
|
* Controlled by the `enable_emphasis_spacing_fix` valve (default: True).
|
||||||
|
|
||||||
|
### v1.2.0
|
||||||
|
|
||||||
|
* **Details Tag Support**: Added normalization for `<details>` tags.
|
||||||
|
* Ensures a blank line is added after `</details>` closing tags to separate thought content from the main response.
|
||||||
|
* Ensures a newline is added after self-closing `<details ... />` tags to prevent them from interfering with subsequent Markdown headings (e.g., fixing `<details/>#Heading`).
|
||||||
|
* Includes safeguard to prevent modification of `<details>` tags inside code blocks.
|
||||||
|
|
||||||
|
### v1.1.2
|
||||||
|
|
||||||
|
* **Mermaid Edge Label Protection**: Implemented comprehensive protection for edge labels (text on connecting lines) to prevent them from being incorrectly modified. Now supports all Mermaid link types including solid (`--`), dotted (`-.`), and thick (`==`) lines with or without arrows.
|
||||||
|
* **Bug Fixes**: Fixed an issue where lines without arrows (e.g., `A -- text --- B`) were not correctly protected.
|
||||||
|
|
||||||
|
### v1.1.0
|
||||||
|
|
||||||
|
* **Mermaid Fix Refinement**: Improved regex to handle nested parentheses in node labels (e.g., `ID("Label (text)")`) and avoided matching connection labels.
|
||||||
|
* **HTML Safeguard Optimization**: Refined `_contains_html` to allow common tags like `<br/>`, `<b>`, `<i>`, etc., ensuring Mermaid diagrams with these tags are still normalized.
|
||||||
|
* **Full-width Symbol Cleanup**: Fixed duplicate keys and incorrect quote mapping in `FULLWIDTH_MAP`.
|
||||||
|
* **Bug Fixes**: Fixed missing `Dict` import in Python files.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|||||||
@@ -1,45 +1,97 @@
|
|||||||
# Markdown 格式化过滤器 (Markdown Normalizer)
|
# Markdown 格式化过滤器 (Markdown Normalizer)
|
||||||
|
|
||||||
这是一个用于 Open WebUI 的生产级内容格式化过滤器,旨在修复 LLM 输出中常见的 Markdown 格式问题。它能确保代码块、LaTeX 公式、Mermaid 图表和其他 Markdown 元素被正确渲染。
|
这是一个用于 Open WebUI 的内容格式化过滤器,旨在修复 LLM 输出中常见的 Markdown 格式问题。它能确保代码块、LaTeX 公式、Mermaid 图表和其他 Markdown 元素被正确渲染。
|
||||||
|
|
||||||
## 功能特性
|
## 功能特性
|
||||||
|
|
||||||
* **Mermaid 语法修复**: 自动修复常见的 Mermaid 语法错误,如未加引号的节点标签(支持多行标签和引用标记)和未闭合的子图 (Subgraph),确保图表能正确渲染。
|
* **Details 标签规范化**: 确保 `<details>` 标签(常用于思维链)有正确的间距。在 `</details>` 后添加空行,并在自闭合 `<details />` 标签后添加换行,防止渲染问题。
|
||||||
* **前端控制台调试**: 支持将结构化的调试日志直接打印到浏览器控制台 (F12),方便排查问题。
|
* **强调空格修复**: 修复强调标记内部的多余空格(例如 `** 文本 **` -> `**文本**`),这会导致 Markdown 渲染失败。包含保护机制,防止误修改数学表达式(如 `2 * 3 * 4`)或列表变量。
|
||||||
* **代码块格式化**: 修复破损的代码块前缀、后缀和缩进问题。
|
* **Mermaid 语法修复**: 自动修复常见的 Mermaid 语法错误,如未加引号的节点标签(支持多行标签和引用标记)和未闭合的子图 (Subgraph)。**v1.1.2 新增**: 全面保护各种类型的连线标签(实线、虚线、粗线),防止被误修改。
|
||||||
* **LaTeX 规范化**: 标准化 LaTeX 公式定界符 (`\[` -> `$$`, `\(` -> `$`)。
|
* **前端控制台调试**: 支持将结构化的调试日志直接打印到浏览器控制台 (F12),方便排查问题。
|
||||||
* **思维标签规范化**: 统一思维链标签 (`<think>`, `<thinking>` -> `<thought>`)。
|
* **代码块格式化**: 修复破损的代码块前缀、后缀和缩进问题。
|
||||||
* **转义字符修复**: 清理过度的转义字符 (`\\n`, `\\t`)。
|
* **LaTeX 规范化**: 标准化 LaTeX 公式定界符 (`\[` -> `$$`, `\(` -> `$`)。
|
||||||
* **列表格式化**: 确保列表项有正确的换行。
|
* **思维标签规范化**: 统一思维链标签 (`<think>`, `<thinking>` -> `<thought>`)。
|
||||||
* **标题修复**: 修复标题中缺失的空格 (`#标题` -> `# 标题`)。
|
* **转义字符修复**: 清理过度的转义字符 (`\\n`, `\\t`)。
|
||||||
* **表格修复**: 修复表格中缺失的闭合管道符。
|
* **列表格式化**: 确保列表项有正确的换行。
|
||||||
* **XML 清理**: 移除残留的 XML 标签。
|
* **标题修复**: 修复标题中缺失的空格 (`#标题` -> `# 标题`)。
|
||||||
|
* **表格修复**: 修复表格中缺失的闭合管道符。
|
||||||
|
* **XML 清理**: 移除残留的 XML 标签。
|
||||||
|
|
||||||
## 使用方法
|
## 使用方法
|
||||||
|
|
||||||
1. 在 Open WebUI 中安装此插件。
|
1. 在 Open WebUI 中安装此插件。
|
||||||
2. 全局启用或为特定模型启用此过滤器。
|
2. 全局启用或为特定模型启用此过滤器。
|
||||||
3. 在 **Valves** 设置中配置需要启用的修复项。
|
3. 在 **Valves** 设置中配置需要启用的修复项。
|
||||||
4. (可选) **显示调试日志 (Show Debug Log)** 在 Valves 中默认开启。这会将结构化的日志打印到浏览器控制台 (F12)。
|
4. (可选) **显示调试日志 (Show Debug Log)** 在 Valves 中默认开启。这会将结构化的日志打印到浏览器控制台 (F12)。
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> 由于这是初版,可能会出现“负向修复”的情况(例如破坏了原本正确的格式)。如果您遇到问题,请务必查看控制台日志,复制“原始 (Original)”与“规范化 (Normalized)”的内容对比,并提交 Issue 反馈。
|
> 由于这是初版,可能会出现“负向修复”的情况(例如破坏了原本正确的格式)。如果您遇到问题,请务必查看控制台日志,复制“原始 (Original)”与“规范化 (Normalized)”的内容对比,并提交 Issue 反馈。
|
||||||
|
|
||||||
## 配置项 (Valves)
|
## 配置项 (Valves)
|
||||||
|
|
||||||
* `priority`: 过滤器优先级 (默认: 50)。
|
* `priority`: 过滤器优先级 (默认: 50)。
|
||||||
* `enable_escape_fix`: 修复过度的转义字符。
|
* `enable_escape_fix`: 修复过度的转义字符。
|
||||||
* `enable_thought_tag_fix`: 规范化思维标签。
|
* `enable_thought_tag_fix`: 规范化思维标签。
|
||||||
* `enable_code_block_fix`: 修复代码块格式。
|
* `enable_details_tag_fix`: 规范化 Details 标签 (默认: True)。
|
||||||
* `enable_latex_fix`: 规范化 LaTeX 公式。
|
* `enable_code_block_fix`: 修复代码块格式。
|
||||||
* `enable_list_fix`: 修复列表项换行 (实验性)。
|
* `enable_latex_fix`: 规范化 LaTeX 公式。
|
||||||
* `enable_unclosed_block_fix`: 自动闭合未闭合的代码块。
|
* `enable_list_fix`: 修复列表项换行 (实验性)。
|
||||||
* `enable_fullwidth_symbol_fix`: 修复代码块中的全角符号。
|
* `enable_unclosed_block_fix`: 自动闭合未闭合的代码块。
|
||||||
* `enable_mermaid_fix`: 修复 Mermaid 语法错误。
|
* `enable_fullwidth_symbol_fix`: 修复代码块中的全角符号。
|
||||||
* `enable_heading_fix`: 修复标题中缺失的空格。
|
* `enable_mermaid_fix`: 修复 Mermaid 语法错误。
|
||||||
* `enable_table_fix`: 修复表格中缺失的闭合管道符。
|
* `enable_heading_fix`: 修复标题中缺失的空格。
|
||||||
* `enable_xml_tag_cleanup`: 清理残留的 XML 标签。
|
* `enable_table_fix`: 修复表格中缺失的闭合管道符。
|
||||||
* `show_status`: 应用修复时显示状态通知。
|
* `enable_xml_tag_cleanup`: 清理残留的 XML 标签。
|
||||||
* `show_debug_log`: 在浏览器控制台打印调试日志。
|
* `enable_emphasis_spacing_fix`: 修复强调语法中的多余空格 (默认: True)。
|
||||||
|
* `show_status`: 应用修复时显示状态通知。
|
||||||
|
* `show_debug_log`: 在浏览器控制台打印调试日志。
|
||||||
|
|
||||||
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
|
* **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
|
## 更新日志
|
||||||
|
|
||||||
|
### v1.2.4
|
||||||
|
|
||||||
|
* **文档更新**: 同步了所有文档和代码文件的版本号。
|
||||||
|
|
||||||
|
### v1.2.3
|
||||||
|
|
||||||
|
* **列表标记保护增强**: 修复了列表标记 (`*`) 后跟普通文本和强调标记时,空格被错误剥离的问题(例如 `* U16 前锋` 变成 `*U16 前锋`)。
|
||||||
|
* **占位符支持**: 确认 4 个或更多下划线(如 `____`)会被正确视为占位符,不会被强调修复逻辑修改。
|
||||||
|
|
||||||
|
### v1.2.2
|
||||||
|
|
||||||
|
* **代码块缩进修复**: 修复了列表中嵌套代码块的缩进被错误剥离的问题。现在会正确保留嵌套代码块的缩进。
|
||||||
|
* **下划线强调语法支持**: 扩展强调空格修复以支持 `__` (双下划线加粗) 和 `___` (三下划线加粗斜体) 语法。
|
||||||
|
* **列表标记保护**: 修复了列表标记 (`*`) 后跟强调标记 (`**`) 被错误合并的 Bug(例如 `* **是**` 变成 `***是**`)。添加了保护逻辑防止此问题。
|
||||||
|
* **测试套件**: 新增完整的 pytest 测试套件,包含 56 个测试用例,覆盖所有主要功能。
|
||||||
|
|
||||||
|
### v1.2.1
|
||||||
|
|
||||||
|
* **强调空格修复**: 新增了对强调标记内部多余空格的修复(例如 `** 文本 **` -> `**文本**`)。
|
||||||
|
* 采用递归方法处理嵌套强调(例如 `**加粗 _斜体 _**`)。
|
||||||
|
* 包含保护机制,防止误修改数学表达式(如 `2 * 3 * 4`)或列表变量。
|
||||||
|
* 通过 `enable_emphasis_spacing_fix` 开关控制(默认:开启)。
|
||||||
|
|
||||||
|
### v1.2.0
|
||||||
|
|
||||||
|
* **Details 标签支持**: 新增了对 `<details>` 标签的规范化支持。
|
||||||
|
* 确保在 `</details>` 闭合标签后添加空行,将思维内容与正文分隔开。
|
||||||
|
* 确保在自闭合 `<details ... />` 标签后添加换行,防止其干扰后续的 Markdown 标题(例如修复 `<details/>#标题`)。
|
||||||
|
* 包含保护机制,防止修改代码块内部的 `<details>` 标签。
|
||||||
|
|
||||||
|
### v1.1.2
|
||||||
|
|
||||||
|
* **Mermaid 连线标签保护**: 实现了全面的连线标签保护机制,防止连接线上的文字被误修改。现在支持所有 Mermaid 连线类型,包括实线 (`--`)、虚线 (`-.`) 和粗线 (`==`),无论是否带有箭头。
|
||||||
|
* **Bug 修复**: 修复了无箭头连线(如 `A -- text --- B`)未被正确保护的问题。
|
||||||
|
|
||||||
|
### v1.1.0
|
||||||
|
|
||||||
|
* **Mermaid 修复优化**: 改进了正则表达式以处理节点标签中的嵌套括号(如 `ID("标签 (文本)")`),并避免误匹配连接线上的文字。
|
||||||
|
* **HTML 保护机制优化**: 优化了 `_contains_html` 检测,允许 `<br/>`, `<b>`, `<i>` 等常见标签,确保包含这些标签的 Mermaid 图表能被正常规范化。
|
||||||
|
* **全角符号清理**: 修复了 `FULLWIDTH_MAP` 中的重复键名和错误的引号映射。
|
||||||
|
* **Bug 修复**: 修复了 Python 文件中缺失的 `Dict` 类型导入。
|
||||||
|
|
||||||
## 许可证
|
## 许可证
|
||||||
|
|
||||||
|
|||||||
@@ -124,10 +124,6 @@ Each plugin should include:
|
|||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
> **Note**: For detailed information about each plugin type, see the respective README files in each plugin type directory.
|
> **Note**: For detailed information about each plugin type, see the respective README files in each plugin type directory.
|
||||||
|
|||||||
@@ -124,10 +124,6 @@ plugins/
|
|||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
> **注意**:有关每种插件类型的详细信息,请参阅每个插件类型目录中的相应 README 文件。
|
> **注意**:有关每种插件类型的详细信息,请参阅每个插件类型目录中的相应 README 文件。
|
||||||
|
|||||||
@@ -230,7 +230,3 @@ except Exception as e:
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
@@ -229,7 +229,3 @@ except Exception as e:
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 🌊 Deep Dive
|
# 🌊 Deep Dive
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.0.0 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.0.0 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.
|
A comprehensive thinking lens that dives deep into any content - from context to logic, insights, and action paths.
|
||||||
|
|
||||||
@@ -81,3 +81,10 @@ The plugin generates a structured thinking timeline:
|
|||||||
|
|
||||||
- `deep_dive.py` - English version
|
- `deep_dive.py` - English version
|
||||||
- `deep_dive_cn.py` - Chinese version (精读)
|
- `deep_dive_cn.py` - Chinese version (精读)
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
- **Plugin not working?**: Check if the filter/action is enabled in the model settings.
|
||||||
|
- **Debug Logs**: Enable `SHOW_STATUS` in Valves to see progress updates.
|
||||||
|
- **Error Messages**: If you see an error, please copy the full error message and report it.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 📖 精读
|
# 📖 精读
|
||||||
|
|
||||||
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.0.0 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.0.0 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。
|
全方位的思维透镜 —— 从背景全景到逻辑脉络,从深度洞察到行动路径。
|
||||||
|
|
||||||
@@ -81,3 +81,10 @@
|
|||||||
|
|
||||||
- `deep_dive.py` - 英文版 (Deep Dive)
|
- `deep_dive.py` - 英文版 (Deep Dive)
|
||||||
- `deep_dive_cn.py` - 中文版 (精读)
|
- `deep_dive_cn.py` - 中文版 (精读)
|
||||||
|
|
||||||
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
|
- **插件不工作?**: 请检查是否在模型设置中启用了该过滤器/动作。
|
||||||
|
- **调试日志**: 在 Valves 中启用 `SHOW_STATUS` 以查看进度更新。
|
||||||
|
- **错误信息**: 如果看到错误,请复制完整的错误信息并报告。
|
||||||
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 📝 Export to Word (Enhanced)
|
# 📝 Export to Word (Enhanced)
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.4.3 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.4.3 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
Export conversation to Word (.docx) with **syntax highlighting**, **native math equations**, **Mermaid diagrams**, **citations**, and **enhanced table formatting**.
|
Export conversation to Word (.docx) with **syntax highlighting**, **native math equations**, **Mermaid diagrams**, **citations**, and **enhanced table formatting**.
|
||||||
|
|
||||||
@@ -86,3 +86,10 @@ Export conversation to Word (.docx) with **syntax highlighting**, **native math
|
|||||||
- **Font & Style Configuration**: Customizable fonts and table colors.
|
- **Font & Style Configuration**: Customizable fonts and table colors.
|
||||||
- **Mermaid Enhancements**: Hybrid SVG+PNG rendering, background color config.
|
- **Mermaid Enhancements**: Hybrid SVG+PNG rendering, background color config.
|
||||||
- **Performance**: Real-time progress updates for large exports.
|
- **Performance**: Real-time progress updates for large exports.
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
- **Plugin not working?**: Check if the filter/action is enabled in the model settings.
|
||||||
|
- **Debug Logs**: Check the browser console (F12) for detailed logs if available.
|
||||||
|
- **Error Messages**: If you see an error, please copy the full error message and report it.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 📝 导出为 Word (增强版)
|
# 📝 导出为 Word (增强版)
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.4.3 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.4.3 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
将对话导出为 Word (.docx),支持**代码语法高亮**、**原生数学公式**、**Mermaid 图表**、**引用参考**和**增强表格格式**。
|
将对话导出为 Word (.docx),支持**代码语法高亮**、**原生数学公式**、**Mermaid 图表**、**引用参考**和**增强表格格式**。
|
||||||
|
|
||||||
@@ -86,3 +86,10 @@
|
|||||||
- **字体与样式配置**: 支持自定义中英文字体、代码字体以及表格颜色。
|
- **字体与样式配置**: 支持自定义中英文字体、代码字体以及表格颜色。
|
||||||
- **Mermaid 增强**: 混合 SVG+PNG 渲染,支持背景色配置。
|
- **Mermaid 增强**: 混合 SVG+PNG 渲染,支持背景色配置。
|
||||||
- **性能优化**: 导出大型文档时提供实时进度反馈。
|
- **性能优化**: 导出大型文档时提供实时进度反馈。
|
||||||
|
|
||||||
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
|
- **插件不工作?**: 请检查是否在模型设置中启用了该过滤器/动作。
|
||||||
|
- **调试日志**: 请查看浏览器控制台 (F12) 获取详细日志(如果可用)。
|
||||||
|
- **错误信息**: 如果看到错误,请复制完整的错误信息并报告。
|
||||||
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -2,9 +2,18 @@
|
|||||||
|
|
||||||
Generate polished learning flashcards from any text—title, summary, key points, tags, and category—ready for review and sharing.
|
Generate polished learning flashcards from any text—title, summary, key points, tags, and category—ready for review and sharing.
|
||||||
|
|
||||||
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.2.4 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
|
## Preview 📸
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Highlights
|
## What's New
|
||||||
|
|
||||||
|
### v0.2.4
|
||||||
|
- **Clean Output**: Removed debug messages from output.
|
||||||
|
|
||||||
|
## Key Features 🔑
|
||||||
|
|
||||||
- **One-click generation**: Drop in text, get a structured card.
|
- **One-click generation**: Drop in text, get a structured card.
|
||||||
- **Concise extraction**: 3–5 key points and 2–4 tags automatically surfaced.
|
- **Concise extraction**: 3–5 key points and 2–4 tags automatically surfaced.
|
||||||
@@ -12,7 +21,14 @@ Generate polished learning flashcards from any text—title, summary, key points
|
|||||||
- **Progressive merge**: Multiple runs append cards into the same HTML container; enable clearing to reset.
|
- **Progressive merge**: Multiple runs append cards into the same HTML container; enable clearing to reset.
|
||||||
- **Status updates**: Live notifications for generating/done/error.
|
- **Status updates**: Live notifications for generating/done/error.
|
||||||
|
|
||||||
## Parameters
|
## How to Use 🛠️
|
||||||
|
|
||||||
|
1. **Install**: Add the plugin to your OpenWebUI instance.
|
||||||
|
2. **Configure**: Adjust settings in the Valves menu (optional).
|
||||||
|
3. **Trigger**: Send text to the chat.
|
||||||
|
4. **Result**: Watch status updates; the card HTML is embedded into the latest message.
|
||||||
|
|
||||||
|
## Configuration (Valves) ⚙️
|
||||||
|
|
||||||
| Param | Description | Default |
|
| Param | Description | Default |
|
||||||
| ------------------- | ------------------------------------------------------------ | ------- |
|
| ------------------- | ------------------------------------------------------------ | ------- |
|
||||||
@@ -23,34 +39,9 @@ Generate polished learning flashcards from any text—title, summary, key points
|
|||||||
| CLEAR_PREVIOUS_HTML | Whether to clear previous card HTML (otherwise append/merge) | false |
|
| CLEAR_PREVIOUS_HTML | Whether to clear previous card HTML (otherwise append/merge) | false |
|
||||||
| MESSAGE_COUNT | Use the latest N messages to build the card | 1 |
|
| MESSAGE_COUNT | Use the latest N messages to build the card | 1 |
|
||||||
|
|
||||||
## How to Use
|
## Troubleshooting ❓
|
||||||
|
|
||||||
1. Install and enable “Flash Card”.
|
- **Plugin not working?**: Check if the filter/action is enabled in the model settings.
|
||||||
2. Send the text to the chat (multi-turn supported; governed by MESSAGE_COUNT).
|
- **Debug Logs**: Enable `SHOW_STATUS` in Valves to see progress updates.
|
||||||
3. Watch status updates; the card HTML is embedded into the latest message.
|
- **Error Messages**: If you see an error, please copy the full error message and report it.
|
||||||
4. To regenerate from scratch, toggle CLEAR_PREVIOUS_HTML or resend text.
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
## Output Format
|
|
||||||
|
|
||||||
- JSON fields: `title`, `summary`, `key_points` (3–5), `tags` (2–4), `category`.
|
|
||||||
- UI: gradient-styled card with tags, key-point list; supports stacking multiple cards.
|
|
||||||
|
|
||||||
## Tips
|
|
||||||
|
|
||||||
- Very short text triggers a prompt to add more; consider summarizing first.
|
|
||||||
- Long text is accepted; for deep analysis, pre-condense with other tools before card creation.
|
|
||||||
|
|
||||||
## Author
|
|
||||||
|
|
||||||
Fu-Jie
|
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
## Changelog
|
|
||||||
|
|
||||||
### v0.2.4
|
|
||||||
|
|
||||||
- Removed debug messages from output
|
|
||||||
|
|||||||
@@ -2,9 +2,18 @@
|
|||||||
|
|
||||||
快速将文本提炼为精美的学习记忆卡片,自动抽取标题、摘要、关键要点、标签和分类,适合复习与分享。
|
快速将文本提炼为精美的学习记忆卡片,自动抽取标题、摘要、关键要点、标签和分类,适合复习与分享。
|
||||||
|
|
||||||
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 0.2.4 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
|
## 预览 📸
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## 功能亮点
|
## 更新日志
|
||||||
|
|
||||||
|
### v0.2.4
|
||||||
|
- **输出优化**: 移除输出中的调试信息。
|
||||||
|
|
||||||
|
## 核心特性 🔑
|
||||||
|
|
||||||
- **一键生成**:输入任意文本,直接产出结构化卡片。
|
- **一键生成**:输入任意文本,直接产出结构化卡片。
|
||||||
- **要点聚合**:自动提取 3-5 个记忆要点与 2-4 个标签。
|
- **要点聚合**:自动提取 3-5 个记忆要点与 2-4 个标签。
|
||||||
@@ -12,7 +21,14 @@
|
|||||||
- **渐进合并**:多次调用会将新卡片合并到同一 HTML 容器中;如需重置可启用清空选项。
|
- **渐进合并**:多次调用会将新卡片合并到同一 HTML 容器中;如需重置可启用清空选项。
|
||||||
- **状态提示**:实时推送“生成中/完成/错误”等状态与通知。
|
- **状态提示**:实时推送“生成中/完成/错误”等状态与通知。
|
||||||
|
|
||||||
## 参数说明
|
## 使用方法 🛠️
|
||||||
|
|
||||||
|
1. **安装**: 在插件市场安装并启用“闪记卡”。
|
||||||
|
2. **配置**: 根据需要调整 Valves 设置(可选)。
|
||||||
|
3. **触发**: 将待整理的文本发送到聊天框。
|
||||||
|
4. **结果**: 等待状态提示,卡片将以 HTML 形式嵌入到最新消息中。
|
||||||
|
|
||||||
|
## 配置参数 (Valves) ⚙️
|
||||||
|
|
||||||
| 参数 | 说明 | 默认值 |
|
| 参数 | 说明 | 默认值 |
|
||||||
| ------------------- | ------------------------------------- | ------ |
|
| ------------------- | ------------------------------------- | ------ |
|
||||||
@@ -23,34 +39,9 @@
|
|||||||
| CLEAR_PREVIOUS_HTML | 是否清空旧的卡片 HTML(否则合并追加) | false |
|
| CLEAR_PREVIOUS_HTML | 是否清空旧的卡片 HTML(否则合并追加) | false |
|
||||||
| MESSAGE_COUNT | 取最近 N 条消息生成卡片 | 1 |
|
| MESSAGE_COUNT | 取最近 N 条消息生成卡片 | 1 |
|
||||||
|
|
||||||
## 使用步骤
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
1. 在插件市场安装并启用“闪记卡”。
|
- **插件不工作?**: 请检查是否在模型设置中启用了该过滤器/动作。
|
||||||
2. 将待整理的文本发送到聊天框(可多轮对话,受 MESSAGE_COUNT 控制)。
|
- **调试日志**: 在 Valves 中启用 `SHOW_STATUS` 以查看进度更新。
|
||||||
3. 等待状态提示,卡片将以 HTML 形式嵌入到最新消息中。
|
- **错误信息**: 如果看到错误,请复制完整的错误信息并报告。
|
||||||
4. 若需重新生成,开启 CLEAR_PREVIOUS_HTML 或直接重发文本。
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
## 输出格式
|
|
||||||
|
|
||||||
- JSON 字段:`title`、`summary`、`key_points`(3-5 条)、`tags`(2-4 条)、`category`。
|
|
||||||
- 前端呈现:单卡片带渐变主题、标签胶囊、要点列表,可连续追加多张卡片。
|
|
||||||
|
|
||||||
## 使用建议
|
|
||||||
|
|
||||||
- 文本过短会提醒补充,可先汇总再生成卡片。
|
|
||||||
- 长文本无需截断,直接生成;如需深度分析可先用其他工具精炼后再制作卡片。
|
|
||||||
|
|
||||||
## 作者
|
|
||||||
|
|
||||||
Fu-Jie
|
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
## 更新日志
|
|
||||||
|
|
||||||
### v0.2.4
|
|
||||||
|
|
||||||
- 移除输出中的调试信息
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 📊 Smart Infographic (AntV)
|
# 📊 Smart Infographic (AntV)
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.4.9 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.4.9 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
An Open WebUI plugin powered by the AntV Infographic engine. It transforms long text into professional, beautiful infographics with a single click.
|
An Open WebUI plugin powered by the AntV Infographic engine. It transforms long text into professional, beautiful infographics with a single click.
|
||||||
|
|
||||||
@@ -56,6 +56,14 @@ You can adjust the following parameters in the plugin settings to optimize the g
|
|||||||
| **Hierarchy** | `hierarchy-tree-tech-style-capsule-item`, `hierarchy-structure` | Org Charts, Structures |
|
| **Hierarchy** | `hierarchy-tree-tech-style-capsule-item`, `hierarchy-structure` | Org Charts, Structures |
|
||||||
| **Charts** | `chart-column-simple`, `chart-bar-plain-text`, `chart-line-plain-text`, `chart-wordcloud` | Trends, Distributions, Metrics |
|
| **Charts** | `chart-column-simple`, `chart-bar-plain-text`, `chart-line-plain-text`, `chart-wordcloud` | Trends, Distributions, Metrics |
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
- **Plugin not working?**: Check if the filter/action is enabled in the model settings.
|
||||||
|
- **Debug Logs**: Enable `SHOW_STATUS` in Valves to see progress updates.
|
||||||
|
- **Error Messages**: If you see an error, please copy the full error message and report it.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
|
|
||||||
## 📝 Syntax Example (For Advanced Users)
|
## 📝 Syntax Example (For Advanced Users)
|
||||||
|
|
||||||
You can also input this syntax directly for AI to render:
|
You can also input this syntax directly for AI to render:
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# 📊 智能信息图 (AntV Infographic)
|
# 📊 智能信息图 (AntV Infographic)
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.4.9 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui)
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.4.9 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
基于 AntV Infographic 引擎的 Open WebUI 插件,能够将长文本内容一键转换为专业、美观的信息图表。
|
基于 AntV Infographic 引擎的 Open WebUI 插件,能够将长文本内容一键转换为专业、美观的信息图表。
|
||||||
|
|
||||||
@@ -56,6 +56,14 @@
|
|||||||
| **层级与结构** | `hierarchy-tree-tech-style-capsule-item`, `hierarchy-structure` | 组织架构、层级关系 |
|
| **层级与结构** | `hierarchy-tree-tech-style-capsule-item`, `hierarchy-structure` | 组织架构、层级关系 |
|
||||||
| **图表与数据** | `chart-column-simple`, `chart-bar-plain-text`, `chart-line-plain-text`, `chart-wordcloud` | 数据趋势、比例分布、数值对比 |
|
| **图表与数据** | `chart-column-simple`, `chart-bar-plain-text`, `chart-line-plain-text`, `chart-wordcloud` | 数据趋势、比例分布、数值对比 |
|
||||||
|
|
||||||
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
|
- **插件不工作?**: 请检查是否在模型设置中启用了该过滤器/动作。
|
||||||
|
- **调试日志**: 在 Valves 中启用 `SHOW_STATUS` 以查看进度更新。
|
||||||
|
- **错误信息**: 如果看到错误,请复制完整的错误信息并报告。
|
||||||
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
|
|
||||||
## 📝 语法示例 (高级用户)
|
## 📝 语法示例 (高级用户)
|
||||||
|
|
||||||
你也可以直接输入以下语法让 AI 渲染:
|
你也可以直接输入以下语法让 AI 渲染:
|
||||||
|
|||||||
@@ -1,65 +0,0 @@
|
|||||||
# 📊 Smart Infographic (AntV)
|
|
||||||
|
|
||||||
An Open WebUI plugin powered by the AntV Infographic engine. It transforms long text into professional, beautiful infographics with a single click.
|
|
||||||
|
|
||||||
## ✨ Key Features
|
|
||||||
|
|
||||||
- 🚀 **AI-Powered Transformation**: Automatically analyzes text logic, extracts key points, and generates structured charts.
|
|
||||||
- 🎨 **Professional Templates**: Includes various AntV official templates: Lists, Trees, Mindmaps, Comparison Tables, Flowcharts, and Statistical Charts.
|
|
||||||
- 🔍 **Auto-Icon Matching**: Built-in logic to search and match the most relevant Material Design Icons based on content.
|
|
||||||
- 📥 **Multi-Format Export**: Download your infographics as **SVG**, **PNG**, or a **Standalone HTML** file.
|
|
||||||
- 🌈 **Highly Customizable**: Supports Dark/Light modes, auto-adapts theme colors, with bold titles and refined card layouts.
|
|
||||||
- 📱 **Responsive Design**: Generated charts look great on both desktop and mobile devices.
|
|
||||||
|
|
||||||
## 🛠️ Supported Template Types
|
|
||||||
|
|
||||||
| Category | Template Name | Use Case |
|
|
||||||
| :--- | :--- | :--- |
|
|
||||||
| **Lists & Hierarchy** | `list-grid`, `tree-vertical`, `mindmap` | Features, Org Charts, Brainstorming |
|
|
||||||
| **Sequence & Relation** | `sequence-roadmap`, `relation-circle` | Roadmaps, Circular Flows, Steps |
|
|
||||||
| **Comparison & Analysis** | `compare-binary`, `compare-swot`, `quadrant-quarter` | Pros/Cons, SWOT, Quadrants |
|
|
||||||
| **Charts & Data** | `chart-bar`, `chart-line`, `chart-pie` | Trends, Distributions, Metrics |
|
|
||||||
|
|
||||||
## 🚀 How to Use
|
|
||||||
|
|
||||||
1. **Install**: Search for "Smart Infographic" in the Open WebUI Community and install.
|
|
||||||
2. **Trigger**: Enter your text in the chat, then click the **Action Button** (📊 icon) next to the input box.
|
|
||||||
3. **AI Processing**: The AI analyzes the text and generates the infographic syntax.
|
|
||||||
4. **Preview & Download**: Preview the result and use the download buttons below to save your infographic.
|
|
||||||
|
|
||||||
## ⚙️ Configuration (Valves)
|
|
||||||
|
|
||||||
You can adjust the following parameters in the plugin settings to optimize the generation:
|
|
||||||
|
|
||||||
| Parameter | Default | Description |
|
|
||||||
| :--- | :--- | :--- |
|
|
||||||
| **Show Status (SHOW_STATUS)** | `True` | Whether to show real-time AI analysis and generation status in the chat. |
|
|
||||||
| **Model ID (MODEL_ID)** | `Empty` | Specify the LLM model for text analysis. If empty, the current chat model is used. |
|
|
||||||
| **Min Text Length (MIN_TEXT_LENGTH)** | `100` | Minimum characters required to trigger analysis, preventing accidental triggers on short text. |
|
|
||||||
| **Clear Previous (CLEAR_PREVIOUS_HTML)** | `False` | Whether to clear previous charts. If `False`, new charts will be appended below. |
|
|
||||||
| **Message Count (MESSAGE_COUNT)** | `1` | Number of recent messages to use for analysis. Increase this for more context. |
|
|
||||||
|
|
||||||
## 📝 Syntax Example (For Advanced Users)
|
|
||||||
|
|
||||||
You can also input this syntax directly for AI to render:
|
|
||||||
|
|
||||||
```infographic
|
|
||||||
infographic list-grid
|
|
||||||
data
|
|
||||||
title 🚀 Plugin Benefits
|
|
||||||
desc Why use the Smart Infographic plugin
|
|
||||||
items
|
|
||||||
- label Fast Generation
|
|
||||||
desc Convert text to charts in seconds
|
|
||||||
- label Beautiful Design
|
|
||||||
desc Uses AntV professional design standards
|
|
||||||
```
|
|
||||||
|
|
||||||
## 👨💻 Author
|
|
||||||
|
|
||||||
**jeff**
|
|
||||||
- GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
|
||||||
|
|
||||||
## 📄 License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 162 KiB After Width: | Height: | Size: 234 KiB |
@@ -1,14 +1,10 @@
|
|||||||
# Smart Mind Map - Mind Mapping Generation Plugin
|
# Smart Mind Map - Mind Mapping Generation Plugin
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.9.1 | **License:** MIT
|
|
||||||
|
|
||||||
> **Important**: To ensure the maintainability and usability of all plugins, each plugin should be accompanied by clear and comprehensive documentation to ensure its functionality, configuration, and usage are well explained.
|
|
||||||
|
|
||||||
Smart Mind Map is a powerful OpenWebUI action plugin that intelligently analyzes long-form text content and automatically generates interactive mind maps, helping users structure and visualize knowledge.
|
Smart Mind Map is a powerful OpenWebUI action plugin that intelligently analyzes long-form text content and automatically generates interactive mind maps, helping users structure and visualize knowledge.
|
||||||
|
|
||||||
---
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 0.9.1 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
## 🔥 What's New in v0.9.1
|
## What's New in v0.9.1
|
||||||
|
|
||||||
**New Feature: Image Output Mode**
|
**New Feature: Image Output Mode**
|
||||||
|
|
||||||
@@ -18,362 +14,51 @@ Smart Mind Map is a powerful OpenWebUI action plugin that intelligently analyzes
|
|||||||
- **Efficient Storage**: Image mode uploads SVG to `/api/v1/files`, avoiding huge base64 strings in chat history.
|
- **Efficient Storage**: Image mode uploads SVG to `/api/v1/files`, avoiding huge base64 strings in chat history.
|
||||||
- **Smart Features**: Auto-responsive width and automatic theme detection (light/dark) for generated images.
|
- **Smart Features**: Auto-responsive width and automatic theme detection (light/dark) for generated images.
|
||||||
|
|
||||||
| Feature | HTML Mode (Default) | Image Mode |
|
## Key Features 🔑
|
||||||
| :--- | :--- | :--- |
|
|
||||||
| **Output Format** | Interactive HTML Block | Static Markdown Image |
|
|
||||||
| **Interactivity** | Zoom, Pan, Expand/Collapse | None (Static Image) |
|
|
||||||
| **Chat History** | Contains HTML Code | Clean (Image URL only) |
|
|
||||||
| **Storage** | Browser Rendering | `/api/v1/files` Upload |
|
|
||||||
|
|
||||||
---
|
- ✅ **Intelligent Text Analysis**: Automatically identifies core themes, key concepts, and hierarchical structures.
|
||||||
|
- ✅ **Interactive Visualization**: Generates beautiful interactive mind maps based on Markmap.js.
|
||||||
|
- ✅ **High-Resolution PNG Export**: Export mind maps as high-quality PNG images (9x scale).
|
||||||
|
- ✅ **Complete Control Panel**: Zoom controls, expand level selection, and fullscreen mode.
|
||||||
|
- ✅ **Theme Switching**: Manual theme toggle button with automatic theme detection.
|
||||||
|
- ✅ **Image Output Mode**: Generate static SVG images embedded directly in Markdown for cleaner history.
|
||||||
|
|
||||||
## Core Features
|
## How to Use 🛠️
|
||||||
|
|
||||||
- ✅ **Intelligent Text Analysis**: Automatically identifies core themes, key concepts, and hierarchical structures
|
1. **Install**: Upload the `smart_mind_map.py` file in OpenWebUI Admin Settings -> Plugins -> Actions.
|
||||||
- ✅ **Interactive Visualization**: Generates beautiful interactive mind maps based on Markmap.js
|
2. **Configure**: Ensure you have an LLM model configured (e.g., `gemini-2.5-flash`).
|
||||||
- ✅ **High-Resolution PNG Export**: Export mind maps as high-quality PNG images (9x scale, ~1-2MB file size)
|
3. **Trigger**: Enable the "Smart Mind Map" action in chat settings and send text (at least 100 characters).
|
||||||
- ✅ **Complete Control Panel**: Zoom controls (+/-/reset), expand level selection (All/2/3 levels), and fullscreen mode
|
4. **Result**: The mind map will be rendered directly in the chat interface.
|
||||||
- ✅ **Theme Switching**: Manual theme toggle button (light/dark) with automatic theme detection
|
|
||||||
- ✅ **Dark Mode Support**: Full dark mode support with automatic detection and manual override
|
|
||||||
- ✅ **Multi-language Support**: Automatically adjusts output based on user language
|
|
||||||
- ✅ **Real-time Rendering**: Renders mind maps directly in the chat interface without navigation
|
|
||||||
- ✅ **Export Capabilities**: Supports PNG, SVG code, and Markdown source export
|
|
||||||
- ✅ **Customizable Configuration**: Configurable LLM model, minimum text length, and other parameters
|
|
||||||
- ✅ **Image Output Mode**: Generate static SVG images embedded directly in Markdown (**No HTML code output**, cleaner chat history)
|
|
||||||
|
|
||||||
---
|
## Configuration (Valves) ⚙️
|
||||||
|
|
||||||
## How It Works
|
|
||||||
|
|
||||||
1. **Text Extraction**: Extracts text content from user messages (automatically filters HTML code blocks)
|
|
||||||
2. **Intelligent Analysis**: Analyzes text structure using the configured LLM model
|
|
||||||
3. **Markdown Generation**: Converts analysis results to Markmap-compatible Markdown format
|
|
||||||
4. **Visual Rendering**: Renders the mind map using Markmap.js in an HTML template with optimized font hierarchy (H1: 22px bold, H2: 18px bold)
|
|
||||||
5. **Interactive Display**: Presents the mind map to users in an interactive format with complete control panel
|
|
||||||
6. **Theme Detection**: Automatically detects and applies the current OpenWebUI theme (light/dark mode)
|
|
||||||
7. **Export Options**: Provides PNG (high-resolution), SVG, and Markdown export functionality
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Installation and Configuration
|
|
||||||
|
|
||||||
### 1. Plugin Installation
|
|
||||||
|
|
||||||
1. Download the `smart_mind_map.py` file to your local computer
|
|
||||||
2. In OpenWebUI Admin Settings, find the "Plugins" section
|
|
||||||
3. Select "Actions" type
|
|
||||||
4. Upload the downloaded file
|
|
||||||
5. Refresh the page, and the plugin will be available
|
|
||||||
|
|
||||||
### 2. Model Configuration
|
|
||||||
|
|
||||||
The plugin requires access to an LLM model for text analysis. Please ensure:
|
|
||||||
|
|
||||||
- Your OpenWebUI instance has at least one available LLM model configured
|
|
||||||
- Recommended to use fast, economical models (e.g., `gemini-2.5-flash`) for the best experience
|
|
||||||
- Configure the `LLM_MODEL_ID` parameter in the plugin settings
|
|
||||||
|
|
||||||
### 3. Plugin Activation
|
|
||||||
|
|
||||||
Select the "Smart Mind Map" action plugin in chat settings to enable it.
|
|
||||||
|
|
||||||
### 4. Theme Color Consistency (Optional)
|
|
||||||
|
|
||||||
To keep the mind map visually consistent with the OpenWebUI theme colors, enable same-origin access for artifacts in OpenWebUI:
|
|
||||||
|
|
||||||
- **Configuration Location**: In OpenWebUI User Settings: **Interface** → **Artifacts** → **iframe Sandbox Allow Same Origin**
|
|
||||||
- **Enable Option**: Check the "Allow same-origin access for artifacts" / "iframe sandbox allow-same-origin" option
|
|
||||||
- **Sandbox Attributes**: Ensure the iframe's sandbox attribute includes both `allow-same-origin` and `allow-scripts`
|
|
||||||
|
|
||||||
Once enabled, the mind map will automatically detect and apply the current OpenWebUI theme (light/dark) without any manual configuration.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Configuration Parameters
|
|
||||||
|
|
||||||
You can adjust the following parameters in the plugin's settings (Valves):
|
|
||||||
|
|
||||||
| Parameter | Default | Description |
|
| Parameter | Default | Description |
|
||||||
| :--- | :--- | :--- |
|
| :--- | :--- | :--- |
|
||||||
| `show_status` | `true` | Whether to display operation status updates in the chat interface (e.g., "Analyzing..."). |
|
| `show_status` | `true` | Whether to display operation status updates. |
|
||||||
| `LLM_MODEL_ID` | `gemini-2.5-flash` | LLM model ID for text analysis. Recommended to use fast and economical models. |
|
| `LLM_MODEL_ID` | `gemini-2.5-flash` | LLM model ID for text analysis. |
|
||||||
| `MIN_TEXT_LENGTH` | `100` | Minimum text length (in characters) required for mind map analysis. Text that's too short cannot generate valid mind maps. |
|
| `MIN_TEXT_LENGTH` | `100` | Minimum text length required for analysis. |
|
||||||
| `CLEAR_PREVIOUS_HTML` | `false` | Whether to clear previous plugin-generated HTML content when generating a new mind map. |
|
| `CLEAR_PREVIOUS_HTML` | `false` | Whether to clear previous plugin-generated HTML content. |
|
||||||
| `MESSAGE_COUNT` | `1` | Number of recent messages to use for mind map generation (1-5). |
|
| `MESSAGE_COUNT` | `1` | Number of recent messages to use for generation (1-5). |
|
||||||
| `OUTPUT_MODE` | `html` | Output mode: `html` for interactive HTML (default), or `image` to embed as static Markdown image. |
|
| `OUTPUT_MODE` | `html` | Output mode: `html` (interactive) or `image` (static). |
|
||||||
|
|
||||||
---
|
## Troubleshooting ❓
|
||||||
|
|
||||||
## Usage
|
- **Plugin not working**: Check whether the action is enabled in the chat settings.
|
||||||
|
- **Text too short**: Ensure input text contains at least 100 characters.
|
||||||
### Basic Usage
|
- **Rendering failed**: Check browser console for errors related to Markmap.js or D3.js.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
1. Enable the "Smart Mind Map" action in chat settings
|
|
||||||
2. Input or paste long-form text content (at least 100 characters) in the conversation
|
|
||||||
3. After sending the message, the plugin will automatically analyze and generate a mind map
|
|
||||||
4. The mind map will be rendered directly in the chat interface
|
|
||||||
|
|
||||||
### Usage Example
|
|
||||||
|
|
||||||
**Input Text:**
|
|
||||||
|
|
||||||
```
|
|
||||||
Artificial Intelligence (AI) is a branch of computer science dedicated to creating systems capable of performing tasks that typically require human intelligence.
|
|
||||||
Main application areas include:
|
|
||||||
1. Machine Learning - Enables computers to learn from data
|
|
||||||
2. Natural Language Processing - Understanding and generating human language
|
|
||||||
3. Computer Vision - Recognizing and processing images
|
|
||||||
4. Robotics - Creating intelligent systems that can interact with the physical world
|
|
||||||
```
|
|
||||||
|
|
||||||
**Generated Result:**
|
|
||||||
The plugin will generate an interactive mind map centered on "Artificial Intelligence", including major application areas and their sub-concepts.
|
|
||||||
|
|
||||||
### Export Features
|
|
||||||
|
|
||||||
Generated mind maps support three export methods:
|
|
||||||
|
|
||||||
1. **Download PNG**: Click the "📥 Download PNG" button to export the mind map as a high-resolution PNG image (9x scale, ~1-2MB file size)
|
|
||||||
2. **Copy SVG Code**: Click the "Copy SVG Code" button to copy the mind map in SVG format to the clipboard
|
|
||||||
3. **Copy Markdown**: Click the "Copy Markdown" button to copy the raw Markdown format to the clipboard
|
|
||||||
|
|
||||||
### Control Panel
|
|
||||||
|
|
||||||
The interactive mind map includes a comprehensive control panel:
|
|
||||||
|
|
||||||
- **Zoom Controls**: `+` (zoom in), `-` (zoom out), `↻` (reset view)
|
|
||||||
- **Expand Level**: Switch between "All", "2 Levels", "3 Levels" to control node expansion depth
|
|
||||||
- **Fullscreen**: Enter fullscreen mode for better viewing experience
|
|
||||||
- **Theme Toggle**: Manually switch between light and dark themes
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Technical Architecture
|
## Technical Architecture
|
||||||
|
|
||||||
### Frontend Rendering
|
- **Markmap.js**: Open-source mind mapping rendering engine.
|
||||||
|
- **PNG Export**: 9x scale factor for print-quality output (~1-2MB file size).
|
||||||
- **Markmap.js**: Open-source mind mapping rendering engine
|
- **Theme Detection**: 4-level priority detection (Manual > Meta > Class > System).
|
||||||
- **D3.js**: Data visualization foundation library
|
- **Security**: XSS protection and input validation.
|
||||||
- **Responsive Design**: Adapts to different screen sizes
|
|
||||||
- **Font Hierarchy**: Optimized typography with H1 (22px bold) and H2 (18px bold) for better readability
|
|
||||||
|
|
||||||
### PNG Export Technology
|
|
||||||
|
|
||||||
- **SVG to Canvas Conversion**: Converts mind map SVG to canvas for PNG export
|
|
||||||
- **ForeignObject Handling**: Properly processes HTML content within SVG elements
|
|
||||||
- **High Resolution**: 9x scale factor for print-quality output (~1-2MB file size)
|
|
||||||
- **Theme Preservation**: Maintains current theme (light/dark) in exported PNG
|
|
||||||
|
|
||||||
### Theme Detection Mechanism
|
|
||||||
|
|
||||||
Automatically detects and applies themes with a 4-level priority:
|
|
||||||
|
|
||||||
1. **Explicit Toggle**: User manually clicks theme toggle button (highest priority)
|
|
||||||
2. **Meta Tag**: Reads `<meta name="theme-color">` from parent document
|
|
||||||
3. **Class/Data-Theme**: Checks `class` or `data-theme` attributes on parent HTML/body
|
|
||||||
4. **System Preference**: Falls back to `prefers-color-scheme` media query
|
|
||||||
|
|
||||||
### Backend Processing
|
|
||||||
|
|
||||||
- **LLM Integration**: Calls configured models via `generate_chat_completion`
|
|
||||||
- **Text Preprocessing**: Automatically filters HTML code blocks, extracts plain text content
|
|
||||||
- **Format Conversion**: Converts LLM output to Markmap-compatible Markdown format
|
|
||||||
|
|
||||||
### Security Enhancements
|
|
||||||
|
|
||||||
- **XSS Protection**: Automatically escapes `</script>` tags to prevent script injection
|
|
||||||
- **Input Validation**: Checks text length to avoid invalid requests
|
|
||||||
- **Non-Bubbling Events**: Button clicks use `stopPropagation()` to prevent navigation interception
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Issue: Plugin Won't Start
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Check OpenWebUI logs for error messages
|
|
||||||
- Confirm the plugin is correctly uploaded and enabled
|
|
||||||
- Verify OpenWebUI version supports action plugins
|
|
||||||
|
|
||||||
### Issue: Text Content Too Short
|
|
||||||
|
|
||||||
**Symptom:** Prompt shows "Text content is too short for effective analysis"
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Ensure input text contains at least 100 characters (default configuration)
|
|
||||||
- Lower the `MIN_TEXT_LENGTH` parameter value in plugin settings
|
|
||||||
- Provide more detailed, structured text content
|
|
||||||
|
|
||||||
### Issue: Mind Map Not Generated
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Check if `LLM_MODEL_ID` is configured correctly
|
|
||||||
- Confirm the configured model is available in OpenWebUI
|
|
||||||
- Review backend logs for LLM call failures
|
|
||||||
- Verify user has sufficient permissions to access the configured model
|
|
||||||
|
|
||||||
### Issue: Mind Map Display Error
|
|
||||||
|
|
||||||
**Symptom:** Shows "⚠️ Mind map rendering failed"
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Check browser console for error messages
|
|
||||||
- Confirm Markmap.js and D3.js libraries are loading correctly
|
|
||||||
- Verify generated Markdown format conforms to Markmap specifications
|
|
||||||
- Try refreshing the page to re-render
|
|
||||||
|
|
||||||
### Issue: PNG Export Not Working
|
|
||||||
|
|
||||||
**Symptom:** PNG download button doesn't work or produces blank/corrupted images
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Ensure browser supports HTML5 Canvas API (all modern browsers do)
|
|
||||||
- Check browser console for errors related to `toDataURL()` or canvas rendering
|
|
||||||
- Verify the mind map is fully rendered before clicking export
|
|
||||||
- Try refreshing the page and re-generating the mind map
|
|
||||||
- Use Chrome or Firefox for best PNG export compatibility
|
|
||||||
|
|
||||||
### Issue: Theme Not Auto-Detected
|
|
||||||
|
|
||||||
**Symptom:** Mind map doesn't match OpenWebUI theme colors
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Enable "iframe Sandbox Allow Same Origin" in OpenWebUI Settings → Interface → Artifacts
|
|
||||||
- Verify the iframe's sandbox attribute includes both `allow-same-origin` and `allow-scripts`
|
|
||||||
- Ensure parent document has `<meta name="theme-color">` tag or theme class/attribute
|
|
||||||
- Use the manual theme toggle button to override automatic detection
|
|
||||||
- Check browser console for cross-origin errors
|
|
||||||
|
|
||||||
### Issue: Export Function Not Working
|
|
||||||
|
|
||||||
**Solution:**
|
|
||||||
|
|
||||||
- Confirm browser supports Clipboard API
|
|
||||||
- Check if browser is blocking clipboard access permissions
|
|
||||||
- Use modern browsers (Chrome, Firefox, Edge, etc.)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Best Practices
|
## Best Practices
|
||||||
|
|
||||||
1. **Text Preparation**
|
1. **Text Preparation**: Provide text with clear structure and distinct hierarchies.
|
||||||
- Provide text content with clear structure and distinct hierarchies
|
2. **Model Selection**: Use fast models like `gemini-2.5-flash` for daily use.
|
||||||
- Use paragraphs, lists, and other formatting to help LLM understand text structure
|
3. **Export Quality**: Use PNG for presentations and SVG for further editing.
|
||||||
- Avoid excessively lengthy or unstructured text
|
|
||||||
|
|
||||||
2. **Model Selection**
|
|
||||||
- For daily use, recommend fast models like `gemini-2.5-flash`
|
|
||||||
- For complex text analysis, use more powerful models (e.g., GPT-4)
|
|
||||||
- Balance speed and analysis quality based on needs
|
|
||||||
|
|
||||||
3. **Performance Optimization**
|
|
||||||
- Set `MIN_TEXT_LENGTH` appropriately to avoid processing text that's too short
|
|
||||||
- For particularly long texts, consider summarizing before generating mind maps
|
|
||||||
- Disable `show_status` in production environments to reduce interface updates
|
|
||||||
|
|
||||||
4. **Export Quality**
|
|
||||||
- **PNG Export**: Best for presentations, documents, and sharing (9x resolution suitable for printing)
|
|
||||||
- **SVG Export**: Best for further editing in vector graphics tools (infinite scalability)
|
|
||||||
- **Markdown Export**: Best for version control, collaboration, and regeneration
|
|
||||||
|
|
||||||
5. **Theme Consistency**
|
|
||||||
- Enable same-origin access for automatic theme detection
|
|
||||||
- Use manual theme toggle if automatic detection fails
|
|
||||||
- Export PNG after switching to desired theme for consistent visuals
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
This plugin uses only OpenWebUI's built-in dependencies. **No additional packages need to be installed.**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Changelog
|
|
||||||
|
|
||||||
### v0.9.1
|
|
||||||
|
|
||||||
**New Feature: Image Output Mode**
|
|
||||||
|
|
||||||
- Added `OUTPUT_MODE` configuration parameter with two options:
|
|
||||||
- `html` (default): Interactive HTML mind map with full control panel
|
|
||||||
- `image`: Static SVG image embedded directly in Markdown (uploaded to `/api/v1/files`)
|
|
||||||
- Image mode features:
|
|
||||||
- Auto-responsive width (adapts to chat container)
|
|
||||||
- Automatic theme detection (light/dark)
|
|
||||||
- Persistent storage via Chat API (survives page refresh)
|
|
||||||
- Efficient file storage (no huge base64 strings in chat history)
|
|
||||||
|
|
||||||
**Improvements:**
|
|
||||||
|
|
||||||
- Implemented robust Chat API update mechanism with retry logic
|
|
||||||
- Fixed message persistence using both `messages[]` and `history.messages`
|
|
||||||
- Added Event API for immediate frontend updates
|
|
||||||
- Removed unnecessary `SVG_WIDTH` and `SVG_HEIGHT` parameters (now auto-calculated)
|
|
||||||
|
|
||||||
**Technical Details:**
|
|
||||||
|
|
||||||
- Image mode uses `__event_call__` to execute JavaScript in the browser
|
|
||||||
- SVG is rendered offline, converted to Blob, and uploaded to OpenWebUI Files API
|
|
||||||
- Updates chat message with `/api/v1/files/{id}/content` URL via OpenWebUI Backend-Controlled API flow
|
|
||||||
|
|
||||||
### v0.8.2
|
|
||||||
|
|
||||||
- Removed debug messages from output
|
|
||||||
|
|
||||||
### v0.8.0 (Previous Version)
|
|
||||||
|
|
||||||
**Major Features:**
|
|
||||||
|
|
||||||
- Added high-resolution PNG export (9x scale, ~1-2MB file size)
|
|
||||||
- Implemented complete control panel with zoom controls (+/-/reset)
|
|
||||||
- Added expand level selection (All/2/3 levels)
|
|
||||||
- Integrated fullscreen mode with auto-fit
|
|
||||||
- Added manual theme toggle button (light/dark)
|
|
||||||
- Implemented automatic theme detection with 4-level priority
|
|
||||||
|
|
||||||
**Improvements:**
|
|
||||||
|
|
||||||
- Optimized font hierarchy (H1: 22px bold, H2: 18px bold)
|
|
||||||
- Enhanced dark mode with full theme support
|
|
||||||
- Improved PNG export technology (SVG to Canvas with foreignObject handling)
|
|
||||||
- Added theme preservation in exported PNG images
|
|
||||||
- Enhanced security with non-bubbling button events
|
|
||||||
|
|
||||||
**Bug Fixes:**
|
|
||||||
|
|
||||||
- Fixed theme detection in cross-origin iframes
|
|
||||||
- Resolved PNG export issues with HTML content in SVG
|
|
||||||
- Improved compatibility with OpenWebUI theme system
|
|
||||||
|
|
||||||
### v0.7.2
|
|
||||||
|
|
||||||
- Optimized text extraction logic, automatically filters HTML code blocks
|
|
||||||
- Improved error handling and user feedback
|
|
||||||
- Enhanced export functionality compatibility
|
|
||||||
- Optimized UI styling and interactive experience
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
This plugin is released under the MIT License.
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Welcome to submit issue reports and improvement suggestions! Please visit the project repository: [awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Related Resources
|
|
||||||
|
|
||||||
- [Markmap Official Website](https://markmap.js.org/)
|
|
||||||
- [OpenWebUI Documentation](https://docs.openwebui.com/)
|
|
||||||
- [D3.js Official Website](https://d3js.org/)
|
|
||||||
|
|||||||
@@ -1,14 +1,10 @@
|
|||||||
# 思维导图 - 思维导图生成插件
|
# 思维导图 - 思维导图生成插件
|
||||||
|
|
||||||
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 0.9.1 | **许可证:** MIT
|
|
||||||
|
|
||||||
> **重要提示**:为了确保所有插件的可维护性和易用性,每个插件都应附带清晰、完整的文档,以确保其功能、配置和使用方法得到充分说明。
|
|
||||||
|
|
||||||
思维导图是一个强大的 OpenWebUI 动作插件,能够智能分析长篇文本内容,自动生成交互式思维导图,帮助用户结构化和可视化知识。
|
思维导图是一个强大的 OpenWebUI 动作插件,能够智能分析长篇文本内容,自动生成交互式思维导图,帮助用户结构化和可视化知识。
|
||||||
|
|
||||||
---
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 0.9.1 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
## 🔥 v0.9.1 更新亮点
|
## v0.9.1 更新亮点
|
||||||
|
|
||||||
**新功能:图片输出模式**
|
**新功能:图片输出模式**
|
||||||
|
|
||||||
@@ -18,362 +14,51 @@
|
|||||||
- **高效存储**:图片模式将 SVG 上传至 `/api/v1/files`,避免聊天记录中出现超长 Base64 字符串。
|
- **高效存储**:图片模式将 SVG 上传至 `/api/v1/files`,避免聊天记录中出现超长 Base64 字符串。
|
||||||
- **智能特性**:生成的图片支持自动响应式宽度和自动主题检测(亮色/暗色)。
|
- **智能特性**:生成的图片支持自动响应式宽度和自动主题检测(亮色/暗色)。
|
||||||
|
|
||||||
| 特性 | HTML 模式 (默认) | 图片模式 |
|
## 核心特性 🔑
|
||||||
| :--- | :--- | :--- |
|
|
||||||
| **输出格式** | 交互式 HTML 代码块 | 静态 Markdown 图片 |
|
|
||||||
| **交互性** | 缩放、拖拽、展开/折叠 | 无 (静态图片) |
|
|
||||||
| **聊天记录** | 包含 HTML 代码 | 简洁 (仅图片链接) |
|
|
||||||
| **存储方式** | 浏览器实时渲染 | `/api/v1/files` 上传 |
|
|
||||||
|
|
||||||
---
|
- ✅ **智能文本分析**:自动识别文本的核心主题、关键概念和层次结构。
|
||||||
|
- ✅ **交互式可视化**:基于 Markmap.js 生成美观的交互式思维导图。
|
||||||
|
- ✅ **高分辨率 PNG 导出**:导出高质量的 PNG 图片(9 倍分辨率)。
|
||||||
|
- ✅ **完整控制面板**:缩放控制、展开层级选择、全屏模式。
|
||||||
|
- ✅ **主题切换**:手动主题切换按钮与自动主题检测。
|
||||||
|
- ✅ **图片输出模式**:生成静态 SVG 图片直接嵌入 Markdown,聊天记录更简洁。
|
||||||
|
|
||||||
## 核心特性
|
## 使用方法 🛠️
|
||||||
|
|
||||||
- ✅ **智能文本分析**:自动识别文本的核心主题、关键概念和层次结构
|
1. **安装**: 在 OpenWebUI 管理员设置 -> 插件 -> 动作中上传 `smart_mind_map_cn.py`。
|
||||||
- ✅ **交互式可视化**:基于 Markmap.js 生成美观的交互式思维导图
|
2. **配置**: 确保配置了 LLM 模型(如 `gemini-2.5-flash`)。
|
||||||
- ✅ **高分辨率 PNG 导出**:导出高质量的 PNG 图片(9 倍分辨率,约 1-2MB 文件大小)
|
3. **触发**: 在聊天设置中启用“思维导图”动作,并发送文本(至少 100 字符)。
|
||||||
- ✅ **完整控制面板**:缩放控制(+/-/重置)、展开层级选择(全部/2级/3级)、全屏模式
|
4. **结果**: 思维导图将在聊天界面中直接渲染显示。
|
||||||
- ✅ **主题切换**:手动主题切换按钮(亮色/暗色)与自动主题检测
|
|
||||||
- ✅ **深色模式支持**:完整的深色模式支持,自动检测与手动覆盖
|
|
||||||
- ✅ **多语言支持**:根据用户语言自动调整输出
|
|
||||||
- ✅ **实时渲染**:在聊天界面中直接渲染思维导图,无需跳转
|
|
||||||
- ✅ **导出功能**:支持 PNG、SVG 代码和 Markdown 源码导出
|
|
||||||
- ✅ **自定义配置**:可配置 LLM 模型、最小文本长度等参数
|
|
||||||
- ✅ **图片输出模式**:生成静态 SVG 图片直接嵌入 Markdown(**不输出 HTML 代码**,聊天记录更简洁)
|
|
||||||
|
|
||||||
---
|
## 配置参数 (Valves) ⚙️
|
||||||
|
|
||||||
## 工作原理
|
|
||||||
|
|
||||||
1. **文本提取**:从用户消息中提取文本内容(自动过滤 HTML 代码块)
|
|
||||||
2. **智能分析**:使用配置的 LLM 模型分析文本结构
|
|
||||||
3. **Markdown 生成**:将分析结果转换为 Markmap 兼容的 Markdown 格式
|
|
||||||
4. **可视化渲染**:在 HTML 模板中使用 Markmap.js 渲染思维导图,并优化字体层级(H1:22px 粗体,H2:18px 粗体)
|
|
||||||
5. **交互展示**:以可交互的形式展示给用户,并提供完整的控制面板
|
|
||||||
6. **主题检测**:自动检测并应用当前 OpenWebUI 的主题(亮色/暗色模式)
|
|
||||||
7. **导出选项**:提供 PNG(高分辨率)、SVG 和 Markdown 导出功能
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 安装与配置
|
|
||||||
|
|
||||||
### 1. 插件安装
|
|
||||||
|
|
||||||
1. 下载 `smart_mind_map_cn.py` 文件到本地
|
|
||||||
2. 在 OpenWebUI 管理员设置中找到"插件"(Plugins)部分
|
|
||||||
3. 选择"动作"(Actions)类型
|
|
||||||
4. 上传下载的文件
|
|
||||||
5. 刷新页面,插件即可使用
|
|
||||||
|
|
||||||
### 2. 模型配置
|
|
||||||
|
|
||||||
插件需要访问 LLM 模型来分析文本。请确保:
|
|
||||||
|
|
||||||
- 您的 OpenWebUI 实例中配置了至少一个可用的 LLM 模型
|
|
||||||
- 推荐使用快速、经济的模型(如 `gemini-2.5-flash`)来获得最佳体验
|
|
||||||
- 在插件设置中配置 `LLM_MODEL_ID` 参数
|
|
||||||
|
|
||||||
### 3. 插件启用
|
|
||||||
|
|
||||||
在聊天设置中选择"思维导图"动作插件即可启用。
|
|
||||||
|
|
||||||
### 4. 主题颜色风格一致性(可选)
|
|
||||||
|
|
||||||
为了使思维导图与 OpenWebUI 主题颜色风格保持一致,需要在 OpenWebUI 中启用 artifact 的同源访问:
|
|
||||||
|
|
||||||
- **配置位置**:在 OpenWebUI 用户设置中找到"界面"→"产物"部分(Settings → Interface → Products/Artifacts)
|
|
||||||
- **启用选项**:勾选 "iframe 沙盒允许同源访问"(Allow same-origin access for artifacts / iframe sandbox allow-same-origin)
|
|
||||||
- **沙箱属性**:确保 iframe 的 sandbox 属性包含 `allow-same-origin` 和 `allow-scripts`
|
|
||||||
|
|
||||||
启用后,思维导图会自动检测并应用 OpenWebUI 的当前主题(亮色/暗色),无需手动配置。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 配置参数
|
|
||||||
|
|
||||||
您可以在插件的设置(Valves)中调整以下参数:
|
|
||||||
|
|
||||||
| 参数 | 默认值 | 描述 |
|
| 参数 | 默认值 | 描述 |
|
||||||
| :--- | :--- | :--- |
|
| :--- | :--- | :--- |
|
||||||
| `show_status` | `true` | 是否在聊天界面显示操作状态更新(如"正在分析...")。 |
|
| `show_status` | `true` | 是否在聊天界面显示操作状态更新。 |
|
||||||
| `LLM_MODEL_ID` | `gemini-2.5-flash` | 用于文本分析的 LLM 模型 ID。推荐使用快速且经济的模型。 |
|
| `LLM_MODEL_ID` | `gemini-2.5-flash` | 用于文本分析的 LLM 模型 ID。 |
|
||||||
| `MIN_TEXT_LENGTH` | `100` | 进行思维导图分析所需的最小文本长度(字符数)。文本过短将无法生成有效的导图。 |
|
| `MIN_TEXT_LENGTH` | `100` | 进行思维导图分析所需的最小文本长度。 |
|
||||||
| `CLEAR_PREVIOUS_HTML` | `false` | 在生成新的思维导图时,是否清除之前由插件生成的 HTML 内容。 |
|
| `CLEAR_PREVIOUS_HTML` | `false` | 在生成新的思维导图时,是否清除之前的 HTML 内容。 |
|
||||||
| `MESSAGE_COUNT` | `1` | 用于生成思维导图的最近消息数量(1-5)。 |
|
| `MESSAGE_COUNT` | `1` | 用于生成思维导图的最近消息数量(1-5)。 |
|
||||||
| `OUTPUT_MODE` | `html` | 输出模式:`html` 为交互式 HTML(默认),`image` 为嵌入静态 Markdown 图片。 |
|
| `OUTPUT_MODE` | `html` | 输出模式:`html`(交互式)或 `image`(静态图片)。 |
|
||||||
|
|
||||||
---
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
## 使用方法
|
- **插件无法启动**:检查 OpenWebUI 日志,确认插件已正确上传并启用。
|
||||||
|
- **文本内容过短**:确保输入的文本至少包含 100 个字符。
|
||||||
### 基本使用
|
- **渲染失败**:检查浏览器控制台,确认 Markmap.js 和 D3.js 库是否正确加载。
|
||||||
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
1. 在聊天设置中启用"思维导图"动作
|
|
||||||
2. 在对话中输入或粘贴长篇文本内容(至少 100 字符)
|
|
||||||
3. 发送消息后,插件会自动分析并生成思维导图
|
|
||||||
4. 思维导图将在聊天界面中直接渲染显示
|
|
||||||
|
|
||||||
### 使用示例
|
|
||||||
|
|
||||||
**输入文本:**
|
|
||||||
|
|
||||||
```
|
|
||||||
人工智能(AI)是计算机科学的一个分支,致力于创建能够执行通常需要人类智能的任务的系统。
|
|
||||||
主要应用领域包括:
|
|
||||||
1. 机器学习 - 使计算机能够从数据中学习
|
|
||||||
2. 自然语言处理 - 理解和生成人类语言
|
|
||||||
3. 计算机视觉 - 识别和处理图像
|
|
||||||
4. 机器人技术 - 创建能够与物理世界交互的智能系统
|
|
||||||
```
|
|
||||||
|
|
||||||
**生成结果:**
|
|
||||||
插件会生成一个以"人工智能"为中心主题的交互式思维导图,包含主要应用领域及其子概念。
|
|
||||||
|
|
||||||
### 导出功能
|
|
||||||
|
|
||||||
生成的思维导图支持三种导出方式:
|
|
||||||
|
|
||||||
1. **下载 PNG**:点击“📥 下载 PNG”按钮,可将思维导图导出为高分辨率 PNG 图片(9 倍分辨率,约 1-2MB 文件大小)
|
|
||||||
2. **复制 SVG 代码**:点击“复制 SVG 代码”按钮,可将思维导图的 SVG 格式复制到剪贴板
|
|
||||||
3. **复制 Markdown**:点击“复制 Markdown”按钮,可将原始 Markdown 格式复制到剪贴板
|
|
||||||
|
|
||||||
### 控制面板
|
|
||||||
|
|
||||||
交互式思维导图包含完整的控制面板:
|
|
||||||
|
|
||||||
- **缩放控制**:`+`(放大)、`-`(缩小)、`↻`(重置视图)
|
|
||||||
- **展开层级**:在“全部”、“2 级”、“3 级”之间切换,控制节点展开深度
|
|
||||||
- **全屏模式**:进入全屏模式,获得更好的查看体验
|
|
||||||
- **主题切换**:手动在亮色和暗色主题之间切换
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 技术架构
|
## 技术架构
|
||||||
|
|
||||||
### 前端渲染
|
- **Markmap.js**:开源的思维导图渲染引擎。
|
||||||
|
- **PNG 导出技术**:9 倍缩放因子,输出打印级质量。
|
||||||
- **Markmap.js**:开源的思维导图渲染引擎
|
- **主题检测机制**:4 级优先级检测(手动 > Meta > Class > 系统)。
|
||||||
- **D3.js**:数据可视化基础库
|
- **安全性增强**:XSS 防护与输入验证。
|
||||||
- **响应式设计**:适配不同屏幕尺寸
|
|
||||||
- **字体层级**:优化的字体排版,H1(22px 粗体)和 H2(18px 粗体),提供更好的可读性
|
|
||||||
|
|
||||||
### PNG 导出技术
|
|
||||||
|
|
||||||
- **SVG 转 Canvas**:将思维导图 SVG 转换为 Canvas 以导出 PNG
|
|
||||||
- **ForeignObject 处理**:正确处理 SVG 元素中的 HTML 内容
|
|
||||||
- **高分辨率**:9 倍缩放因子,输出打印级质量(约 1-2MB 文件大小)
|
|
||||||
- **主题保持**:在导出的 PNG 中保持当前主题(亮色/暗色)
|
|
||||||
|
|
||||||
### 主题检测机制
|
|
||||||
|
|
||||||
自动检测并应用主题,具有 4 级优先级:
|
|
||||||
|
|
||||||
1. **显式切换**:用户手动点击主题切换按钮(最高优先级)
|
|
||||||
2. **Meta 标签**:从父文档读取 `<meta name="theme-color">`
|
|
||||||
3. **Class/Data-Theme**:检查父文档 HTML/body 的 `class` 或 `data-theme` 属性
|
|
||||||
4. **系统偏好**:回退到 `prefers-color-scheme` 媒体查询
|
|
||||||
|
|
||||||
### 后端处理
|
|
||||||
|
|
||||||
- **LLM 集成**:通过 `generate_chat_completion` 调用配置的模型
|
|
||||||
- **文本预处理**:自动过滤 HTML 代码块,提取纯文本内容
|
|
||||||
- **格式转换**:将 LLM 输出转换为 Markmap 兼容的 Markdown 格式
|
|
||||||
|
|
||||||
### 安全性增强
|
|
||||||
|
|
||||||
- **XSS 防护**:自动转义 `</script>` 标签,防止脚本注入
|
|
||||||
- **输入验证**:检查文本长度,避免无效请求
|
|
||||||
- **非冒泡事件**:按钮点击使用 `stopPropagation()` 防止导航拦截
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 故障排除
|
|
||||||
|
|
||||||
### 问题:插件无法启动
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 检查 OpenWebUI 日志,查看是否有错误信息
|
|
||||||
- 确认插件已正确上传并启用
|
|
||||||
- 验证 OpenWebUI 版本是否支持动作插件
|
|
||||||
|
|
||||||
### 问题:文本内容过短
|
|
||||||
|
|
||||||
**现象:** 提示"文本内容过短,无法进行有效分析"
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 确保输入的文本至少包含 100 个字符(默认配置)
|
|
||||||
- 可以在插件设置中降低 `MIN_TEXT_LENGTH` 参数值
|
|
||||||
- 提供更详细、结构化的文本内容
|
|
||||||
|
|
||||||
### 问题:思维导图未生成
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 检查 `LLM_MODEL_ID` 是否配置正确
|
|
||||||
- 确认配置的模型在 OpenWebUI 中可用
|
|
||||||
- 查看后端日志,检查是否有 LLM 调用失败的错误
|
|
||||||
- 验证用户是否有足够的权限访问配置的模型
|
|
||||||
|
|
||||||
### 问题:思维导图显示错误
|
|
||||||
|
|
||||||
**现象:** 显示"⚠️ 思维导图渲染失败"
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 检查浏览器控制台的错误信息
|
|
||||||
- 确认 Markmap.js 和 D3.js 库是否正确加载
|
|
||||||
- 验证生成的 Markdown 格式是否符合 Markmap 规范
|
|
||||||
- 尝试刷新页面重新渲染
|
|
||||||
|
|
||||||
### 问题:PNG 导出不工作
|
|
||||||
|
|
||||||
**现象:**PNG 下载按钮不工作或生成空白/损坏的图片
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 确保浏览器支持 HTML5 Canvas API(所有现代浏览器都支持)
|
|
||||||
- 检查浏览器控制台是否有与 `toDataURL()` 或 Canvas 渲染相关的错误
|
|
||||||
- 确保思维导图在点击导出前已完全渲染
|
|
||||||
- 尝试刷新页面并重新生成思维导图
|
|
||||||
- 使用 Chrome 或 Firefox,获得最佳 PNG 导出兼容性
|
|
||||||
|
|
||||||
### 问题:主题未自动检测
|
|
||||||
|
|
||||||
**现象:**思维导图不匹配 OpenWebUI 主题颜色
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 在 OpenWebUI 设置 → 界面 → 产物中,启用“iframe 沙盒允许同源访问”
|
|
||||||
- 验证 iframe 的 sandbox 属性包含 `allow-same-origin` 和 `allow-scripts`
|
|
||||||
- 确保父文档有 `<meta name="theme-color">` 标签或主题 class/属性
|
|
||||||
- 使用手动主题切换按钮覆盖自动检测
|
|
||||||
- 检查浏览器控制台是否有跨域错误
|
|
||||||
|
|
||||||
### 问题:导出功能不工作
|
|
||||||
|
|
||||||
**解决方案:**
|
|
||||||
|
|
||||||
- 确认浏览器支持剪贴板 API
|
|
||||||
- 检查浏览器是否阻止了剪贴板访问权限
|
|
||||||
- 使用现代浏览器(Chrome、Firefox、Edge 等)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 最佳实践
|
## 最佳实践
|
||||||
|
|
||||||
1. **文本准备**
|
1. **文本准备**:提供结构清晰、层次分明的文本内容。
|
||||||
- 提供结构清晰、层次分明的文本内容
|
2. **模型选择**:日常使用推荐 `gemini-2.5-flash` 等快速模型。
|
||||||
- 使用段落、列表等格式帮助 LLM 理解文本结构
|
3. **导出质量**:PNG 适合演示分享,SVG 适合进一步矢量编辑。
|
||||||
- 避免过于冗长或无结构的文本
|
|
||||||
|
|
||||||
2. **模型选择**
|
|
||||||
- 对于日常使用,推荐 `gemini-2.5-flash` 等快速模型
|
|
||||||
- 对于复杂文本分析,可以使用更强大的模型(如 GPT-4)
|
|
||||||
- 根据需求平衡速度和分析质量
|
|
||||||
|
|
||||||
3. **性能优化**
|
|
||||||
- 合理设置 `MIN_TEXT_LENGTH`,避免处理过短的文本
|
|
||||||
- 对于特别长的文本,考虑先进行摘要再生成思维导图
|
|
||||||
- 在生产环境中关闭 `show_status` 以减少界面更新
|
|
||||||
|
|
||||||
4. **导出质量**
|
|
||||||
- **PNG 导出**:最适合演示、文档和分享(9 倍分辨率适合打印)
|
|
||||||
- **SVG 导出**:最适合在矢量图形工具中进一步编辑(无限缩放)
|
|
||||||
- **Markdown 导出**:最适合版本控制、协作和重新生成
|
|
||||||
|
|
||||||
5. **主题一致性**
|
|
||||||
- 启用同源访问以实现自动主题检测
|
|
||||||
- 如果自动检测失败,使用手动主题切换
|
|
||||||
- 在切换到所需主题后导出 PNG,以保持视觉一致性
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 依赖要求
|
|
||||||
|
|
||||||
本插件仅使用 OpenWebUI 的内置依赖,**无需安装额外的软件包。**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 更新日志
|
|
||||||
|
|
||||||
### v0.9.1
|
|
||||||
|
|
||||||
**新功能:图片输出模式**
|
|
||||||
|
|
||||||
- 新增 `OUTPUT_MODE` 配置参数,支持两种模式:
|
|
||||||
- `html`(默认):交互式 HTML 思维导图,带完整控制面板
|
|
||||||
- `image`:静态 SVG 图片直接嵌入 Markdown(上传至 `/api/v1/files`)
|
|
||||||
- 图片模式特性:
|
|
||||||
- 自动响应式宽度(适应聊天容器)
|
|
||||||
- 自动主题检测(亮色/暗色)
|
|
||||||
- 通过 Chat API 持久化存储(刷新页面后保留)
|
|
||||||
- 高效文件存储(聊天记录中无超长 Base64 字符串)
|
|
||||||
|
|
||||||
**改进项:**
|
|
||||||
|
|
||||||
- 实现健壮的 Chat API 更新机制,带重试逻辑
|
|
||||||
- 修复消息持久化,同时更新 `messages[]` 和 `history.messages`
|
|
||||||
- 添加 Event API 实现即时前端更新
|
|
||||||
- 移除不必要的 `SVG_WIDTH` 和 `SVG_HEIGHT` 参数(现已自动计算)
|
|
||||||
|
|
||||||
**技术细节:**
|
|
||||||
|
|
||||||
- 图片模式使用 `__event_call__` 在浏览器中执行 JavaScript
|
|
||||||
- SVG 离屏渲染,转换为 Blob,并上传至 OpenWebUI Files API
|
|
||||||
- 通过 OpenWebUI Backend-Controlled API 流程更新聊天消息为 `/api/v1/files/{id}/content` URL
|
|
||||||
|
|
||||||
### v0.8.2
|
|
||||||
|
|
||||||
- 移除输出中的调试信息
|
|
||||||
|
|
||||||
### v0.8.0(历史版本)
|
|
||||||
|
|
||||||
**主要功能:**
|
|
||||||
|
|
||||||
- 添加高分辨率 PNG 导出(9 倍分辨率,约 1-2MB 文件大小)
|
|
||||||
- 实现完整的控制面板,包含缩放控制(+/-/重置)
|
|
||||||
- 添加展开层级选择(全部/2级/3级)
|
|
||||||
- 集成全屏模式,自动适应
|
|
||||||
- 添加手动主题切换按钮(亮色/暗色)
|
|
||||||
- 实现 4 级优先级的自动主题检测
|
|
||||||
|
|
||||||
**改进项:**
|
|
||||||
|
|
||||||
- 优化字体层级(H1:22px 粗体,H2:18px 粗体)
|
|
||||||
- 增强深色模式,完整的主题支持
|
|
||||||
- 改进 PNG 导出技术(SVG 转 Canvas,处理 foreignObject)
|
|
||||||
- 在导出的 PNG 图片中保持主题
|
|
||||||
- 增强安全性,按钮事件使用非冒泡机制
|
|
||||||
|
|
||||||
**Bug 修复:**
|
|
||||||
|
|
||||||
- 修复跨域 iframe 中的主题检测问题
|
|
||||||
- 解决 SVG 中 HTML 内容的 PNG 导出问题
|
|
||||||
- 改进与 OpenWebUI 主题系统的兼容性
|
|
||||||
|
|
||||||
### v0.7.2
|
|
||||||
|
|
||||||
- 优化文本提取逻辑,自动过滤 HTML 代码块
|
|
||||||
- 改进错误处理和用户反馈
|
|
||||||
- 增强导出功能的兼容性
|
|
||||||
- 优化 UI 样式和交互体验
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
本插件采用 MIT 许可证发布。
|
|
||||||
|
|
||||||
## 贡献
|
|
||||||
|
|
||||||
欢迎提交问题报告和改进建议!请访问项目仓库:[awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 相关资源
|
|
||||||
|
|
||||||
- [Markmap 官方网站](https://markmap.js.org/)
|
|
||||||
- [OpenWebUI 文档](https://docs.openwebui.com/)
|
|
||||||
- [D3.js 官方网站](https://d3js.org/)
|
|
||||||
|
|||||||
@@ -48,7 +48,3 @@ When adding a new filter, please follow these steps:
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
@@ -70,7 +70,3 @@
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
@@ -1,9 +1,19 @@
|
|||||||
# Async Context Compression Filter
|
# Async Context Compression Filter
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.1.3 | **License:** MIT
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.2.0 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
|
|
||||||
This filter reduces token consumption in long conversations through intelligent summarization and message compression while keeping conversations coherent.
|
This filter reduces token consumption in long conversations through intelligent summarization and message compression while keeping conversations coherent.
|
||||||
|
|
||||||
|
## What's new in 1.2.0
|
||||||
|
|
||||||
|
- **Preflight Context Check**: Before sending to the model, validates that total tokens fit within the context window. Automatically trims or drops oldest messages if exceeded.
|
||||||
|
- **Structure-Aware Assistant Trimming**: When context exceeds the limit, long AI responses are intelligently collapsed while preserving their structure (headers H1-H6, first line, last line).
|
||||||
|
- **Native Tool Output Trimming**: Detects and trims native tool outputs (`function_calling: "native"`), extracting only the final answer. Enable via `enable_tool_output_trimming`. **Note**: Non-native tool outputs are not fully injected into context.
|
||||||
|
- **Consolidated Status Notifications**: Unified "Context Usage" and "Context Summary Updated" notifications with appended warnings (e.g., `| ⚠️ High Usage`) for clearer feedback.
|
||||||
|
- **Context Usage Warning**: Emits a warning notification when context usage exceeds 90%.
|
||||||
|
- **Enhanced Header Detection**: Optimized regex (`^#{1,6}\s+`) to avoid false positives like `#hashtag`.
|
||||||
|
- **Detailed Token Logging**: Logs now show token breakdown for System, Head, Summary, and Tail sections with total.
|
||||||
|
|
||||||
## What's new in 1.1.3
|
## What's new in 1.1.3
|
||||||
- **Improved Compatibility**: Changed summary injection role from `user` to `assistant` for better compatibility across different LLMs.
|
- **Improved Compatibility**: Changed summary injection role from `user` to `assistant` for better compatibility across different LLMs.
|
||||||
- **Enhanced Stability**: Fixed a race condition in state management that could cause "inlet state not found" warnings in high-concurrency scenarios.
|
- **Enhanced Stability**: Fixed a race condition in state management that could cause "inlet state not found" warnings in high-concurrency scenarios.
|
||||||
@@ -31,6 +41,10 @@ This filter reduces token consumption in long conversations through intelligent
|
|||||||
- ✅ Persistent storage via Open WebUI's shared database connection (PostgreSQL, SQLite, etc.).
|
- ✅ Persistent storage via Open WebUI's shared database connection (PostgreSQL, SQLite, etc.).
|
||||||
- ✅ Flexible retention policy to keep the first and last N messages.
|
- ✅ Flexible retention policy to keep the first and last N messages.
|
||||||
- ✅ Smart injection of historical summaries back into the context.
|
- ✅ Smart injection of historical summaries back into the context.
|
||||||
|
- ✅ Structure-aware trimming that preserves document structure (headers, intro, conclusion).
|
||||||
|
- ✅ Native tool output trimming for cleaner context when using function calling.
|
||||||
|
- ✅ Real-time context usage monitoring with warning notifications (>90%).
|
||||||
|
- ✅ Detailed token logging for precise debugging and optimization.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -64,14 +78,12 @@ It is recommended to keep this filter early in the chain so it runs before filte
|
|||||||
| `max_summary_tokens` | `4000` | Maximum tokens for the generated summary. |
|
| `max_summary_tokens` | `4000` | Maximum tokens for the generated summary. |
|
||||||
| `summary_temperature` | `0.3` | Randomness for summary generation. Lower is more deterministic. |
|
| `summary_temperature` | `0.3` | Randomness for summary generation. Lower is more deterministic. |
|
||||||
| `model_thresholds` | `{}` | Per-model overrides for `compression_threshold_tokens` and `max_context_tokens` (useful for mixed models). |
|
| `model_thresholds` | `{}` | Per-model overrides for `compression_threshold_tokens` and `max_context_tokens` (useful for mixed models). |
|
||||||
|
| `enable_tool_output_trimming` | `false` | When enabled and `function_calling: "native"` is active, trims verbose tool outputs to extract only the final answer. |
|
||||||
| `debug_mode` | `true` | Log verbose debug info. Set to `false` in production. |
|
| `debug_mode` | `true` | Log verbose debug info. Set to `false` in production. |
|
||||||
| `show_debug_log` | `false` | Print debug logs to browser console (F12). Useful for frontend debugging. |
|
| `show_debug_log` | `false` | Print debug logs to browser console (F12). Useful for frontend debugging. |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
- **Database table not created**: Ensure Open WebUI is configured with a database and check Open WebUI logs for errors.
|
|
||||||
- **Summary not generated**: Confirm `compression_threshold_tokens` was hit and `summary_model` is compatible. Review logs for details.
|
|
||||||
- **Initial system prompt is lost**: Keep `keep_first` greater than 0 to protect the initial message.
|
- **Initial system prompt is lost**: Keep `keep_first` greater than 0 to protect the initial message.
|
||||||
- **Compression effect is weak**: Raise `compression_threshold_tokens` or lower `keep_first` / `keep_last` to allow more aggressive compression.
|
- **Compression effect is weak**: Raise `compression_threshold_tokens` or lower `keep_first` / `keep_last` to allow more aggressive compression.
|
||||||
|
- **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -1,11 +1,21 @@
|
|||||||
# 异步上下文压缩过滤器
|
# 异步上下文压缩过滤器
|
||||||
|
|
||||||
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.1.3 | **许可证:** MIT
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.2.0 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
|
|
||||||
> **重要提示**:为了确保所有过滤器的可维护性和易用性,每个过滤器都应附带清晰、完整的文档,以确保其功能、配置和使用方法得到充分说明。
|
> **重要提示**:为了确保所有过滤器的可维护性和易用性,每个过滤器都应附带清晰、完整的文档,以确保其功能、配置和使用方法得到充分说明。
|
||||||
|
|
||||||
本过滤器通过智能摘要和消息压缩技术,在保持对话连贯性的同时,显著降低长对话的 Token 消耗。
|
本过滤器通过智能摘要和消息压缩技术,在保持对话连贯性的同时,显著降低长对话的 Token 消耗。
|
||||||
|
|
||||||
|
## 1.2.0 版本更新
|
||||||
|
|
||||||
|
- **预检上下文检查 (Preflight Context Check)**: 在发送给模型之前,验证总 Token 是否符合上下文窗口。如果超出,自动裁剪或丢弃最旧的消息。
|
||||||
|
- **结构感知助手裁剪 (Structure-Aware Assistant Trimming)**: 当上下文超出限制时,智能折叠过长的 AI 回复,同时保留其结构(标题 H1-H6、首行、尾行)。
|
||||||
|
- **原生工具输出裁剪 (Native Tool Output Trimming)**: 检测并裁剪原生工具输出 (`function_calling: "native"`),仅提取最终答案。通过 `enable_tool_output_trimming` 启用。**注意**:非原生工具调用输出不会完整注入上下文。
|
||||||
|
- **统一状态通知**: 统一了“上下文使用情况”和“上下文摘要更新”的通知,并附加警告(例如 `| ⚠️ 高负载`),反馈更清晰。
|
||||||
|
- **上下文使用警告**: 当上下文使用率超过 90% 时发出警告通知。
|
||||||
|
- **增强的标题检测**: 优化了正则表达式 (`^#{1,6}\s+`) 以避免误判(如 `#hashtag`)。
|
||||||
|
- **详细 Token 日志**: 日志现在显示 System、Head、Summary 和 Tail 部分的 Token 细分及总计。
|
||||||
|
|
||||||
## 1.1.3 版本更新
|
## 1.1.3 版本更新
|
||||||
- **兼容性提升**: 将摘要注入角色从 `user` 改为 `assistant`,以提高在不同 LLM 之间的兼容性。
|
- **兼容性提升**: 将摘要注入角色从 `user` 改为 `assistant`,以提高在不同 LLM 之间的兼容性。
|
||||||
- **稳定性增强**: 修复了状态管理中的竞态条件,解决了高并发场景下可能出现的“无法获取 inlet 状态”警告。
|
- **稳定性增强**: 修复了状态管理中的竞态条件,解决了高并发场景下可能出现的“无法获取 inlet 状态”警告。
|
||||||
@@ -33,6 +43,10 @@
|
|||||||
- ✅ **持久化存储**: 复用 Open WebUI 共享数据库连接,自动支持 PostgreSQL/SQLite 等。
|
- ✅ **持久化存储**: 复用 Open WebUI 共享数据库连接,自动支持 PostgreSQL/SQLite 等。
|
||||||
- ✅ **灵活保留策略**: 可配置保留对话头部和尾部消息,确保关键信息连贯。
|
- ✅ **灵活保留策略**: 可配置保留对话头部和尾部消息,确保关键信息连贯。
|
||||||
- ✅ **智能注入**: 将历史摘要智能注入到新上下文中。
|
- ✅ **智能注入**: 将历史摘要智能注入到新上下文中。
|
||||||
|
- ✅ **结构感知裁剪**: 智能折叠过长消息,保留文档骨架(标题、首尾)。
|
||||||
|
- ✅ **原生工具输出裁剪**: 支持裁剪冗长的工具调用输出。
|
||||||
|
- ✅ **实时监控**: 实时监控上下文使用情况,超过 90% 发出警告。
|
||||||
|
- ✅ **详细日志**: 提供精确的 Token 统计日志,便于调试。
|
||||||
|
|
||||||
详细的工作原理和流程请参考 [工作流程指南](WORKFLOW_GUIDE_CN.md)。
|
详细的工作原理和流程请参考 [工作流程指南](WORKFLOW_GUIDE_CN.md)。
|
||||||
|
|
||||||
@@ -100,21 +114,15 @@
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `debug_mode`
|
| 参数 | 默认值 | 描述 |
|
||||||
|
| :----------------------------- | :------- | :-------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
- **默认值**: `true`
|
| `enable_tool_output_trimming` | `false` | 启用时,若 `function_calling: "native"` 激活,将裁剪冗长的工具输出以仅提取最终答案。 |
|
||||||
- **描述**: 是否在 Open WebUI 的控制台日志中打印详细的调试信息(如 Token 计数、压缩进度、数据库操作等)。生产环境建议设为 `false`。
|
| `debug_mode` | `true` | 是否在 Open WebUI 的控制台日志中打印详细的调试信息(如 Token 计数、压缩进度、数据库操作等)。生产环境建议设为 `false`。 |
|
||||||
|
| `show_debug_log` | `false` | 是否在浏览器控制台 (F12) 打印调试日志。便于前端调试。 |
|
||||||
#### `show_debug_log`
|
| `show_token_usage_status` | `true` | 是否在对话结束时显示 Token 使用情况的状态通知。 |
|
||||||
|
|
||||||
- **默认值**: `false`
|
|
||||||
- **描述**: 是否在浏览器控制台 (F12) 打印调试日志。便于前端调试。
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 故障排除
|
|
||||||
|
|
||||||
- **数据库表未创建**:确保 Open WebUI 已配置数据库,并查看日志获取错误信息。
|
|
||||||
- **摘要未生成**:检查是否达到 `compression_threshold_tokens`,确认 `summary_model` 可用,并查看日志。
|
|
||||||
- **初始系统提示丢失**:将 `keep_first` 设置为大于 0。
|
- **初始系统提示丢失**:将 `keep_first` 设置为大于 0。
|
||||||
- **压缩效果不明显**:提高 `compression_threshold_tokens`,或降低 `keep_first` / `keep_last` 以增强压缩力度。
|
- **压缩效果不明显**:提高 `compression_threshold_tokens`,或降低 `keep_first` / `keep_last` 以增强压缩力度。
|
||||||
|
- **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|||||||
@@ -5,10 +5,20 @@ author: Fu-Jie
|
|||||||
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||||
funding_url: https://github.com/open-webui
|
funding_url: https://github.com/open-webui
|
||||||
description: Reduces token consumption in long conversations while maintaining coherence through intelligent summarization and message compression.
|
description: Reduces token consumption in long conversations while maintaining coherence through intelligent summarization and message compression.
|
||||||
version: 1.1.3
|
version: 1.2.0
|
||||||
openwebui_id: b1655bc8-6de9-4cad-8cb5-a6f7829a02ce
|
openwebui_id: b1655bc8-6de9-4cad-8cb5-a6f7829a02ce
|
||||||
license: MIT
|
license: MIT
|
||||||
|
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
📌 What's new in 1.2.0
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
✅ Preflight Context Check: Validates context fit before sending to model.
|
||||||
|
✅ Structure-Aware Trimming: Collapses long AI responses while keeping H1-H6, intro, and conclusion.
|
||||||
|
✅ Native Tool Output Trimming: Cleaner context when using function calling. (Note: Non-native tool outputs are not fully injected into context)
|
||||||
|
✅ Context Usage Warning: Notification when usage exceeds 90%.
|
||||||
|
✅ Detailed Token Logging: Granular breakdown of System, Head, Summary, and Tail tokens.
|
||||||
|
|
||||||
═══════════════════════════════════════════════════════════════════════════════
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
📌 Overview
|
📌 Overview
|
||||||
═══════════════════════════════════════════════════════════════════════════════
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
@@ -21,6 +31,8 @@ Core Features:
|
|||||||
✅ Persistent storage with database support (PostgreSQL and SQLite)
|
✅ Persistent storage with database support (PostgreSQL and SQLite)
|
||||||
✅ Flexible retention policy (configurable to keep first and last N messages)
|
✅ Flexible retention policy (configurable to keep first and last N messages)
|
||||||
✅ Smart summary injection to maintain context
|
✅ Smart summary injection to maintain context
|
||||||
|
✅ Structure-aware trimming to preserve document skeleton
|
||||||
|
✅ Native tool output trimming for function calling support
|
||||||
|
|
||||||
═══════════════════════════════════════════════════════════════════════════════
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
🔄 Workflow
|
🔄 Workflow
|
||||||
@@ -110,6 +122,10 @@ model_thresholds
|
|||||||
Description: Threshold override configuration for specific models.
|
Description: Threshold override configuration for specific models.
|
||||||
Example: {"gpt-4": {"compression_threshold_tokens": 8000, "max_context_tokens": 32000}}
|
Example: {"gpt-4": {"compression_threshold_tokens": 8000, "max_context_tokens": 32000}}
|
||||||
|
|
||||||
|
enable_tool_output_trimming
|
||||||
|
Default: false
|
||||||
|
Description: When enabled and `function_calling: "native"` is active, trims verbose tool outputs to extract only the final answer.
|
||||||
|
|
||||||
keep_first
|
keep_first
|
||||||
Default: 1
|
Default: 1
|
||||||
Description: Always keep the first N messages of the conversation. Set to 0 to disable. The first message often contains important system prompts.
|
Description: Always keep the first N messages of the conversation. Set to 0 to disable. The first message often contains important system prompts.
|
||||||
@@ -245,6 +261,7 @@ Solution:
|
|||||||
|
|
||||||
from pydantic import BaseModel, Field, model_validator
|
from pydantic import BaseModel, Field, model_validator
|
||||||
from typing import Optional, Dict, Any, List, Union, Callable, Awaitable
|
from typing import Optional, Dict, Any, List, Union, Callable, Awaitable
|
||||||
|
import re
|
||||||
import asyncio
|
import asyncio
|
||||||
import json
|
import json
|
||||||
import hashlib
|
import hashlib
|
||||||
@@ -254,6 +271,7 @@ import contextlib
|
|||||||
# Open WebUI built-in imports
|
# Open WebUI built-in imports
|
||||||
from open_webui.utils.chat import generate_chat_completion
|
from open_webui.utils.chat import generate_chat_completion
|
||||||
from open_webui.models.users import Users
|
from open_webui.models.users import Users
|
||||||
|
from open_webui.models.models import Models
|
||||||
from fastapi.requests import Request
|
from fastapi.requests import Request
|
||||||
from open_webui.main import app as webui_app
|
from open_webui.main import app as webui_app
|
||||||
|
|
||||||
@@ -370,10 +388,6 @@ class Filter:
|
|||||||
self.valves = self.Valves()
|
self.valves = self.Valves()
|
||||||
self._owui_db = owui_db
|
self._owui_db = owui_db
|
||||||
self._db_engine = owui_engine
|
self._db_engine = owui_engine
|
||||||
self._db_engine = owui_engine
|
|
||||||
self._fallback_session_factory = (
|
|
||||||
sessionmaker(bind=self._db_engine) if self._db_engine else None
|
|
||||||
)
|
|
||||||
self._fallback_session_factory = (
|
self._fallback_session_factory = (
|
||||||
sessionmaker(bind=self._db_engine) if self._db_engine else None
|
sessionmaker(bind=self._db_engine) if self._db_engine else None
|
||||||
)
|
)
|
||||||
@@ -494,7 +508,14 @@ class Filter:
|
|||||||
default=True, description="Enable detailed logging for debugging."
|
default=True, description="Enable detailed logging for debugging."
|
||||||
)
|
)
|
||||||
show_debug_log: bool = Field(
|
show_debug_log: bool = Field(
|
||||||
default=False, description="Print debug logs to browser console (F12)"
|
default=False, description="Show debug logs in the frontend console"
|
||||||
|
)
|
||||||
|
show_token_usage_status: bool = Field(
|
||||||
|
default=True, description="Show token usage status notification"
|
||||||
|
)
|
||||||
|
enable_tool_output_trimming: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description="Enable trimming of large tool outputs (only works with native function calling).",
|
||||||
)
|
)
|
||||||
|
|
||||||
def _save_summary(self, chat_id: str, summary: str, compressed_count: int):
|
def _save_summary(self, chat_id: str, summary: str, compressed_count: int):
|
||||||
@@ -758,6 +779,8 @@ class Filter:
|
|||||||
body: dict,
|
body: dict,
|
||||||
__user__: Optional[dict] = None,
|
__user__: Optional[dict] = None,
|
||||||
__metadata__: dict = None,
|
__metadata__: dict = None,
|
||||||
|
__request__: Request = None,
|
||||||
|
__model__: dict = None,
|
||||||
__event_emitter__: Callable[[Any], Awaitable[None]] = None,
|
__event_emitter__: Callable[[Any], Awaitable[None]] = None,
|
||||||
__event_call__: Callable[[Any], Awaitable[None]] = None,
|
__event_call__: Callable[[Any], Awaitable[None]] = None,
|
||||||
) -> dict:
|
) -> dict:
|
||||||
@@ -765,10 +788,211 @@ class Filter:
|
|||||||
Executed before sending to the LLM.
|
Executed before sending to the LLM.
|
||||||
Compression Strategy: Only responsible for injecting existing summaries, no Token calculation.
|
Compression Strategy: Only responsible for injecting existing summaries, no Token calculation.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
messages = body.get("messages", [])
|
messages = body.get("messages", [])
|
||||||
|
|
||||||
|
# --- Native Tool Output Trimming (Opt-in, only for native function calling) ---
|
||||||
|
metadata = body.get("metadata", {})
|
||||||
|
is_native_func_calling = metadata.get("function_calling") == "native"
|
||||||
|
|
||||||
|
if self.valves.enable_tool_output_trimming and is_native_func_calling:
|
||||||
|
trimmed_count = 0
|
||||||
|
|
||||||
|
for msg in messages:
|
||||||
|
content = msg.get("content", "")
|
||||||
|
if not isinstance(content, str):
|
||||||
|
continue
|
||||||
|
|
||||||
|
role = msg.get("role")
|
||||||
|
|
||||||
|
# Only process assistant messages with native tool outputs
|
||||||
|
if role == "assistant":
|
||||||
|
# Detect tool output markers in assistant content
|
||||||
|
if "tool_call_id:" in content or (
|
||||||
|
content.startswith('"') and "\\"" in content
|
||||||
|
):
|
||||||
|
# Always trim tool outputs when enabled
|
||||||
|
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔍 Native tool output detected in assistant message.",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract the final answer (after last tool call metadata)
|
||||||
|
# Pattern: Matches escaped JSON strings like """...""" followed by newlines
|
||||||
|
# We look for the last occurrence of such a pattern and take everything after it
|
||||||
|
|
||||||
|
# 1. Try matching the specific OpenWebUI tool output format: """..."""
|
||||||
|
# This regex finds the last end-quote of a tool output block
|
||||||
|
tool_output_pattern = r'""".*?"""\s*'
|
||||||
|
|
||||||
|
# Find all matches
|
||||||
|
matches = list(
|
||||||
|
re.finditer(tool_output_pattern, content, re.DOTALL)
|
||||||
|
)
|
||||||
|
|
||||||
|
if matches:
|
||||||
|
# Get the end position of the last match
|
||||||
|
last_match_end = matches[-1].end()
|
||||||
|
|
||||||
|
# Everything after the last tool output is the final answer
|
||||||
|
final_answer = content[last_match_end:].strip()
|
||||||
|
|
||||||
|
if final_answer:
|
||||||
|
msg["content"] = (
|
||||||
|
f"... [Tool outputs trimmed]\n{final_answer}"
|
||||||
|
)
|
||||||
|
trimmed_count += 1
|
||||||
|
else:
|
||||||
|
# Fallback: Try splitting on "Arguments:" if the new format isn't found
|
||||||
|
# (Preserving backward compatibility or different model behaviors)
|
||||||
|
parts = re.split(r"(?:Arguments:\s*\{[^}]+\})\n+", content)
|
||||||
|
if len(parts) > 1:
|
||||||
|
final_answer = parts[-1].strip()
|
||||||
|
if final_answer:
|
||||||
|
msg["content"] = (
|
||||||
|
f"... [Tool outputs trimmed]\n{final_answer}"
|
||||||
|
)
|
||||||
|
trimmed_count += 1
|
||||||
|
|
||||||
|
if trimmed_count > 0 and self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ Trimmed {trimmed_count} tool output message(s).",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
chat_ctx = self._get_chat_context(body, __metadata__)
|
chat_ctx = self._get_chat_context(body, __metadata__)
|
||||||
chat_id = chat_ctx["chat_id"]
|
chat_id = chat_ctx["chat_id"]
|
||||||
|
|
||||||
|
# Extract system prompt for accurate token calculation
|
||||||
|
# 1. For custom models: check DB (Models.get_model_by_id)
|
||||||
|
# 2. For base models: check messages for role='system'
|
||||||
|
system_prompt_content = None
|
||||||
|
|
||||||
|
# Try to get from DB (custom model)
|
||||||
|
try:
|
||||||
|
model_id = body.get("model")
|
||||||
|
if model_id:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔍 Attempting DB lookup for model: {model_id}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Clean model ID if needed (though get_model_by_id usually expects the full ID)
|
||||||
|
model_obj = Models.get_model_by_id(model_id)
|
||||||
|
|
||||||
|
if model_obj:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✅ Model found in DB: {model_obj.name} (ID: {model_obj.id})",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
if model_obj.params:
|
||||||
|
try:
|
||||||
|
params = model_obj.params
|
||||||
|
# Handle case where params is a JSON string
|
||||||
|
if isinstance(params, str):
|
||||||
|
params = json.loads(params)
|
||||||
|
|
||||||
|
# Handle dict or Pydantic object
|
||||||
|
if isinstance(params, dict):
|
||||||
|
system_prompt_content = params.get("system")
|
||||||
|
else:
|
||||||
|
# Assume Pydantic model or object
|
||||||
|
system_prompt_content = getattr(params, "system", None)
|
||||||
|
|
||||||
|
if system_prompt_content:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📝 System prompt found in DB params ({len(system_prompt_content)} chars)",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ 'system' key missing in model params",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ Failed to parse model params: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ Model params are empty",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ Model NOT found in DB",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ Error fetching system prompt from DB: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
if self.valves.debug_mode:
|
||||||
|
print(f"[Inlet] Error fetching system prompt from DB: {e}")
|
||||||
|
|
||||||
|
# Fall back to checking messages (base model or already included)
|
||||||
|
if not system_prompt_content:
|
||||||
|
for msg in messages:
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
system_prompt_content = msg.get("content", "")
|
||||||
|
break
|
||||||
|
|
||||||
|
# Build system_prompt_msg for token calculation
|
||||||
|
system_prompt_msg = None
|
||||||
|
if system_prompt_content:
|
||||||
|
system_prompt_msg = {"role": "system", "content": system_prompt_content}
|
||||||
|
if self.valves.debug_mode:
|
||||||
|
print(
|
||||||
|
f"[Inlet] Found system prompt ({len(system_prompt_content)} chars). Including in budget."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Log message statistics (Moved here to include extracted system prompt)
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
try:
|
||||||
|
msg_stats = {
|
||||||
|
"user": 0,
|
||||||
|
"assistant": 0,
|
||||||
|
"system": 0,
|
||||||
|
"total": len(messages),
|
||||||
|
}
|
||||||
|
for msg in messages:
|
||||||
|
role = msg.get("role", "unknown")
|
||||||
|
if role in msg_stats:
|
||||||
|
msg_stats[role] += 1
|
||||||
|
|
||||||
|
# If system prompt was extracted from DB/Model but not in messages, count it
|
||||||
|
if system_prompt_content:
|
||||||
|
# Check if it's already counted (i.e., was in messages)
|
||||||
|
is_in_messages = any(m.get("role") == "system" for m in messages)
|
||||||
|
if not is_in_messages:
|
||||||
|
msg_stats["system"] += 1
|
||||||
|
msg_stats["total"] += 1
|
||||||
|
|
||||||
|
stats_str = f"Total: {msg_stats['total']} | User: {msg_stats['user']} | Assistant: {msg_stats['assistant']} | System: {msg_stats['system']}"
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] Message Stats: {stats_str}", event_call=__event_call__
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"[Inlet] Error logging message stats: {e}")
|
||||||
|
|
||||||
if not chat_id:
|
if not chat_id:
|
||||||
await self._log(
|
await self._log(
|
||||||
"[Inlet] ❌ Missing chat_id in metadata, skipping compression",
|
"[Inlet] ❌ Missing chat_id in metadata, skipping compression",
|
||||||
@@ -787,10 +1011,6 @@ class Filter:
|
|||||||
# Target is to compress up to the (total - keep_last) message
|
# Target is to compress up to the (total - keep_last) message
|
||||||
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
||||||
|
|
||||||
# Record the target compression progress for the original messages, for use in outlet
|
|
||||||
# Target is to compress up to the (total - keep_last) message
|
|
||||||
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
|
||||||
|
|
||||||
await self._log(
|
await self._log(
|
||||||
f"[Inlet] Recorded target compression progress: {target_compressed_count}",
|
f"[Inlet] Recorded target compression progress: {target_compressed_count}",
|
||||||
event_call=__event_call__,
|
event_call=__event_call__,
|
||||||
@@ -799,6 +1019,14 @@ class Filter:
|
|||||||
# Load summary record
|
# Load summary record
|
||||||
summary_record = await asyncio.to_thread(self._load_summary_record, chat_id)
|
summary_record = await asyncio.to_thread(self._load_summary_record, chat_id)
|
||||||
|
|
||||||
|
# Calculate effective_keep_first to ensure all system messages are protected
|
||||||
|
last_system_index = -1
|
||||||
|
for i, msg in enumerate(messages):
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
last_system_index = i
|
||||||
|
|
||||||
|
effective_keep_first = max(self.valves.keep_first, last_system_index + 1)
|
||||||
|
|
||||||
final_messages = []
|
final_messages = []
|
||||||
|
|
||||||
if summary_record:
|
if summary_record:
|
||||||
@@ -812,8 +1040,8 @@ class Filter:
|
|||||||
|
|
||||||
# 1. Head messages (Keep First)
|
# 1. Head messages (Keep First)
|
||||||
head_messages = []
|
head_messages = []
|
||||||
if self.valves.keep_first > 0:
|
if effective_keep_first > 0:
|
||||||
head_messages = messages[: self.valves.keep_first]
|
head_messages = messages[:effective_keep_first]
|
||||||
|
|
||||||
# 2. Summary message (Inserted as User message)
|
# 2. Summary message (Inserted as User message)
|
||||||
summary_content = (
|
summary_content = (
|
||||||
@@ -826,29 +1054,215 @@ class Filter:
|
|||||||
|
|
||||||
# 3. Tail messages (Tail) - All messages starting from the last compression point
|
# 3. Tail messages (Tail) - All messages starting from the last compression point
|
||||||
# Note: Must ensure head messages are not duplicated
|
# Note: Must ensure head messages are not duplicated
|
||||||
start_index = max(compressed_count, self.valves.keep_first)
|
start_index = max(compressed_count, effective_keep_first)
|
||||||
tail_messages = messages[start_index:]
|
tail_messages = messages[start_index:]
|
||||||
|
|
||||||
final_messages = head_messages + [summary_msg] + tail_messages
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
tail_preview = [
|
||||||
|
f"{i + start_index}: [{m.get('role')}] {m.get('content', '')[:30]}..."
|
||||||
|
for i, m in enumerate(tail_messages)
|
||||||
|
]
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📜 Tail Messages (Start Index: {start_index}): {tail_preview}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- Preflight Check & Budgeting (Simplified) ---
|
||||||
|
|
||||||
|
# Assemble candidate messages (for output)
|
||||||
|
candidate_messages = head_messages + [summary_msg] + tail_messages
|
||||||
|
|
||||||
|
# Prepare messages for token calculation (include system prompt if missing)
|
||||||
|
calc_messages = candidate_messages
|
||||||
|
if system_prompt_msg:
|
||||||
|
# Check if system prompt is already in head_messages
|
||||||
|
is_in_head = any(m.get("role") == "system" for m in head_messages)
|
||||||
|
if not is_in_head:
|
||||||
|
calc_messages = [system_prompt_msg] + candidate_messages
|
||||||
|
|
||||||
|
# Get max context limit
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate total tokens
|
||||||
|
total_tokens = await asyncio.to_thread(
|
||||||
|
self._calculate_messages_tokens, calc_messages
|
||||||
|
)
|
||||||
|
|
||||||
|
# Preflight Check Log
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔎 Preflight Check: {total_tokens}t / {max_context_tokens}t ({(total_tokens/max_context_tokens*100):.1f}%)",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# If over budget, reduce history (Keep Last)
|
||||||
|
if total_tokens > max_context_tokens:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ Candidate prompt ({total_tokens} Tokens) exceeds limit ({max_context_tokens}). Reducing history...",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Dynamically remove messages from the start of tail_messages
|
||||||
|
# Always try to keep at least the last message (usually user input)
|
||||||
|
while total_tokens > max_context_tokens and len(tail_messages) > 1:
|
||||||
|
# Strategy 1: Structure-Aware Assistant Trimming
|
||||||
|
# Retain: Headers (#), First Line, Last Line. Collapse the rest.
|
||||||
|
target_msg = None
|
||||||
|
target_idx = -1
|
||||||
|
|
||||||
|
# Find the oldest assistant message that is long and not yet trimmed
|
||||||
|
for i, msg in enumerate(tail_messages):
|
||||||
|
# Skip the last message (usually user input, protect it)
|
||||||
|
if i == len(tail_messages) - 1:
|
||||||
|
break
|
||||||
|
|
||||||
|
if msg.get("role") == "assistant":
|
||||||
|
content = str(msg.get("content", ""))
|
||||||
|
is_trimmed = msg.get("metadata", {}).get(
|
||||||
|
"is_trimmed", False
|
||||||
|
)
|
||||||
|
# Only target messages that are reasonably long (> 200 chars)
|
||||||
|
if len(content) > 200 and not is_trimmed:
|
||||||
|
target_msg = msg
|
||||||
|
target_idx = i
|
||||||
|
break
|
||||||
|
|
||||||
|
# If found a suitable assistant message, apply structure-aware trimming
|
||||||
|
if target_msg:
|
||||||
|
content = str(target_msg.get("content", ""))
|
||||||
|
lines = content.split("\n")
|
||||||
|
kept_lines = []
|
||||||
|
|
||||||
|
# Logic: Keep headers, first non-empty line, last non-empty line
|
||||||
|
first_line_found = False
|
||||||
|
last_line_idx = -1
|
||||||
|
|
||||||
|
# Find last non-empty line index
|
||||||
|
for idx in range(len(lines) - 1, -1, -1):
|
||||||
|
if lines[idx].strip():
|
||||||
|
last_line_idx = idx
|
||||||
|
break
|
||||||
|
|
||||||
|
for idx, line in enumerate(lines):
|
||||||
|
stripped = line.strip()
|
||||||
|
if not stripped:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Keep headers (H1-H6, requires space after #)
|
||||||
|
if re.match(r"^#{1,6}\s+", stripped):
|
||||||
|
kept_lines.append(line)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Keep first non-empty line
|
||||||
|
if not first_line_found:
|
||||||
|
kept_lines.append(line)
|
||||||
|
first_line_found = True
|
||||||
|
# Add placeholder if there's more content coming
|
||||||
|
if idx < last_line_idx:
|
||||||
|
kept_lines.append("\n... [Content collapsed] ...\n")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Keep last non-empty line
|
||||||
|
if idx == last_line_idx:
|
||||||
|
kept_lines.append(line)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Update message content
|
||||||
|
new_content = "\n".join(kept_lines)
|
||||||
|
|
||||||
|
# Safety check: If trimming didn't save much (e.g. mostly headers), force drop
|
||||||
|
if len(new_content) > len(content) * 0.8:
|
||||||
|
# Fallback to drop if structure preservation is too verbose
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
target_msg["content"] = new_content
|
||||||
|
if "metadata" not in target_msg:
|
||||||
|
target_msg["metadata"] = {}
|
||||||
|
target_msg["metadata"]["is_trimmed"] = True
|
||||||
|
|
||||||
|
# Calculate token reduction
|
||||||
|
old_tokens = self._count_tokens(content)
|
||||||
|
new_tokens = self._count_tokens(target_msg["content"])
|
||||||
|
diff = old_tokens - new_tokens
|
||||||
|
total_tokens -= diff
|
||||||
|
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📉 Structure-trimmed Assistant message. Saved: {diff} tokens.",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Strategy 2: Fallback - Drop Oldest Message Entirely (FIFO)
|
||||||
|
# (User requested to remove progressive trimming for other cases)
|
||||||
|
dropped = tail_messages.pop(0)
|
||||||
|
dropped_tokens = self._count_tokens(str(dropped.get("content", "")))
|
||||||
|
total_tokens -= dropped_tokens
|
||||||
|
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🗑️ Dropped message from history to fit context. Role: {dropped.get('role')}, Tokens: {dropped_tokens}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Re-assemble
|
||||||
|
candidate_messages = head_messages + [summary_msg] + tail_messages
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ History reduced. New total: {total_tokens} Tokens (Tail size: {len(tail_messages)})",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
final_messages = candidate_messages
|
||||||
|
|
||||||
|
# Calculate detailed token stats for logging
|
||||||
|
system_tokens = (
|
||||||
|
self._count_tokens(system_prompt_msg.get("content", ""))
|
||||||
|
if system_prompt_msg
|
||||||
|
else 0
|
||||||
|
)
|
||||||
|
head_tokens = self._calculate_messages_tokens(head_messages)
|
||||||
|
summary_tokens = self._count_tokens(summary_content)
|
||||||
|
tail_tokens = self._calculate_messages_tokens(tail_messages)
|
||||||
|
|
||||||
|
system_info = (
|
||||||
|
f"System({system_tokens}t)" if system_prompt_msg else "System(0t)"
|
||||||
|
)
|
||||||
|
|
||||||
|
total_section_tokens = (
|
||||||
|
system_tokens + head_tokens + summary_tokens + tail_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] Applied summary: {system_info} + Head({len(head_messages)} msg, {head_tokens}t) + Summary({summary_tokens}t) + Tail({len(tail_messages)} msg, {tail_tokens}t) = Total({total_section_tokens}t)",
|
||||||
|
type="success",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Prepare status message (Context Usage format)
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
usage_ratio = total_section_tokens / max_context_tokens
|
||||||
|
status_msg = f"Context Usage (Estimated): {total_section_tokens} / {max_context_tokens} Tokens ({usage_ratio*100:.1f}%)"
|
||||||
|
if usage_ratio > 0.9:
|
||||||
|
status_msg += " | ⚠️ High Usage"
|
||||||
|
else:
|
||||||
|
status_msg = f"Loaded historical summary (Hidden {compressed_count} historical messages)"
|
||||||
|
|
||||||
# Send status notification
|
|
||||||
if __event_emitter__:
|
if __event_emitter__:
|
||||||
await __event_emitter__(
|
await __event_emitter__(
|
||||||
{
|
{
|
||||||
"type": "status",
|
"type": "status",
|
||||||
"data": {
|
"data": {
|
||||||
"description": f"Loaded historical summary (Hidden {compressed_count} historical messages)",
|
"description": status_msg,
|
||||||
"done": True,
|
"done": True,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
await self._log(
|
|
||||||
f"[Inlet] Applied summary: Head({len(head_messages)}) + Summary + Tail({len(tail_messages)})",
|
|
||||||
type="success",
|
|
||||||
event_call=__event_call__,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Emit debug log to frontend (Keep the structured log as well)
|
# Emit debug log to frontend (Keep the structured log as well)
|
||||||
await self._emit_debug_log(
|
await self._emit_debug_log(
|
||||||
__event_call__,
|
__event_call__,
|
||||||
@@ -861,8 +1275,71 @@ class Filter:
|
|||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
# No summary, use original messages
|
# No summary, use original messages
|
||||||
|
# But still need to check budget!
|
||||||
final_messages = messages
|
final_messages = messages
|
||||||
|
|
||||||
|
# Include system prompt in calculation
|
||||||
|
calc_messages = final_messages
|
||||||
|
if system_prompt_msg:
|
||||||
|
is_in_messages = any(m.get("role") == "system" for m in final_messages)
|
||||||
|
if not is_in_messages:
|
||||||
|
calc_messages = [system_prompt_msg] + final_messages
|
||||||
|
|
||||||
|
# Get max context limit
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
total_tokens = await asyncio.to_thread(
|
||||||
|
self._calculate_messages_tokens, calc_messages
|
||||||
|
)
|
||||||
|
|
||||||
|
if total_tokens > max_context_tokens:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ Original messages ({total_tokens} Tokens) exceed limit ({max_context_tokens}). Reducing history...",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Dynamically remove messages from the start
|
||||||
|
# We'll respect effective_keep_first to protect system prompts
|
||||||
|
|
||||||
|
start_trim_index = effective_keep_first
|
||||||
|
|
||||||
|
while (
|
||||||
|
total_tokens > max_context_tokens
|
||||||
|
and len(final_messages)
|
||||||
|
> start_trim_index + 1 # Keep at least 1 message after keep_first
|
||||||
|
):
|
||||||
|
dropped = final_messages.pop(start_trim_index)
|
||||||
|
total_tokens -= self._count_tokens(str(dropped.get("content", "")))
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ Messages reduced. New total: {total_tokens} Tokens",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Send status notification (Context Usage format)
|
||||||
|
if __event_emitter__:
|
||||||
|
status_msg = f"Context Usage (Estimated): {total_tokens} / {max_context_tokens} Tokens"
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
usage_ratio = total_tokens / max_context_tokens
|
||||||
|
status_msg += f" ({usage_ratio*100:.1f}%)"
|
||||||
|
if usage_ratio > 0.9:
|
||||||
|
status_msg += " | ⚠️ High Usage"
|
||||||
|
|
||||||
|
await __event_emitter__(
|
||||||
|
{
|
||||||
|
"type": "status",
|
||||||
|
"data": {
|
||||||
|
"description": status_msg,
|
||||||
|
"done": True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
body["messages"] = final_messages
|
body["messages"] = final_messages
|
||||||
|
|
||||||
await self._log(
|
await self._log(
|
||||||
@@ -1048,11 +1525,23 @@ class Filter:
|
|||||||
return
|
return
|
||||||
|
|
||||||
middle_messages = messages[start_index:end_index]
|
middle_messages = messages[start_index:end_index]
|
||||||
|
tail_preview_msgs = messages[end_index:]
|
||||||
|
|
||||||
await self._log(
|
if self.valves.show_debug_log and __event_call__:
|
||||||
f"[🤖 Async Summary Task] Middle messages to process: {len(middle_messages)}",
|
middle_preview = [
|
||||||
event_call=__event_call__,
|
f"{i + start_index}: [{m.get('role')}] {m.get('content', '')[:20]}..."
|
||||||
)
|
for i, m in enumerate(middle_messages[:3])
|
||||||
|
]
|
||||||
|
tail_preview = [
|
||||||
|
f"{i + end_index}: [{m.get('role')}] {m.get('content', '')[:20]}..."
|
||||||
|
for i, m in enumerate(tail_preview_msgs)
|
||||||
|
]
|
||||||
|
await self._log(
|
||||||
|
f"[🤖 Async Summary Task] 📊 Boundary Check:\n"
|
||||||
|
f" - Middle (Compressing): {len(middle_messages)} msgs (Indices {start_index}-{end_index-1}) -> Preview: {middle_preview}\n"
|
||||||
|
f" - Tail (Keeping): {len(tail_preview_msgs)} msgs (Indices {end_index}-End) -> Preview: {tail_preview}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
# 3. Check Token limit and truncate (Max Context Truncation)
|
# 3. Check Token limit and truncate (Max Context Truncation)
|
||||||
# [Optimization] Use the summary model's (if any) threshold to decide how many middle messages can be processed
|
# [Optimization] Use the summary model's (if any) threshold to decide how many middle messages can be processed
|
||||||
@@ -1186,6 +1675,109 @@ class Filter:
|
|||||||
event_call=__event_call__,
|
event_call=__event_call__,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# --- Token Usage Status Notification ---
|
||||||
|
if self.valves.show_token_usage_status and __event_emitter__:
|
||||||
|
try:
|
||||||
|
# 1. Fetch System Prompt (DB fallback)
|
||||||
|
system_prompt_msg = None
|
||||||
|
model_id = body.get("model")
|
||||||
|
if model_id:
|
||||||
|
try:
|
||||||
|
model_obj = Models.get_model_by_id(model_id)
|
||||||
|
if model_obj and model_obj.params:
|
||||||
|
params = model_obj.params
|
||||||
|
if isinstance(params, str):
|
||||||
|
params = json.loads(params)
|
||||||
|
if isinstance(params, dict):
|
||||||
|
sys_content = params.get("system")
|
||||||
|
else:
|
||||||
|
sys_content = getattr(params, "system", None)
|
||||||
|
|
||||||
|
if sys_content:
|
||||||
|
system_prompt_msg = {
|
||||||
|
"role": "system",
|
||||||
|
"content": sys_content,
|
||||||
|
}
|
||||||
|
except Exception:
|
||||||
|
pass # Ignore DB errors here, best effort
|
||||||
|
|
||||||
|
# 2. Calculate Effective Keep First
|
||||||
|
last_system_index = -1
|
||||||
|
for i, msg in enumerate(messages):
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
last_system_index = i
|
||||||
|
effective_keep_first = max(
|
||||||
|
self.valves.keep_first, last_system_index + 1
|
||||||
|
)
|
||||||
|
|
||||||
|
# 3. Construct Next Context
|
||||||
|
# Head
|
||||||
|
head_msgs = (
|
||||||
|
messages[:effective_keep_first]
|
||||||
|
if effective_keep_first > 0
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
summary_content = (
|
||||||
|
f"【System Prompt: The following is a summary of the historical conversation, provided for context only. Do not reply to the summary content itself; answer the subsequent latest questions directly.】\n\n"
|
||||||
|
f"{new_summary}\n\n"
|
||||||
|
f"---\n"
|
||||||
|
f"Below is the recent conversation:"
|
||||||
|
)
|
||||||
|
summary_msg = {"role": "assistant", "content": summary_content}
|
||||||
|
|
||||||
|
# Tail (using target_compressed_count which is what we just compressed up to)
|
||||||
|
# Note: target_compressed_count is the index *after* the last compressed message?
|
||||||
|
# In _generate_summary_async, target_compressed_count is passed in.
|
||||||
|
# It represents the number of messages to be covered by summary (excluding keep_last).
|
||||||
|
# So tail starts at max(target_compressed_count, effective_keep_first).
|
||||||
|
start_index = max(target_compressed_count, effective_keep_first)
|
||||||
|
tail_msgs = messages[start_index:]
|
||||||
|
|
||||||
|
# Assemble
|
||||||
|
next_context = head_msgs + [summary_msg] + tail_msgs
|
||||||
|
|
||||||
|
# Inject system prompt if needed
|
||||||
|
if system_prompt_msg:
|
||||||
|
is_in_head = any(m.get("role") == "system" for m in head_msgs)
|
||||||
|
if not is_in_head:
|
||||||
|
next_context = [system_prompt_msg] + next_context
|
||||||
|
|
||||||
|
# 4. Calculate Tokens
|
||||||
|
token_count = self._calculate_messages_tokens(next_context)
|
||||||
|
|
||||||
|
# 5. Get Thresholds & Calculate Ratio
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
# 6. Emit Status
|
||||||
|
status_msg = f"Context Summary Updated: {token_count} / {max_context_tokens} Tokens"
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
ratio = (token_count / max_context_tokens) * 100
|
||||||
|
status_msg += f" ({ratio:.1f}%)"
|
||||||
|
if ratio > 90.0:
|
||||||
|
status_msg += " | ⚠️ High Usage"
|
||||||
|
|
||||||
|
await __event_emitter__(
|
||||||
|
{
|
||||||
|
"type": "status",
|
||||||
|
"data": {
|
||||||
|
"description": status_msg,
|
||||||
|
"done": True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
await self._log(
|
||||||
|
f"[Status] Error calculating tokens: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
await self._log(
|
await self._log(
|
||||||
f"[🤖 Async Summary Task] ❌ Error: {str(e)}",
|
f"[🤖 Async Summary Task] ❌ Error: {str(e)}",
|
||||||
|
|||||||
@@ -5,10 +5,20 @@ author: Fu-Jie
|
|||||||
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||||
funding_url: https://github.com/open-webui
|
funding_url: https://github.com/open-webui
|
||||||
description: 通过智能摘要和消息压缩,降低长对话的 token 消耗,同时保持对话连贯性。
|
description: 通过智能摘要和消息压缩,降低长对话的 token 消耗,同时保持对话连贯性。
|
||||||
version: 1.1.3
|
version: 1.2.0
|
||||||
openwebui_id: 5c0617cb-a9e4-4bd6-a440-d276534ebd18
|
openwebui_id: 5c0617cb-a9e4-4bd6-a440-d276534ebd18
|
||||||
license: MIT
|
license: MIT
|
||||||
|
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
📌 1.2.0 版本更新
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
✅ 预检上下文检查:发送给模型前验证上下文是否适配。
|
||||||
|
✅ 结构感知裁剪:折叠过长的 AI 响应,同时保留标题 (H1-H6)、开头和结尾。
|
||||||
|
✅ 原生工具输出裁剪:使用函数调用时清理上下文,去除冗余输出。(注意:非原生工具调用输出不会完整注入上下文)
|
||||||
|
✅ 上下文使用警告:当使用量超过 90% 时发出通知。
|
||||||
|
✅ 详细 Token 日志:细粒度记录 System、Head、Summary 和 Tail 的 Token 消耗。
|
||||||
|
|
||||||
═══════════════════════════════════════════════════════════════════════════════
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
📌 功能概述
|
📌 功能概述
|
||||||
═══════════════════════════════════════════════════════════════════════════════
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
@@ -248,9 +258,11 @@ import asyncio
|
|||||||
import json
|
import json
|
||||||
import hashlib
|
import hashlib
|
||||||
import time
|
import time
|
||||||
|
import re
|
||||||
|
|
||||||
# Open WebUI 内置导入
|
# Open WebUI 内置导入
|
||||||
from open_webui.utils.chat import generate_chat_completion
|
from open_webui.utils.chat import generate_chat_completion
|
||||||
|
from open_webui.models.models import Models
|
||||||
from open_webui.models.users import Users
|
from open_webui.models.users import Users
|
||||||
from fastapi.requests import Request
|
from fastapi.requests import Request
|
||||||
from open_webui.main import app as webui_app
|
from open_webui.main import app as webui_app
|
||||||
@@ -353,6 +365,13 @@ class Filter:
|
|||||||
show_debug_log: bool = Field(
|
show_debug_log: bool = Field(
|
||||||
default=False, description="在浏览器控制台打印调试日志 (F12)"
|
default=False, description="在浏览器控制台打印调试日志 (F12)"
|
||||||
)
|
)
|
||||||
|
show_token_usage_status: bool = Field(
|
||||||
|
default=True, description="在对话结束时显示 Token 使用情况的状态通知"
|
||||||
|
)
|
||||||
|
enable_tool_output_trimming: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description="启用原生工具输出裁剪 (仅适用于 native function calling),裁剪过长的工具输出以节省 Token。",
|
||||||
|
)
|
||||||
|
|
||||||
def _save_summary(self, chat_id: str, summary: str, compressed_count: int):
|
def _save_summary(self, chat_id: str, summary: str, compressed_count: int):
|
||||||
"""保存摘要到数据库"""
|
"""保存摘要到数据库"""
|
||||||
@@ -614,12 +633,217 @@ class Filter:
|
|||||||
) -> dict:
|
) -> dict:
|
||||||
"""
|
"""
|
||||||
在发送到 LLM 之前执行
|
在发送到 LLM 之前执行
|
||||||
压缩策略:只负责注入已有的摘要,不进行 Token 计算
|
压缩策略:
|
||||||
|
1. 注入已有摘要
|
||||||
|
2. 预检 Token 预算
|
||||||
|
3. 如果超限,执行结构化裁剪(Structure-Aware Trimming)或丢弃旧消息
|
||||||
"""
|
"""
|
||||||
messages = body.get("messages", [])
|
messages = body.get("messages", [])
|
||||||
|
|
||||||
|
# --- 原生工具输出裁剪 (Native Tool Output Trimming) ---
|
||||||
|
# 即使未启用压缩,也始终检查并裁剪过长的工具输出,以节省 Token
|
||||||
|
if self.valves.enable_tool_output_trimming:
|
||||||
|
trimmed_count = 0
|
||||||
|
for msg in messages:
|
||||||
|
content = msg.get("content", "")
|
||||||
|
if not isinstance(content, str):
|
||||||
|
continue
|
||||||
|
|
||||||
|
role = msg.get("role")
|
||||||
|
|
||||||
|
# 仅处理带有原生工具输出的助手消息
|
||||||
|
if role == "assistant":
|
||||||
|
# 检测助手内容中的工具输出标记
|
||||||
|
if "tool_call_id:" in content or (
|
||||||
|
content.startswith('"') and "\\"" in content
|
||||||
|
):
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔍 检测到助手消息中的原生工具输出。",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 提取最终答案(在最后一个工具调用元数据之后)
|
||||||
|
# 模式:匹配转义的 JSON 字符串,如 """...""" 后跟换行符
|
||||||
|
# 我们寻找该模式的最后一次出现,并获取其后的所有内容
|
||||||
|
|
||||||
|
# 1. 尝试匹配特定的 OpenWebUI 工具输出格式:"""..."""
|
||||||
|
tool_output_pattern = r'""".*?"""\s*'
|
||||||
|
|
||||||
|
# 查找所有匹配项
|
||||||
|
matches = list(
|
||||||
|
re.finditer(tool_output_pattern, content, re.DOTALL)
|
||||||
|
)
|
||||||
|
|
||||||
|
if matches:
|
||||||
|
# 获取最后一个匹配项的结束位置
|
||||||
|
last_match_end = matches[-1].end()
|
||||||
|
|
||||||
|
# 最后一个工具输出之后的所有内容即为最终答案
|
||||||
|
final_answer = content[last_match_end:].strip()
|
||||||
|
|
||||||
|
if final_answer:
|
||||||
|
msg["content"] = (
|
||||||
|
f"... [Tool outputs trimmed]\n{final_answer}"
|
||||||
|
)
|
||||||
|
trimmed_count += 1
|
||||||
|
else:
|
||||||
|
# 回退:如果找不到新格式,尝试按 "Arguments:" 分割
|
||||||
|
# (保留向后兼容性或适应不同模型行为)
|
||||||
|
parts = re.split(r"(?:Arguments:\s*\{[^}]+\})\n+", content)
|
||||||
|
if len(parts) > 1:
|
||||||
|
final_answer = parts[-1].strip()
|
||||||
|
if final_answer:
|
||||||
|
msg["content"] = (
|
||||||
|
f"... [Tool outputs trimmed]\n{final_answer}"
|
||||||
|
)
|
||||||
|
trimmed_count += 1
|
||||||
|
|
||||||
|
if trimmed_count > 0 and self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ 已裁剪 {trimmed_count} 条工具输出消息。",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
chat_ctx = self._get_chat_context(body, __metadata__)
|
chat_ctx = self._get_chat_context(body, __metadata__)
|
||||||
chat_id = chat_ctx["chat_id"]
|
chat_id = chat_ctx["chat_id"]
|
||||||
|
|
||||||
|
# 提取系统提示词以进行准确的 Token 计算
|
||||||
|
# 1. 对于自定义模型:检查数据库 (Models.get_model_by_id)
|
||||||
|
# 2. 对于基础模型:检查消息中的 role='system'
|
||||||
|
system_prompt_content = None
|
||||||
|
|
||||||
|
# 尝试从数据库获取 (自定义模型)
|
||||||
|
try:
|
||||||
|
model_id = body.get("model")
|
||||||
|
if model_id:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔍 尝试从数据库查找模型: {model_id}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 清理模型 ID
|
||||||
|
model_obj = Models.get_model_by_id(model_id)
|
||||||
|
|
||||||
|
if model_obj:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✅ 数据库中找到模型: {model_obj.name} (ID: {model_obj.id})",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
if model_obj.params:
|
||||||
|
try:
|
||||||
|
params = model_obj.params
|
||||||
|
# 处理 params 是 JSON 字符串的情况
|
||||||
|
if isinstance(params, str):
|
||||||
|
params = json.loads(params)
|
||||||
|
|
||||||
|
# 处理字典或 Pydantic 对象
|
||||||
|
if isinstance(params, dict):
|
||||||
|
system_prompt_content = params.get("system")
|
||||||
|
else:
|
||||||
|
# 假设是 Pydantic 模型或对象
|
||||||
|
system_prompt_content = getattr(params, "system", None)
|
||||||
|
|
||||||
|
if system_prompt_content:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📝 在数据库参数中找到系统提示词 ({len(system_prompt_content)} 字符)",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ 模型参数中缺少 'system' 键",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ 解析模型参数失败: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ 模型参数为空",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ 数据库中未找到模型",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ❌ 从数据库获取系统提示词错误: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
if self.valves.debug_mode:
|
||||||
|
print(f"[Inlet] 从数据库获取系统提示词错误: {e}")
|
||||||
|
|
||||||
|
# 回退:检查消息列表 (基础模型或已包含)
|
||||||
|
if not system_prompt_content:
|
||||||
|
for msg in messages:
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
system_prompt_content = msg.get("content", "")
|
||||||
|
break
|
||||||
|
|
||||||
|
# 构建 system_prompt_msg 用于 Token 计算
|
||||||
|
system_prompt_msg = None
|
||||||
|
if system_prompt_content:
|
||||||
|
system_prompt_msg = {"role": "system", "content": system_prompt_content}
|
||||||
|
if self.valves.debug_mode:
|
||||||
|
print(
|
||||||
|
f"[Inlet] 找到系统提示词 ({len(system_prompt_content)} 字符)。计入预算。"
|
||||||
|
)
|
||||||
|
|
||||||
|
# 记录消息统计信息 (移至此处以包含提取的系统提示词)
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
try:
|
||||||
|
msg_stats = {
|
||||||
|
"user": 0,
|
||||||
|
"assistant": 0,
|
||||||
|
"system": 0,
|
||||||
|
"total": len(messages),
|
||||||
|
}
|
||||||
|
for msg in messages:
|
||||||
|
role = msg.get("role", "unknown")
|
||||||
|
if role in msg_stats:
|
||||||
|
msg_stats[role] += 1
|
||||||
|
|
||||||
|
# 如果系统提示词是从 DB/Model 提取的但不在消息中,则计数
|
||||||
|
if system_prompt_content:
|
||||||
|
# 检查是否已计数 (即是否在消息中)
|
||||||
|
is_in_messages = any(m.get("role") == "system" for m in messages)
|
||||||
|
if not is_in_messages:
|
||||||
|
msg_stats["system"] += 1
|
||||||
|
msg_stats["total"] += 1
|
||||||
|
|
||||||
|
stats_str = f"Total: {msg_stats['total']} | User: {msg_stats['user']} | Assistant: {msg_stats['assistant']} | System: {msg_stats['system']}"
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 消息统计: {stats_str}", event_call=__event_call__
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"[Inlet] 记录消息统计错误: {e}")
|
||||||
|
|
||||||
|
if not chat_id:
|
||||||
|
await self._log(
|
||||||
|
"[Inlet] ❌ metadata 中缺少 chat_id,跳过压缩",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
return body
|
||||||
|
|
||||||
if self.valves.debug_mode or self.valves.show_debug_log:
|
if self.valves.debug_mode or self.valves.show_debug_log:
|
||||||
await self._log(
|
await self._log(
|
||||||
f"\n{'='*60}\n[Inlet] Chat ID: {chat_id}\n[Inlet] 收到 {len(messages)} 条消息",
|
f"\n{'='*60}\n[Inlet] Chat ID: {chat_id}\n[Inlet] 收到 {len(messages)} 条消息",
|
||||||
@@ -630,10 +854,6 @@ class Filter:
|
|||||||
# 目标是压缩到倒数第 keep_last 条之前
|
# 目标是压缩到倒数第 keep_last 条之前
|
||||||
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
||||||
|
|
||||||
# 记录原始消息的目标压缩进度,供 outlet 使用
|
|
||||||
# 目标是压缩到倒数第 keep_last 条之前
|
|
||||||
target_compressed_count = max(0, len(messages) - self.valves.keep_last)
|
|
||||||
|
|
||||||
await self._log(
|
await self._log(
|
||||||
f"[Inlet] 记录目标压缩进度: {target_compressed_count}",
|
f"[Inlet] 记录目标压缩进度: {target_compressed_count}",
|
||||||
event_call=__event_call__,
|
event_call=__event_call__,
|
||||||
@@ -642,6 +862,14 @@ class Filter:
|
|||||||
# 加载摘要记录
|
# 加载摘要记录
|
||||||
summary_record = await asyncio.to_thread(self._load_summary_record, chat_id)
|
summary_record = await asyncio.to_thread(self._load_summary_record, chat_id)
|
||||||
|
|
||||||
|
# 计算 effective_keep_first 以确保所有系统消息都被保护
|
||||||
|
last_system_index = -1
|
||||||
|
for i, msg in enumerate(messages):
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
last_system_index = i
|
||||||
|
|
||||||
|
effective_keep_first = max(self.valves.keep_first, last_system_index + 1)
|
||||||
|
|
||||||
final_messages = []
|
final_messages = []
|
||||||
|
|
||||||
if summary_record:
|
if summary_record:
|
||||||
@@ -655,8 +883,8 @@ class Filter:
|
|||||||
|
|
||||||
# 1. 头部消息 (Keep First)
|
# 1. 头部消息 (Keep First)
|
||||||
head_messages = []
|
head_messages = []
|
||||||
if self.valves.keep_first > 0:
|
if effective_keep_first > 0:
|
||||||
head_messages = messages[: self.valves.keep_first]
|
head_messages = messages[:effective_keep_first]
|
||||||
|
|
||||||
# 2. 摘要消息 (作为 User 消息插入)
|
# 2. 摘要消息 (作为 User 消息插入)
|
||||||
summary_content = (
|
summary_content = (
|
||||||
@@ -669,29 +897,214 @@ class Filter:
|
|||||||
|
|
||||||
# 3. 尾部消息 (Tail) - 从上次压缩点开始的所有消息
|
# 3. 尾部消息 (Tail) - 从上次压缩点开始的所有消息
|
||||||
# 注意:这里必须确保不重复包含头部消息
|
# 注意:这里必须确保不重复包含头部消息
|
||||||
start_index = max(compressed_count, self.valves.keep_first)
|
start_index = max(compressed_count, effective_keep_first)
|
||||||
tail_messages = messages[start_index:]
|
tail_messages = messages[start_index:]
|
||||||
|
|
||||||
final_messages = head_messages + [summary_msg] + tail_messages
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
tail_preview = [
|
||||||
|
f"{i + start_index}: [{m.get('role')}] {m.get('content', '')[:30]}..."
|
||||||
|
for i, m in enumerate(tail_messages)
|
||||||
|
]
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📜 尾部消息 (起始索引: {start_index}): {tail_preview}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- 预检检查与预算 (Preflight Check & Budgeting) ---
|
||||||
|
|
||||||
|
# 组装候选消息 (用于输出)
|
||||||
|
candidate_messages = head_messages + [summary_msg] + tail_messages
|
||||||
|
|
||||||
|
# 准备用于 Token 计算的消息 (如果缺少则包含系统提示词)
|
||||||
|
calc_messages = candidate_messages
|
||||||
|
if system_prompt_msg:
|
||||||
|
# 检查系统提示词是否已在 head_messages 中
|
||||||
|
is_in_head = any(m.get("role") == "system" for m in head_messages)
|
||||||
|
if not is_in_head:
|
||||||
|
calc_messages = [system_prompt_msg] + candidate_messages
|
||||||
|
|
||||||
|
# 获取最大上下文限制
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
# 计算总 Token
|
||||||
|
total_tokens = await asyncio.to_thread(
|
||||||
|
self._calculate_messages_tokens, calc_messages
|
||||||
|
)
|
||||||
|
|
||||||
|
# 预检检查日志
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🔎 预检检查: {total_tokens}t / {max_context_tokens}t ({(total_tokens/max_context_tokens*100):.1f}%)",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 如果超出预算,缩减历史记录 (Keep Last)
|
||||||
|
if total_tokens > max_context_tokens:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ 候选提示词 ({total_tokens} Tokens) 超过上限 ({max_context_tokens})。正在缩减历史记录...",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 动态从 tail_messages 的开头移除消息
|
||||||
|
# 始终尝试保留至少最后一条消息 (通常是用户输入)
|
||||||
|
while total_tokens > max_context_tokens and len(tail_messages) > 1:
|
||||||
|
# 策略 1: 结构化助手消息裁剪 (Structure-Aware Assistant Trimming)
|
||||||
|
# 保留: 标题 (#), 第一行, 最后一行。折叠其余部分。
|
||||||
|
target_msg = None
|
||||||
|
target_idx = -1
|
||||||
|
|
||||||
|
# 查找最旧的、较长且尚未裁剪的助手消息
|
||||||
|
for i, msg in enumerate(tail_messages):
|
||||||
|
# 跳过最后一条消息 (通常是用户输入,保护它)
|
||||||
|
if i == len(tail_messages) - 1:
|
||||||
|
break
|
||||||
|
|
||||||
|
if msg.get("role") == "assistant":
|
||||||
|
content = str(msg.get("content", ""))
|
||||||
|
is_trimmed = msg.get("metadata", {}).get(
|
||||||
|
"is_trimmed", False
|
||||||
|
)
|
||||||
|
# 仅针对相当长 (> 200 字符) 的消息
|
||||||
|
if len(content) > 200 and not is_trimmed:
|
||||||
|
target_msg = msg
|
||||||
|
target_idx = i
|
||||||
|
break
|
||||||
|
|
||||||
|
# 如果找到合适的助手消息,应用结构化裁剪
|
||||||
|
if target_msg:
|
||||||
|
content = str(target_msg.get("content", ""))
|
||||||
|
lines = content.split("\n")
|
||||||
|
kept_lines = []
|
||||||
|
|
||||||
|
# 逻辑: 保留标题, 第一行非空行, 最后一行非空行
|
||||||
|
first_line_found = False
|
||||||
|
last_line_idx = -1
|
||||||
|
|
||||||
|
# 查找最后一行非空行的索引
|
||||||
|
for idx in range(len(lines) - 1, -1, -1):
|
||||||
|
if lines[idx].strip():
|
||||||
|
last_line_idx = idx
|
||||||
|
break
|
||||||
|
|
||||||
|
for idx, line in enumerate(lines):
|
||||||
|
stripped = line.strip()
|
||||||
|
if not stripped:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 保留标题 (H1-H6, 需要 # 后有空格)
|
||||||
|
if re.match(r"^#{1,6}\s+", stripped):
|
||||||
|
kept_lines.append(line)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 保留第一行非空行
|
||||||
|
if not first_line_found:
|
||||||
|
kept_lines.append(line)
|
||||||
|
first_line_found = True
|
||||||
|
# 如果后面还有内容,添加占位符
|
||||||
|
if idx < last_line_idx:
|
||||||
|
kept_lines.append("\n... [Content collapsed] ...\n")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 保留最后一行非空行
|
||||||
|
if idx == last_line_idx:
|
||||||
|
kept_lines.append(line)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 更新消息内容
|
||||||
|
new_content = "\n".join(kept_lines)
|
||||||
|
|
||||||
|
# 安全检查: 如果裁剪没有节省太多 (例如主要是标题),则强制丢弃
|
||||||
|
if len(new_content) > len(content) * 0.8:
|
||||||
|
# 如果结构保留过于冗长,回退到丢弃
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
target_msg["content"] = new_content
|
||||||
|
if "metadata" not in target_msg:
|
||||||
|
target_msg["metadata"] = {}
|
||||||
|
target_msg["metadata"]["is_trimmed"] = True
|
||||||
|
|
||||||
|
# 计算 Token 减少量
|
||||||
|
old_tokens = self._count_tokens(content)
|
||||||
|
new_tokens = self._count_tokens(target_msg["content"])
|
||||||
|
diff = old_tokens - new_tokens
|
||||||
|
total_tokens -= diff
|
||||||
|
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 📉 结构化裁剪助手消息。节省: {diff} tokens。",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 策略 2: 回退 - 完全丢弃最旧的消息 (FIFO)
|
||||||
|
dropped = tail_messages.pop(0)
|
||||||
|
dropped_tokens = self._count_tokens(str(dropped.get("content", "")))
|
||||||
|
total_tokens -= dropped_tokens
|
||||||
|
|
||||||
|
if self.valves.show_debug_log and __event_call__:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 🗑️ 从历史记录中丢弃消息以适应上下文。角色: {dropped.get('role')}, Tokens: {dropped_tokens}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 重新组装
|
||||||
|
candidate_messages = head_messages + [summary_msg] + tail_messages
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ 历史记录已缩减。新总数: {total_tokens} Tokens (尾部大小: {len(tail_messages)})",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
final_messages = candidate_messages
|
||||||
|
|
||||||
|
# 计算详细 Token 统计以用于日志
|
||||||
|
system_tokens = (
|
||||||
|
self._count_tokens(system_prompt_msg.get("content", ""))
|
||||||
|
if system_prompt_msg
|
||||||
|
else 0
|
||||||
|
)
|
||||||
|
head_tokens = self._calculate_messages_tokens(head_messages)
|
||||||
|
summary_tokens = self._count_tokens(summary_content)
|
||||||
|
tail_tokens = self._calculate_messages_tokens(tail_messages)
|
||||||
|
|
||||||
|
system_info = (
|
||||||
|
f"System({system_tokens}t)" if system_prompt_msg else "System(0t)"
|
||||||
|
)
|
||||||
|
|
||||||
|
total_section_tokens = (
|
||||||
|
system_tokens + head_tokens + summary_tokens + tail_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] 应用摘要: {system_info} + Head({len(head_messages)} 条, {head_tokens}t) + Summary({summary_tokens}t) + Tail({len(tail_messages)} 条, {tail_tokens}t) = Total({total_section_tokens}t)",
|
||||||
|
type="success",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 准备状态消息 (上下文使用量格式)
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
usage_ratio = total_section_tokens / max_context_tokens
|
||||||
|
status_msg = f"上下文使用量 (预估): {total_section_tokens} / {max_context_tokens} Tokens ({usage_ratio*100:.1f}%)"
|
||||||
|
if usage_ratio > 0.9:
|
||||||
|
status_msg += " | ⚠️ 高负载"
|
||||||
|
else:
|
||||||
|
status_msg = f"已加载历史摘要 (隐藏 {compressed_count} 条历史消息)"
|
||||||
|
|
||||||
# 发送状态通知
|
|
||||||
if __event_emitter__:
|
if __event_emitter__:
|
||||||
await __event_emitter__(
|
await __event_emitter__(
|
||||||
{
|
{
|
||||||
"type": "status",
|
"type": "status",
|
||||||
"data": {
|
"data": {
|
||||||
"description": f"已加载历史摘要 (隐藏 {compressed_count} 条历史消息)",
|
"description": status_msg,
|
||||||
"done": True,
|
"done": True,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
await self._log(
|
|
||||||
f"[Inlet] 应用摘要: Head({len(head_messages)}) + Summary + Tail({len(tail_messages)})",
|
|
||||||
type="success",
|
|
||||||
event_call=__event_call__,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Emit debug log to frontend (Keep the structured log as well)
|
# Emit debug log to frontend (Keep the structured log as well)
|
||||||
await self._emit_debug_log(
|
await self._emit_debug_log(
|
||||||
__event_call__,
|
__event_call__,
|
||||||
@@ -704,8 +1117,73 @@ class Filter:
|
|||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
# 没有摘要,使用原始消息
|
# 没有摘要,使用原始消息
|
||||||
|
# 但仍然需要检查预算!
|
||||||
final_messages = messages
|
final_messages = messages
|
||||||
|
|
||||||
|
# 包含系统提示词进行计算
|
||||||
|
calc_messages = final_messages
|
||||||
|
if system_prompt_msg:
|
||||||
|
is_in_messages = any(m.get("role") == "system" for m in final_messages)
|
||||||
|
if not is_in_messages:
|
||||||
|
calc_messages = [system_prompt_msg] + final_messages
|
||||||
|
|
||||||
|
# 获取最大上下文限制
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
total_tokens = await asyncio.to_thread(
|
||||||
|
self._calculate_messages_tokens, calc_messages
|
||||||
|
)
|
||||||
|
|
||||||
|
if total_tokens > max_context_tokens:
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ⚠️ 原始消息 ({total_tokens} Tokens) 超过上限 ({max_context_tokens})。正在缩减历史记录...",
|
||||||
|
type="warning",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 动态从开头移除消息
|
||||||
|
# 我们将遵守 effective_keep_first 以保护系统提示词
|
||||||
|
|
||||||
|
start_trim_index = effective_keep_first
|
||||||
|
|
||||||
|
while (
|
||||||
|
total_tokens > max_context_tokens
|
||||||
|
and len(final_messages)
|
||||||
|
> start_trim_index + 1 # 保留 keep_first 之后至少 1 条消息
|
||||||
|
):
|
||||||
|
dropped = final_messages.pop(start_trim_index)
|
||||||
|
total_tokens -= self._count_tokens(str(dropped.get("content", "")))
|
||||||
|
|
||||||
|
await self._log(
|
||||||
|
f"[Inlet] ✂️ 消息已缩减。新总数: {total_tokens} Tokens",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 发送状态通知 (上下文使用量格式)
|
||||||
|
if __event_emitter__:
|
||||||
|
status_msg = (
|
||||||
|
f"上下文使用量 (预估): {total_tokens} / {max_context_tokens} Tokens"
|
||||||
|
)
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
usage_ratio = total_tokens / max_context_tokens
|
||||||
|
status_msg += f" ({usage_ratio*100:.1f}%)"
|
||||||
|
if usage_ratio > 0.9:
|
||||||
|
status_msg += " | ⚠️ 高负载"
|
||||||
|
|
||||||
|
await __event_emitter__(
|
||||||
|
{
|
||||||
|
"type": "status",
|
||||||
|
"data": {
|
||||||
|
"description": status_msg,
|
||||||
|
"done": True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
body["messages"] = final_messages
|
body["messages"] = final_messages
|
||||||
|
|
||||||
await self._log(
|
await self._log(
|
||||||
@@ -882,11 +1360,23 @@ class Filter:
|
|||||||
return
|
return
|
||||||
|
|
||||||
middle_messages = messages[start_index:end_index]
|
middle_messages = messages[start_index:end_index]
|
||||||
|
tail_preview_msgs = messages[end_index:]
|
||||||
|
|
||||||
await self._log(
|
if self.valves.show_debug_log and __event_call__:
|
||||||
f"[🤖 异步摘要任务] 待处理中间消息: {len(middle_messages)} 条",
|
middle_preview = [
|
||||||
event_call=__event_call__,
|
f"{i + start_index}: [{m.get('role')}] {m.get('content', '')[:20]}..."
|
||||||
)
|
for i, m in enumerate(middle_messages[:3])
|
||||||
|
]
|
||||||
|
tail_preview = [
|
||||||
|
f"{i + end_index}: [{m.get('role')}] {m.get('content', '')[:20]}..."
|
||||||
|
for i, m in enumerate(tail_preview_msgs)
|
||||||
|
]
|
||||||
|
await self._log(
|
||||||
|
f"[🤖 异步摘要任务] 📊 边界检查:\n"
|
||||||
|
f" - 中间 (压缩): {len(middle_messages)} 条 (索引 {start_index}-{end_index-1}) -> 预览: {middle_preview}\n"
|
||||||
|
f" - 尾部 (保留): {len(tail_preview_msgs)} 条 (索引 {end_index}-End) -> 预览: {tail_preview}",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
# 3. 检查 Token 上限并截断 (Max Context Truncation)
|
# 3. 检查 Token 上限并截断 (Max Context Truncation)
|
||||||
# [优化] 使用摘要模型(如果有)的阈值来决定能处理多少中间消息
|
# [优化] 使用摘要模型(如果有)的阈值来决定能处理多少中间消息
|
||||||
@@ -1020,6 +1510,109 @@ class Filter:
|
|||||||
event_call=__event_call__,
|
event_call=__event_call__,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# --- Token 使用情况状态通知 ---
|
||||||
|
if self.valves.show_token_usage_status and __event_emitter__:
|
||||||
|
try:
|
||||||
|
# 1. 获取系统提示词 (DB 回退)
|
||||||
|
system_prompt_msg = None
|
||||||
|
model_id = body.get("model")
|
||||||
|
if model_id:
|
||||||
|
try:
|
||||||
|
model_obj = Models.get_model_by_id(model_id)
|
||||||
|
if model_obj and model_obj.params:
|
||||||
|
params = model_obj.params
|
||||||
|
if isinstance(params, str):
|
||||||
|
params = json.loads(params)
|
||||||
|
if isinstance(params, dict):
|
||||||
|
sys_content = params.get("system")
|
||||||
|
else:
|
||||||
|
sys_content = getattr(params, "system", None)
|
||||||
|
|
||||||
|
if sys_content:
|
||||||
|
system_prompt_msg = {
|
||||||
|
"role": "system",
|
||||||
|
"content": sys_content,
|
||||||
|
}
|
||||||
|
except Exception:
|
||||||
|
pass # 忽略 DB 错误,尽力而为
|
||||||
|
|
||||||
|
# 2. 计算 Effective Keep First
|
||||||
|
last_system_index = -1
|
||||||
|
for i, msg in enumerate(messages):
|
||||||
|
if msg.get("role") == "system":
|
||||||
|
last_system_index = i
|
||||||
|
effective_keep_first = max(
|
||||||
|
self.valves.keep_first, last_system_index + 1
|
||||||
|
)
|
||||||
|
|
||||||
|
# 3. 构建下一个上下文 (Next Context)
|
||||||
|
# Head
|
||||||
|
head_msgs = (
|
||||||
|
messages[:effective_keep_first]
|
||||||
|
if effective_keep_first > 0
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
summary_content = (
|
||||||
|
f"【系统提示:以下是历史对话的摘要,仅供参考上下文,请勿对摘要内容进行回复,直接回答后续的最新问题】\n\n"
|
||||||
|
f"{new_summary}\n\n"
|
||||||
|
f"---\n"
|
||||||
|
f"以下是最近的对话:"
|
||||||
|
)
|
||||||
|
summary_msg = {"role": "assistant", "content": summary_content}
|
||||||
|
|
||||||
|
# Tail (使用 target_compressed_count,这是我们刚刚压缩到的位置)
|
||||||
|
# 注意:target_compressed_count 是要被摘要覆盖的消息数(不包括 keep_last)
|
||||||
|
# 所以 tail 从 max(target_compressed_count, effective_keep_first) 开始
|
||||||
|
start_index = max(target_compressed_count, effective_keep_first)
|
||||||
|
tail_msgs = messages[start_index:]
|
||||||
|
|
||||||
|
# 组装
|
||||||
|
next_context = head_msgs + [summary_msg] + tail_msgs
|
||||||
|
|
||||||
|
# 如果需要,注入系统提示词
|
||||||
|
if system_prompt_msg:
|
||||||
|
is_in_head = any(m.get("role") == "system" for m in head_msgs)
|
||||||
|
if not is_in_head:
|
||||||
|
next_context = [system_prompt_msg] + next_context
|
||||||
|
|
||||||
|
# 4. 计算 Token
|
||||||
|
token_count = self._calculate_messages_tokens(next_context)
|
||||||
|
|
||||||
|
# 5. 获取阈值并计算比例
|
||||||
|
model = self._clean_model_id(body.get("model"))
|
||||||
|
thresholds = self._get_model_thresholds(model)
|
||||||
|
max_context_tokens = thresholds.get(
|
||||||
|
"max_context_tokens", self.valves.max_context_tokens
|
||||||
|
)
|
||||||
|
|
||||||
|
# 6. 发送状态
|
||||||
|
status_msg = (
|
||||||
|
f"上下文摘要已更新: {token_count} / {max_context_tokens} Tokens"
|
||||||
|
)
|
||||||
|
if max_context_tokens > 0:
|
||||||
|
ratio = (token_count / max_context_tokens) * 100
|
||||||
|
status_msg += f" ({ratio:.1f}%)"
|
||||||
|
if ratio > 90.0:
|
||||||
|
status_msg += " | ⚠️ 高负载"
|
||||||
|
|
||||||
|
await __event_emitter__(
|
||||||
|
{
|
||||||
|
"type": "status",
|
||||||
|
"data": {
|
||||||
|
"description": status_msg,
|
||||||
|
"done": True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
await self._log(
|
||||||
|
f"[Status] 计算 Token 错误: {e}",
|
||||||
|
type="error",
|
||||||
|
event_call=__event_call__,
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
await self._log(
|
await self._log(
|
||||||
f"[🤖 异步摘要任务] ❌ 错误: {str(e)}",
|
f"[🤖 异步摘要任务] ❌ 错误: {str(e)}",
|
||||||
|
|||||||
@@ -1,61 +1,96 @@
|
|||||||
# Markdown Normalizer Filter
|
# Markdown Normalizer Filter
|
||||||
|
|
||||||
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui)
|
**Author:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **Version:** 1.2.4 | **Project:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **License:** MIT
|
||||||
**Version:** 1.1.2
|
|
||||||
|
|
||||||
A content normalizer filter for Open WebUI that fixes common Markdown formatting issues in LLM outputs. It ensures that code blocks, LaTeX formulas, Mermaid diagrams, and other Markdown elements are rendered correctly.
|
A content normalizer filter for Open WebUI that fixes common Markdown formatting issues in LLM outputs. It ensures that code blocks, LaTeX formulas, Mermaid diagrams, and other Markdown elements are rendered correctly.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
* **Mermaid Syntax Fix**: Automatically fixes common Mermaid syntax errors, such as unquoted node labels (including multi-line labels and citations) and unclosed subgraphs. **New in v1.1.2**: Comprehensive protection for edge labels (text on connecting lines) across all link types (solid, dotted, thick).
|
* **Details Tag Normalization**: Ensures proper spacing for `<details>` tags (used for thought chains). Adds a blank line after `</details>` and ensures a newline after self-closing `<details />` tags to prevent rendering issues.
|
||||||
* **Frontend Console Debugging**: Supports printing structured debug logs directly to the browser console (F12) for easier troubleshooting.
|
* **Emphasis Spacing Fix**: Fixes extra spaces inside emphasis markers (e.g., `** text **` -> `**text**`) which can cause rendering failures. Includes safeguards to protect math expressions (e.g., `2 * 3 * 4`) and list variables.
|
||||||
* **Code Block Formatting**: Fixes broken code block prefixes, suffixes, and indentation.
|
* **Mermaid Syntax Fix**: Automatically fixes common Mermaid syntax errors, such as unquoted node labels (including multi-line labels and citations) and unclosed subgraphs. **New in v1.1.2**: Comprehensive protection for edge labels (text on connecting lines) across all link types (solid, dotted, thick).
|
||||||
* **LaTeX Normalization**: Standardizes LaTeX formula delimiters (`\[` -> `$$`, `\(` -> `$`).
|
* **Frontend Console Debugging**: Supports printing structured debug logs directly to the browser console (F12) for easier troubleshooting.
|
||||||
* **Thought Tag Normalization**: Unifies thought tags (`<think>`, `<thinking>` -> `<thought>`).
|
* **Code Block Formatting**: Fixes broken code block prefixes, suffixes, and indentation.
|
||||||
* **Escape Character Fix**: Cleans up excessive escape characters (`\\n`, `\\t`).
|
* **LaTeX Normalization**: Standardizes LaTeX formula delimiters (`\[` -> `$$`, `\(` -> `$`).
|
||||||
* **List Formatting**: Ensures proper newlines in list items.
|
* **Thought Tag Normalization**: Unifies thought tags (`<think>`, `<thinking>` -> `<thought>`).
|
||||||
* **Heading Fix**: Adds missing spaces in headings (`#Heading` -> `# Heading`).
|
* **Escape Character Fix**: Cleans up excessive escape characters (`\\n`, `\\t`).
|
||||||
* **Table Fix**: Adds missing closing pipes in tables.
|
* **List Formatting**: Ensures proper newlines in list items.
|
||||||
* **XML Cleanup**: Removes leftover XML artifacts.
|
* **Heading Fix**: Adds missing spaces in headings (`#Heading` -> `# Heading`).
|
||||||
|
* **Table Fix**: Adds missing closing pipes in tables.
|
||||||
|
* **XML Cleanup**: Removes leftover XML artifacts.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
1. Install the plugin in Open WebUI.
|
1. Install the plugin in Open WebUI.
|
||||||
2. Enable the filter globally or for specific models.
|
2. Enable the filter globally or for specific models.
|
||||||
3. Configure the enabled fixes in the **Valves** settings.
|
3. Configure the enabled fixes in the **Valves** settings.
|
||||||
4. (Optional) **Show Debug Log** is enabled by default in Valves. This prints structured logs to the browser console (F12).
|
4. (Optional) **Show Debug Log** is enabled by default in Valves. This prints structured logs to the browser console (F12).
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> As this is an initial version, some "negative fixes" might occur (e.g., breaking valid Markdown). If you encounter issues, please check the console logs, copy the "Original" vs "Normalized" content, and submit an issue.
|
> As this is an initial version, some "negative fixes" might occur (e.g., breaking valid Markdown). If you encounter issues, please check the console logs, copy the "Original" vs "Normalized" content, and submit an issue.
|
||||||
|
|
||||||
## Configuration (Valves)
|
## Configuration (Valves)
|
||||||
|
|
||||||
* `priority`: Filter priority (default: 50).
|
* `priority`: Filter priority (default: 50).
|
||||||
* `enable_escape_fix`: Fix excessive escape characters.
|
* `enable_escape_fix`: Fix excessive escape characters.
|
||||||
* `enable_thought_tag_fix`: Normalize thought tags.
|
* `enable_thought_tag_fix`: Normalize thought tags.
|
||||||
* `enable_code_block_fix`: Fix code block formatting.
|
* `enable_details_tag_fix`: Normalize details tags (default: True).
|
||||||
* `enable_latex_fix`: Normalize LaTeX formulas.
|
* `enable_code_block_fix`: Fix code block formatting.
|
||||||
* `enable_list_fix`: Fix list item newlines (Experimental).
|
* `enable_latex_fix`: Normalize LaTeX formulas.
|
||||||
* `enable_unclosed_block_fix`: Auto-close unclosed code blocks.
|
* `enable_list_fix`: Fix list item newlines (Experimental).
|
||||||
* `enable_fullwidth_symbol_fix`: Fix full-width symbols in code blocks.
|
* `enable_unclosed_block_fix`: Auto-close unclosed code blocks.
|
||||||
* `enable_mermaid_fix`: Fix Mermaid syntax errors.
|
* `enable_fullwidth_symbol_fix`: Fix full-width symbols in code blocks.
|
||||||
* `enable_heading_fix`: Fix missing space in headings.
|
* `enable_mermaid_fix`: Fix Mermaid syntax errors.
|
||||||
* `enable_table_fix`: Fix missing closing pipe in tables.
|
* `enable_heading_fix`: Fix missing space in headings.
|
||||||
* `enable_xml_tag_cleanup`: Cleanup leftover XML tags.
|
* `enable_table_fix`: Fix missing closing pipe in tables.
|
||||||
* `show_status`: Show status notification when fixes are applied.
|
* `enable_xml_tag_cleanup`: Cleanup leftover XML tags.
|
||||||
* `show_debug_log`: Print debug logs to browser console.
|
* `enable_emphasis_spacing_fix`: Fix extra spaces in emphasis (default: False).
|
||||||
|
* `show_status`: Show status notification when fixes are applied.
|
||||||
|
* `show_debug_log`: Print debug logs to browser console.
|
||||||
|
|
||||||
|
## Troubleshooting ❓
|
||||||
|
|
||||||
|
* **Submit an Issue**: If you encounter any problems, please submit an issue on GitHub: [Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
## Changelog
|
## Changelog
|
||||||
|
|
||||||
|
### v1.2.4
|
||||||
|
|
||||||
|
* **Documentation Updates**: Synchronized version numbers across all documentation and code files.
|
||||||
|
|
||||||
|
### v1.2.3
|
||||||
|
|
||||||
|
* **List Marker Protection Enhancement**: Fixed a bug where list markers (`*`) followed by plain text and emphasis were having their spaces incorrectly stripped (e.g., `* U16 forward` became `*U16 forward`).
|
||||||
|
* **Placeholder Support**: Confirmed that 4 or more underscores (e.g., `____`) are correctly treated as placeholders and not modified by the emphasis fix.
|
||||||
|
|
||||||
|
### v1.2.2
|
||||||
|
|
||||||
|
* **Code Block Indentation Fix**: Fixed an issue where code blocks nested inside lists were having their indentation incorrectly stripped. Now preserves proper indentation for nested code blocks.
|
||||||
|
* **Underscore Emphasis Support**: Extended emphasis spacing fix to support `__` (double underscore for bold) and `___` (triple underscore for bold+italic) syntax.
|
||||||
|
* **List Marker Protection**: Fixed a bug where list markers (`*`) followed by emphasis markers (`**`) were incorrectly merged (e.g., `* **Yes**` became `***Yes**`). Added safeguard to prevent this.
|
||||||
|
* **Test Suite**: Added comprehensive pytest test suite with 56 test cases covering all major features.
|
||||||
|
|
||||||
|
### v1.2.1
|
||||||
|
|
||||||
|
* **Emphasis Spacing Fix**: Added a new fix for extra spaces inside emphasis markers (e.g., `** text **` -> `**text**`).
|
||||||
|
* Uses a recursive approach to handle nested emphasis (e.g., `**bold _italic _**`).
|
||||||
|
* Includes safeguards to prevent modifying math expressions (e.g., `2 * 3 * 4`) or list variables.
|
||||||
|
* Controlled by the `enable_emphasis_spacing_fix` valve (default: True).
|
||||||
|
|
||||||
|
### v1.2.0
|
||||||
|
|
||||||
|
* **Details Tag Support**: Added normalization for `<details>` tags.
|
||||||
|
* Ensures a blank line is added after `</details>` closing tags to separate thought content from the main response.
|
||||||
|
* Ensures a newline is added after self-closing `<details ... />` tags to prevent them from interfering with subsequent Markdown headings (e.g., fixing `<details/>#Heading`).
|
||||||
|
* Includes safeguard to prevent modification of `<details>` tags inside code blocks.
|
||||||
|
|
||||||
### v1.1.2
|
### v1.1.2
|
||||||
* **Mermaid Edge Label Protection**: Implemented comprehensive protection for edge labels (text on connecting lines) to prevent them from being incorrectly modified. Now supports all Mermaid link types including solid (`--`), dotted (`-.`), and thick (`==`) lines with or without arrows.
|
|
||||||
* **Bug Fixes**: Fixed an issue where lines without arrows (e.g., `A -- text --- B`) were not correctly protected.
|
* **Mermaid Edge Label Protection**: Implemented comprehensive protection for edge labels (text on connecting lines) to prevent them from being incorrectly modified. Now supports all Mermaid link types including solid (`--`), dotted (`-.`), and thick (`==`) lines with or without arrows.
|
||||||
|
* **Bug Fixes**: Fixed an issue where lines without arrows (e.g., `A -- text --- B`) were not correctly protected.
|
||||||
|
|
||||||
### v1.1.0
|
### v1.1.0
|
||||||
* **Mermaid Fix Refinement**: Improved regex to handle nested parentheses in node labels (e.g., `ID("Label (text)")`) and avoided matching connection labels.
|
|
||||||
* **HTML Safeguard Optimization**: Refined `_contains_html` to allow common tags like `<br/>`, `<b>`, `<i>`, etc., ensuring Mermaid diagrams with these tags are still normalized.
|
|
||||||
* **Full-width Symbol Cleanup**: Fixed duplicate keys and incorrect quote mapping in `FULLWIDTH_MAP`.
|
|
||||||
* **Bug Fixes**: Fixed missing `Dict` import in Python files.
|
|
||||||
|
|
||||||
## License
|
* **Mermaid Fix Refinement**: Improved regex to handle nested parentheses in node labels (e.g., `ID("Label (text)")`) and avoided matching connection labels.
|
||||||
|
* **HTML Safeguard Optimization**: Refined `_contains_html` to allow common tags like `<br/>`, `<b>`, `<i>`, etc., ensuring Mermaid diagrams with these tags are still normalized.
|
||||||
MIT
|
* **Full-width Symbol Cleanup**: Fixed duplicate keys and incorrect quote mapping in `FULLWIDTH_MAP`.
|
||||||
|
* **Bug Fixes**: Fixed missing `Dict` import in Python files.
|
||||||
|
|||||||
@@ -1,61 +1,96 @@
|
|||||||
# Markdown 格式化过滤器 (Markdown Normalizer)
|
# Markdown 格式化过滤器 (Markdown Normalizer)
|
||||||
|
|
||||||
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui)
|
**作者:** [Fu-Jie](https://github.com/Fu-Jie/awesome-openwebui) | **版本:** 1.2.4 | **项目:** [Awesome OpenWebUI](https://github.com/Fu-Jie/awesome-openwebui) | **许可证:** MIT
|
||||||
**版本:** 1.1.2
|
|
||||||
|
|
||||||
这是一个用于 Open WebUI 的内容格式化过滤器,旨在修复 LLM 输出中常见的 Markdown 格式问题。它能确保代码块、LaTeX 公式、Mermaid 图表和其他 Markdown 元素被正确渲染。
|
这是一个用于 Open WebUI 的内容格式化过滤器,旨在修复 LLM 输出中常见的 Markdown 格式问题。它能确保代码块、LaTeX 公式、Mermaid 图表和其他 Markdown 元素被正确渲染。
|
||||||
|
|
||||||
## 功能特性
|
## 功能特性
|
||||||
|
|
||||||
* **Mermaid 语法修复**: 自动修复常见的 Mermaid 语法错误,如未加引号的节点标签(支持多行标签和引用标记)和未闭合的子图 (Subgraph)。**v1.1.2 新增**: 全面保护各种类型的连线标签(实线、虚线、粗线),防止被误修改。
|
* **Details 标签规范化**: 确保 `<details>` 标签(常用于思维链)有正确的间距。在 `</details>` 后添加空行,并在自闭合 `<details />` 标签后添加换行,防止渲染问题。
|
||||||
* **前端控制台调试**: 支持将结构化的调试日志直接打印到浏览器控制台 (F12),方便排查问题。
|
* **强调空格修复**: 修复强调标记内部的多余空格(例如 `** 文本 **` -> `**文本**`),这会导致 Markdown 渲染失败。包含保护机制,防止误修改数学表达式(如 `2 * 3 * 4`)或列表变量。
|
||||||
* **代码块格式化**: 修复破损的代码块前缀、后缀和缩进问题。
|
* **Mermaid 语法修复**: 自动修复常见的 Mermaid 语法错误,如未加引号的节点标签(支持多行标签和引用标记)和未闭合的子图 (Subgraph)。**v1.1.2 新增**: 全面保护各种类型的连线标签(实线、虚线、粗线),防止被误修改。
|
||||||
* **LaTeX 规范化**: 标准化 LaTeX 公式定界符 (`\[` -> `$$`, `\(` -> `$`)。
|
* **前端控制台调试**: 支持将结构化的调试日志直接打印到浏览器控制台 (F12),方便排查问题。
|
||||||
* **思维标签规范化**: 统一思维链标签 (`<think>`, `<thinking>` -> `<thought>`)。
|
* **代码块格式化**: 修复破损的代码块前缀、后缀和缩进问题。
|
||||||
* **转义字符修复**: 清理过度的转义字符 (`\\n`, `\\t`)。
|
* **LaTeX 规范化**: 标准化 LaTeX 公式定界符 (`\[` -> `$$`, `\(` -> `$`)。
|
||||||
* **列表格式化**: 确保列表项有正确的换行。
|
* **思维标签规范化**: 统一思维链标签 (`<think>`, `<thinking>` -> `<thought>`)。
|
||||||
* **标题修复**: 修复标题中缺失的空格 (`#标题` -> `# 标题`)。
|
* **转义字符修复**: 清理过度的转义字符 (`\\n`, `\\t`)。
|
||||||
* **表格修复**: 修复表格中缺失的闭合管道符。
|
* **列表格式化**: 确保列表项有正确的换行。
|
||||||
* **XML 清理**: 移除残留的 XML 标签。
|
* **标题修复**: 修复标题中缺失的空格 (`#标题` -> `# 标题`)。
|
||||||
|
* **表格修复**: 修复表格中缺失的闭合管道符。
|
||||||
|
* **XML 清理**: 移除残留的 XML 标签。
|
||||||
|
|
||||||
## 使用方法
|
## 使用方法
|
||||||
|
|
||||||
1. 在 Open WebUI 中安装此插件。
|
1. 在 Open WebUI 中安装此插件。
|
||||||
2. 全局启用或为特定模型启用此过滤器。
|
2. 全局启用或为特定模型启用此过滤器。
|
||||||
3. 在 **Valves** 设置中配置需要启用的修复项。
|
3. 在 **Valves** 设置中配置需要启用的修复项。
|
||||||
4. (可选) **显示调试日志 (Show Debug Log)** 在 Valves 中默认开启。这会将结构化的日志打印到浏览器控制台 (F12)。
|
4. (可选) **显示调试日志 (Show Debug Log)** 在 Valves 中默认开启。这会将结构化的日志打印到浏览器控制台 (F12)。
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> 由于这是初版,可能会出现“负向修复”的情况(例如破坏了原本正确的格式)。如果您遇到问题,请务必查看控制台日志,复制“原始 (Original)”与“规范化 (Normalized)”的内容对比,并提交 Issue 反馈。
|
> 由于这是初版,可能会出现“负向修复”的情况(例如破坏了原本正确的格式)。如果您遇到问题,请务必查看控制台日志,复制“原始 (Original)”与“规范化 (Normalized)”的内容对比,并提交 Issue 反馈。
|
||||||
|
|
||||||
## 配置项 (Valves)
|
## 配置项 (Valves)
|
||||||
|
|
||||||
* `priority`: 过滤器优先级 (默认: 50)。
|
* `priority`: 过滤器优先级 (默认: 50)。
|
||||||
* `enable_escape_fix`: 修复过度的转义字符。
|
* `enable_escape_fix`: 修复过度的转义字符。
|
||||||
* `enable_thought_tag_fix`: 规范化思维标签。
|
* `enable_thought_tag_fix`: 规范化思维标签。
|
||||||
* `enable_code_block_fix`: 修复代码块格式。
|
* `enable_details_tag_fix`: 规范化 Details 标签 (默认: True)。
|
||||||
* `enable_latex_fix`: 规范化 LaTeX 公式。
|
* `enable_code_block_fix`: 修复代码块格式。
|
||||||
* `enable_list_fix`: 修复列表项换行 (实验性)。
|
* `enable_latex_fix`: 规范化 LaTeX 公式。
|
||||||
* `enable_unclosed_block_fix`: 自动闭合未闭合的代码块。
|
* `enable_list_fix`: 修复列表项换行 (实验性)。
|
||||||
* `enable_fullwidth_symbol_fix`: 修复代码块中的全角符号。
|
* `enable_unclosed_block_fix`: 自动闭合未闭合的代码块。
|
||||||
* `enable_mermaid_fix`: 修复 Mermaid 语法错误。
|
* `enable_fullwidth_symbol_fix`: 修复代码块中的全角符号。
|
||||||
* `enable_heading_fix`: 修复标题中缺失的空格。
|
* `enable_mermaid_fix`: 修复 Mermaid 语法错误。
|
||||||
* `enable_table_fix`: 修复表格中缺失的闭合管道符。
|
* `enable_heading_fix`: 修复标题中缺失的空格。
|
||||||
* `enable_xml_tag_cleanup`: 清理残留的 XML 标签。
|
* `enable_table_fix`: 修复表格中缺失的闭合管道符。
|
||||||
* `show_status`: 应用修复时显示状态通知。
|
* `enable_xml_tag_cleanup`: 清理残留的 XML 标签。
|
||||||
* `show_debug_log`: 在浏览器控制台打印调试日志。
|
* `enable_emphasis_spacing_fix`: 修复强调语法中的多余空格 (默认: True)。
|
||||||
|
* `show_status`: 应用修复时显示状态通知。
|
||||||
|
* `show_debug_log`: 在浏览器控制台打印调试日志。
|
||||||
|
|
||||||
|
## 故障排除 (Troubleshooting) ❓
|
||||||
|
|
||||||
|
* **提交 Issue**: 如果遇到任何问题,请在 GitHub 上提交 Issue:[Awesome OpenWebUI Issues](https://github.com/Fu-Jie/awesome-openwebui/issues)
|
||||||
|
|
||||||
## 更新日志
|
## 更新日志
|
||||||
|
|
||||||
|
### v1.2.4
|
||||||
|
|
||||||
|
* **文档更新**: 同步了所有文档和代码文件的版本号。
|
||||||
|
|
||||||
|
### v1.2.3
|
||||||
|
|
||||||
|
* **列表标记保护增强**: 修复了列表标记 (`*`) 后跟普通文本和强调标记时,空格被错误剥离的问题(例如 `* U16 前锋` 变成 `*U16 前锋`)。
|
||||||
|
* **占位符支持**: 确认 4 个或更多下划线(如 `____`)会被正确视为占位符,不会被强调修复逻辑修改。
|
||||||
|
|
||||||
|
### v1.2.2
|
||||||
|
|
||||||
|
* **代码块缩进修复**: 修复了列表中嵌套代码块的缩进被错误剥离的问题。现在会正确保留嵌套代码块的缩进。
|
||||||
|
* **下划线强调语法支持**: 扩展强调空格修复以支持 `__` (双下划线加粗) 和 `___` (三下划线加粗斜体) 语法。
|
||||||
|
* **列表标记保护**: 修复了列表标记 (`*`) 后跟强调标记 (`**`) 被错误合并的 Bug(例如 `* **是**` 变成 `***是**`)。添加了保护逻辑防止此问题。
|
||||||
|
* **测试套件**: 新增完整的 pytest 测试套件,包含 56 个测试用例,覆盖所有主要功能。
|
||||||
|
|
||||||
|
### v1.2.1
|
||||||
|
|
||||||
|
* **强调空格修复**: 新增了对强调标记内部多余空格的修复(例如 `** 文本 **` -> `**文本**`)。
|
||||||
|
* 采用递归方法处理嵌套强调(例如 `**加粗 _斜体 _**`)。
|
||||||
|
* 包含保护机制,防止误修改数学表达式(如 `2 * 3 * 4`)或列表变量。
|
||||||
|
* 通过 `enable_emphasis_spacing_fix` 开关控制(默认:开启)。
|
||||||
|
|
||||||
|
### v1.2.0
|
||||||
|
|
||||||
|
* **Details 标签支持**: 新增了对 `<details>` 标签的规范化支持。
|
||||||
|
* 确保在 `</details>` 闭合标签后添加空行,将思维内容与正文分隔开。
|
||||||
|
* 确保在自闭合 `<details ... />` 标签后添加换行,防止其干扰后续的 Markdown 标题(例如修复 `<details/>#标题`)。
|
||||||
|
* 包含保护机制,防止修改代码块内部的 `<details>` 标签。
|
||||||
|
|
||||||
### v1.1.2
|
### v1.1.2
|
||||||
* **Mermaid 连线标签保护**: 实现了全面的连线标签保护机制,防止连接线上的文字被误修改。现在支持所有 Mermaid 连线类型,包括实线 (`--`)、虚线 (`-.`) 和粗线 (`==`),无论是否带有箭头。
|
|
||||||
* **Bug 修复**: 修复了无箭头连线(如 `A -- text --- B`)未被正确保护的问题。
|
* **Mermaid 连线标签保护**: 实现了全面的连线标签保护机制,防止连接线上的文字被误修改。现在支持所有 Mermaid 连线类型,包括实线 (`--`)、虚线 (`-.`) 和粗线 (`==`),无论是否带有箭头。
|
||||||
|
* **Bug 修复**: 修复了无箭头连线(如 `A -- text --- B`)未被正确保护的问题。
|
||||||
|
|
||||||
### v1.1.0
|
### v1.1.0
|
||||||
* **Mermaid 修复优化**: 改进了正则表达式以处理节点标签中的嵌套括号(如 `ID("标签 (文本)")`),并避免误匹配连接线上的文字。
|
|
||||||
* **HTML 保护机制优化**: 优化了 `_contains_html` 检测,允许 `<br/>`, `<b>`, `<i>` 等常见标签,确保包含这些标签的 Mermaid 图表能被正常规范化。
|
|
||||||
* **全角符号清理**: 修复了 `FULLWIDTH_MAP` 中的重复键名和错误的引号映射。
|
|
||||||
* **Bug 修复**: 修复了 Python 文件中缺失的 `Dict` 类型导入。
|
|
||||||
|
|
||||||
## 许可证
|
* **Mermaid 修复优化**: 改进了正则表达式以处理节点标签中的嵌套括号(如 `ID("标签 (文本)")`),并避免误匹配连接线上的文字。
|
||||||
|
* **HTML 保护机制优化**: 优化了 `_contains_html` 检测,允许 `<br/>`, `<b>`, `<i>` 等常见标签,确保包含这些标签的 Mermaid 图表能被正常规范化。
|
||||||
MIT
|
* **全角符号清理**: 修复了 `FULLWIDTH_MAP` 中的重复键名和错误的引号映射。
|
||||||
|
* **Bug 修复**: 修复了 Python 文件中缺失的 `Dict` 类型导入。
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ title: Markdown Normalizer
|
|||||||
author: Fu-Jie
|
author: Fu-Jie
|
||||||
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||||
funding_url: https://github.com/open-webui
|
funding_url: https://github.com/open-webui
|
||||||
version: 1.1.2
|
version: 1.2.4
|
||||||
openwebui_id: baaa8732-9348-40b7-8359-7e009660e23c
|
openwebui_id: baaa8732-9348-40b7-8359-7e009660e23c
|
||||||
description: A content normalizer filter that fixes common Markdown formatting issues in LLM outputs, such as broken code blocks, LaTeX formulas, and list formatting.
|
description: A content normalizer filter that fixes common Markdown formatting issues in LLM outputs, such as broken code blocks, LaTeX formulas, and list formatting.
|
||||||
"""
|
"""
|
||||||
@@ -29,6 +29,7 @@ class NormalizerConfig:
|
|||||||
False # Apply escape fix inside code blocks (default: False for safety)
|
False # Apply escape fix inside code blocks (default: False for safety)
|
||||||
)
|
)
|
||||||
enable_thought_tag_fix: bool = True # Normalize thought tags
|
enable_thought_tag_fix: bool = True # Normalize thought tags
|
||||||
|
enable_details_tag_fix: bool = True # Normalize <details> tags (like thought tags)
|
||||||
enable_code_block_fix: bool = True # Fix code block formatting
|
enable_code_block_fix: bool = True # Fix code block formatting
|
||||||
enable_latex_fix: bool = True # Fix LaTeX formula formatting
|
enable_latex_fix: bool = True # Fix LaTeX formula formatting
|
||||||
enable_list_fix: bool = (
|
enable_list_fix: bool = (
|
||||||
@@ -42,6 +43,7 @@ class NormalizerConfig:
|
|||||||
)
|
)
|
||||||
enable_table_fix: bool = True # Fix missing closing pipe in tables
|
enable_table_fix: bool = True # Fix missing closing pipe in tables
|
||||||
enable_xml_tag_cleanup: bool = True # Cleanup leftover XML tags
|
enable_xml_tag_cleanup: bool = True # Cleanup leftover XML tags
|
||||||
|
enable_emphasis_spacing_fix: bool = False # Fix spaces inside **emphasis**
|
||||||
|
|
||||||
# Custom cleaner functions (for advanced extension)
|
# Custom cleaner functions (for advanced extension)
|
||||||
custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
||||||
@@ -52,8 +54,8 @@ class ContentNormalizer:
|
|||||||
|
|
||||||
# --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
|
# --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
|
||||||
_PATTERNS = {
|
_PATTERNS = {
|
||||||
# Code block prefix: if ``` is not at start of line or file
|
# Code block prefix: if ``` is not at start of line (ignoring whitespace)
|
||||||
"code_block_prefix": re.compile(r"(?<!^)(?<!\n)(```)", re.MULTILINE),
|
"code_block_prefix": re.compile(r"(\S[ \t]*)(```)"),
|
||||||
# Code block suffix: ```lang followed by non-whitespace (no newline)
|
# Code block suffix: ```lang followed by non-whitespace (no newline)
|
||||||
"code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
|
"code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
|
||||||
# Code block indent: whitespace at start of line + ```
|
# Code block indent: whitespace at start of line + ```
|
||||||
@@ -63,6 +65,12 @@ class ContentNormalizer:
|
|||||||
r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
|
r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
|
||||||
),
|
),
|
||||||
"thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
|
"thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
|
||||||
|
# Details tag: </details> followed by optional whitespace/newlines
|
||||||
|
"details_end": re.compile(r"</details>[ \t]*\n*", re.IGNORECASE),
|
||||||
|
# Self-closing details tag: <details ... /> followed by optional whitespace (but NOT already having newline)
|
||||||
|
"details_self_closing": re.compile(
|
||||||
|
r"(<details[^>]*/\s*>)(?!\n)", re.IGNORECASE
|
||||||
|
),
|
||||||
# LaTeX block: \[ ... \]
|
# LaTeX block: \[ ... \]
|
||||||
"latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
|
"latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
|
||||||
# LaTeX inline: \( ... \)
|
# LaTeX inline: \( ... \)
|
||||||
@@ -101,6 +109,14 @@ class ContentNormalizer:
|
|||||||
"heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
|
"heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
|
||||||
# Table: | col1 | col2 -> | col1 | col2 |
|
# Table: | col1 | col2 -> | col1 | col2 |
|
||||||
"table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
|
"table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
|
||||||
|
# Emphasis spacing: ** text ** -> **text**, __ text __ -> __text__
|
||||||
|
# Matches emphasis blocks within a single line. We use a recursive approach
|
||||||
|
# in _fix_emphasis_spacing to handle nesting and spaces correctly.
|
||||||
|
# NOTE: We use [^\n] instead of . to prevent cross-line matching.
|
||||||
|
# Supports: * (italic), ** (bold), *** (bold+italic), _ (italic), __ (bold), ___ (bold+italic)
|
||||||
|
"emphasis_spacing": re.compile(
|
||||||
|
r"(?<!\*|_)(\*{1,3}|_{1,3})(?P<inner>[^\n]*?)(\1)(?!\*|_)"
|
||||||
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, config: Optional[NormalizerConfig] = None):
|
def __init__(self, config: Optional[NormalizerConfig] = None):
|
||||||
@@ -130,7 +146,14 @@ class ContentNormalizer:
|
|||||||
if content != original:
|
if content != original:
|
||||||
self.applied_fixes.append("Normalize Thought Tags")
|
self.applied_fixes.append("Normalize Thought Tags")
|
||||||
|
|
||||||
# 3. Code block formatting fix
|
# 3. Details tag normalization (must be before heading fix)
|
||||||
|
if self.config.enable_details_tag_fix:
|
||||||
|
original = content
|
||||||
|
content = self._fix_details_tags(content)
|
||||||
|
if content != original:
|
||||||
|
self.applied_fixes.append("Normalize Details Tags")
|
||||||
|
|
||||||
|
# 4. Code block formatting fix
|
||||||
if self.config.enable_code_block_fix:
|
if self.config.enable_code_block_fix:
|
||||||
original = content
|
original = content
|
||||||
content = self._fix_code_blocks(content)
|
content = self._fix_code_blocks(content)
|
||||||
@@ -193,6 +216,13 @@ class ContentNormalizer:
|
|||||||
if content != original:
|
if content != original:
|
||||||
self.applied_fixes.append("Cleanup XML Tags")
|
self.applied_fixes.append("Cleanup XML Tags")
|
||||||
|
|
||||||
|
# 12. Emphasis spacing fix
|
||||||
|
if self.config.enable_emphasis_spacing_fix:
|
||||||
|
original = content
|
||||||
|
content = self._fix_emphasis_spacing(content)
|
||||||
|
if content != original:
|
||||||
|
self.applied_fixes.append("Fix Emphasis Spacing")
|
||||||
|
|
||||||
# 9. Custom cleaners
|
# 9. Custom cleaners
|
||||||
for cleaner in self.config.custom_cleaners:
|
for cleaner in self.config.custom_cleaners:
|
||||||
original = content
|
original = content
|
||||||
@@ -249,10 +279,26 @@ class ContentNormalizer:
|
|||||||
# 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
|
# 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
|
||||||
return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)
|
return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)
|
||||||
|
|
||||||
|
def _fix_details_tags(self, content: str) -> str:
|
||||||
|
"""Normalize <details> tags: ensure proper spacing after closing tags
|
||||||
|
|
||||||
|
Handles two cases:
|
||||||
|
1. </details> followed by content -> ensure double newline
|
||||||
|
2. <details .../> (self-closing) followed by content -> ensure newline
|
||||||
|
|
||||||
|
Note: Only applies outside of code blocks to avoid breaking code examples.
|
||||||
|
"""
|
||||||
|
parts = content.split("```")
|
||||||
|
for i in range(0, len(parts), 2): # Even indices are markdown text
|
||||||
|
# 1. Ensure double newline after </details>
|
||||||
|
parts[i] = self._PATTERNS["details_end"].sub("</details>\n\n", parts[i])
|
||||||
|
# 2. Ensure newline after self-closing <details ... />
|
||||||
|
parts[i] = self._PATTERNS["details_self_closing"].sub(r"\1\n", parts[i])
|
||||||
|
|
||||||
|
return "```".join(parts)
|
||||||
|
|
||||||
def _fix_code_blocks(self, content: str) -> str:
|
def _fix_code_blocks(self, content: str) -> str:
|
||||||
"""Fix code block formatting (prefixes, suffixes, indentation)"""
|
"""Fix code block formatting (prefixes, suffixes, indentation)"""
|
||||||
# Remove indentation before code blocks
|
|
||||||
content = self._PATTERNS["code_block_indent"].sub(r"\1", content)
|
|
||||||
# Ensure newline before ```
|
# Ensure newline before ```
|
||||||
content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
|
content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
|
||||||
# Ensure newline after ```lang
|
# Ensure newline after ```lang
|
||||||
@@ -411,6 +457,61 @@ class ContentNormalizer:
|
|||||||
"""Remove leftover XML tags"""
|
"""Remove leftover XML tags"""
|
||||||
return self._PATTERNS["xml_artifacts"].sub("", content)
|
return self._PATTERNS["xml_artifacts"].sub("", content)
|
||||||
|
|
||||||
|
def _fix_emphasis_spacing(self, content: str) -> str:
|
||||||
|
"""Fix spaces inside **emphasis** or _emphasis_
|
||||||
|
Example: ** text ** -> **text**, **text ** -> **text**, ** text** -> **text**
|
||||||
|
"""
|
||||||
|
|
||||||
|
def replacer(match):
|
||||||
|
symbol = match.group(1)
|
||||||
|
inner = match.group("inner")
|
||||||
|
|
||||||
|
# Recursive step: Fix emphasis spacing INSIDE the current block first
|
||||||
|
# This ensures that ** _ italic _ ** becomes ** _italic_ ** before we strip outer spaces.
|
||||||
|
inner = self._PATTERNS["emphasis_spacing"].sub(replacer, inner)
|
||||||
|
|
||||||
|
# If no leading/trailing whitespace, nothing to fix at this level
|
||||||
|
stripped_inner = inner.strip()
|
||||||
|
if stripped_inner == inner:
|
||||||
|
return f"{symbol}{inner}{symbol}"
|
||||||
|
|
||||||
|
# Safeguard: If inner content is just whitespace, don't touch it
|
||||||
|
if not stripped_inner:
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Safeguard: If it looks like a math expression or list of variables (e.g. " * 3 * " or " _ b _ ")
|
||||||
|
# If the symbol is surrounded by spaces in the original text, it's likely an operator.
|
||||||
|
if inner.startswith(" ") and inner.endswith(" "):
|
||||||
|
# If it's single '*' or '_', and both sides have spaces, it's almost certainly an operator.
|
||||||
|
if symbol in ["*", "_"]:
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Safeguard: List marker protection
|
||||||
|
# If symbol is single '*' and inner content starts with whitespace followed by emphasis markers,
|
||||||
|
# this is likely a list item like "* **bold**" - don't merge them.
|
||||||
|
# Pattern: "* **text**" should NOT become "***text**"
|
||||||
|
if symbol == "*" and inner.lstrip().startswith(("*", "_")):
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Extended list marker protection:
|
||||||
|
# If symbol is single '*' and inner starts with multiple spaces (list indentation pattern),
|
||||||
|
# this is likely a list item like "* text" - don't strip the spaces.
|
||||||
|
# Pattern: "* U16 forward **Kuang**" should NOT become "*U16 forward **Kuang**"
|
||||||
|
if symbol == "*" and inner.startswith(" "):
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
return f"{symbol}{stripped_inner}{symbol}"
|
||||||
|
|
||||||
|
parts = content.split("```")
|
||||||
|
for i in range(0, len(parts), 2): # Even indices are markdown text
|
||||||
|
# We use a while loop to handle overlapping or multiple occurrences at the top level
|
||||||
|
while True:
|
||||||
|
new_part = self._PATTERNS["emphasis_spacing"].sub(replacer, parts[i])
|
||||||
|
if new_part == parts[i]:
|
||||||
|
break
|
||||||
|
parts[i] = new_part
|
||||||
|
return "```".join(parts)
|
||||||
|
|
||||||
|
|
||||||
class Filter:
|
class Filter:
|
||||||
class Valves(BaseModel):
|
class Valves(BaseModel):
|
||||||
@@ -428,6 +529,10 @@ class Filter:
|
|||||||
enable_thought_tag_fix: bool = Field(
|
enable_thought_tag_fix: bool = Field(
|
||||||
default=True, description="Normalize </thought> tags"
|
default=True, description="Normalize </thought> tags"
|
||||||
)
|
)
|
||||||
|
enable_details_tag_fix: bool = Field(
|
||||||
|
default=True,
|
||||||
|
description="Normalize <details> tags (add blank line after </details> and handle self-closing tags)",
|
||||||
|
)
|
||||||
enable_code_block_fix: bool = Field(
|
enable_code_block_fix: bool = Field(
|
||||||
default=True,
|
default=True,
|
||||||
description="Fix code block formatting (indentation, newlines)",
|
description="Fix code block formatting (indentation, newlines)",
|
||||||
@@ -458,6 +563,10 @@ class Filter:
|
|||||||
enable_xml_tag_cleanup: bool = Field(
|
enable_xml_tag_cleanup: bool = Field(
|
||||||
default=True, description="Cleanup leftover XML tags"
|
default=True, description="Cleanup leftover XML tags"
|
||||||
)
|
)
|
||||||
|
enable_emphasis_spacing_fix: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description="Fix spaces inside **emphasis** (e.g. ** text ** -> **text**)",
|
||||||
|
)
|
||||||
show_status: bool = Field(
|
show_status: bool = Field(
|
||||||
default=True, description="Show status notification when fixes are applied"
|
default=True, description="Show status notification when fixes are applied"
|
||||||
)
|
)
|
||||||
@@ -586,11 +695,21 @@ class Filter:
|
|||||||
if self._contains_html(content):
|
if self._contains_html(content):
|
||||||
return body
|
return body
|
||||||
|
|
||||||
|
# Skip if content contains tool output markers (native function calling)
|
||||||
|
# Pattern: """...""" or tool_call_id or <details type="tool_calls"...>
|
||||||
|
if (
|
||||||
|
'"""' in content
|
||||||
|
or "tool_call_id" in content
|
||||||
|
or '<details type="tool_calls"' in content
|
||||||
|
):
|
||||||
|
return body
|
||||||
|
|
||||||
# Configure normalizer based on valves
|
# Configure normalizer based on valves
|
||||||
config = NormalizerConfig(
|
config = NormalizerConfig(
|
||||||
enable_escape_fix=self.valves.enable_escape_fix,
|
enable_escape_fix=self.valves.enable_escape_fix,
|
||||||
enable_escape_fix_in_code_blocks=self.valves.enable_escape_fix_in_code_blocks,
|
enable_escape_fix_in_code_blocks=self.valves.enable_escape_fix_in_code_blocks,
|
||||||
enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
|
enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
|
||||||
|
enable_details_tag_fix=self.valves.enable_details_tag_fix,
|
||||||
enable_code_block_fix=self.valves.enable_code_block_fix,
|
enable_code_block_fix=self.valves.enable_code_block_fix,
|
||||||
enable_latex_fix=self.valves.enable_latex_fix,
|
enable_latex_fix=self.valves.enable_latex_fix,
|
||||||
enable_list_fix=self.valves.enable_list_fix,
|
enable_list_fix=self.valves.enable_list_fix,
|
||||||
@@ -600,6 +719,7 @@ class Filter:
|
|||||||
enable_heading_fix=self.valves.enable_heading_fix,
|
enable_heading_fix=self.valves.enable_heading_fix,
|
||||||
enable_table_fix=self.valves.enable_table_fix,
|
enable_table_fix=self.valves.enable_table_fix,
|
||||||
enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
|
enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
|
||||||
|
enable_emphasis_spacing_fix=self.valves.enable_emphasis_spacing_fix,
|
||||||
)
|
)
|
||||||
|
|
||||||
normalizer = ContentNormalizer(config)
|
normalizer = ContentNormalizer(config)
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ title: Markdown 格式修复器 (Markdown Normalizer)
|
|||||||
author: Fu-Jie
|
author: Fu-Jie
|
||||||
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
||||||
funding_url: https://github.com/open-webui
|
funding_url: https://github.com/open-webui
|
||||||
version: 1.1.2
|
version: 1.2.4
|
||||||
description: 内容规范化过滤器,修复 LLM 输出中常见的 Markdown 格式问题,如损坏的代码块、LaTeX 公式、Mermaid 图表和列表格式。
|
description: 内容规范化过滤器,修复 LLM 输出中常见的 Markdown 格式问题,如损坏的代码块、LaTeX 公式、Mermaid 图表和列表格式。
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -24,7 +24,11 @@ class NormalizerConfig:
|
|||||||
"""配置类,用于启用/禁用特定的规范化规则"""
|
"""配置类,用于启用/禁用特定的规范化规则"""
|
||||||
|
|
||||||
enable_escape_fix: bool = True # 修复过度的转义字符
|
enable_escape_fix: bool = True # 修复过度的转义字符
|
||||||
|
enable_escape_fix_in_code_blocks: bool = (
|
||||||
|
False # 在代码块内部应用转义修复 (默认:关闭,以确保安全)
|
||||||
|
)
|
||||||
enable_thought_tag_fix: bool = True # 规范化思维链标签
|
enable_thought_tag_fix: bool = True # 规范化思维链标签
|
||||||
|
enable_details_tag_fix: bool = True # 规范化 <details> 标签(类似思维链标签)
|
||||||
enable_code_block_fix: bool = True # 修复代码块格式
|
enable_code_block_fix: bool = True # 修复代码块格式
|
||||||
enable_latex_fix: bool = True # 修复 LaTeX 公式格式
|
enable_latex_fix: bool = True # 修复 LaTeX 公式格式
|
||||||
enable_list_fix: bool = False # 修复列表项换行 (默认关闭,因为可能过于激进)
|
enable_list_fix: bool = False # 修复列表项换行 (默认关闭,因为可能过于激进)
|
||||||
@@ -34,6 +38,7 @@ class NormalizerConfig:
|
|||||||
enable_heading_fix: bool = True # 修复标题中缺失的空格 (#Header -> # Header)
|
enable_heading_fix: bool = True # 修复标题中缺失的空格 (#Header -> # Header)
|
||||||
enable_table_fix: bool = True # 修复表格中缺失的闭合管道符
|
enable_table_fix: bool = True # 修复表格中缺失的闭合管道符
|
||||||
enable_xml_tag_cleanup: bool = True # 清理残留的 XML 标签
|
enable_xml_tag_cleanup: bool = True # 清理残留的 XML 标签
|
||||||
|
enable_emphasis_spacing_fix: bool = False # 修复 **强调内容** 中的多余空格
|
||||||
|
|
||||||
# 自定义清理函数 (用于高级扩展)
|
# 自定义清理函数 (用于高级扩展)
|
||||||
custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
custom_cleaners: List[Callable[[str], str]] = field(default_factory=list)
|
||||||
@@ -44,8 +49,8 @@ class ContentNormalizer:
|
|||||||
|
|
||||||
# --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
|
# --- 1. Pre-compiled Regex Patterns (Performance Optimization) ---
|
||||||
_PATTERNS = {
|
_PATTERNS = {
|
||||||
# Code block prefix: if ``` is not at start of line or file
|
# Code block prefix: if ``` is not at start of line (ignoring whitespace)
|
||||||
"code_block_prefix": re.compile(r"(?<!^)(?<!\n)(```)", re.MULTILINE),
|
"code_block_prefix": re.compile(r"(\S[ \t]*)(```)"),
|
||||||
# Code block suffix: ```lang followed by non-whitespace (no newline)
|
# Code block suffix: ```lang followed by non-whitespace (no newline)
|
||||||
"code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
|
"code_block_suffix": re.compile(r"(```[\w\+\-\.]*)[ \t]+([^\n\r])"),
|
||||||
# Code block indent: whitespace at start of line + ```
|
# Code block indent: whitespace at start of line + ```
|
||||||
@@ -55,6 +60,12 @@ class ContentNormalizer:
|
|||||||
r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
|
r"</(thought|think|thinking)>[ \t]*\n*", re.IGNORECASE
|
||||||
),
|
),
|
||||||
"thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
|
"thought_start": re.compile(r"<(thought|think|thinking)>", re.IGNORECASE),
|
||||||
|
# Details tag: </details> followed by optional whitespace/newlines
|
||||||
|
"details_end": re.compile(r"</details>[ \t]*\n*", re.IGNORECASE),
|
||||||
|
# Self-closing details tag: <details ... /> followed by optional whitespace (but NOT already having newline)
|
||||||
|
"details_self_closing": re.compile(
|
||||||
|
r"(<details[^>]*/\s*>)(?!\n)", re.IGNORECASE
|
||||||
|
),
|
||||||
# LaTeX block: \[ ... \]
|
# LaTeX block: \[ ... \]
|
||||||
"latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
|
"latex_bracket_block": re.compile(r"\\\[(.+?)\\\]", re.DOTALL),
|
||||||
# LaTeX inline: \( ... \)
|
# LaTeX inline: \( ... \)
|
||||||
@@ -93,6 +104,14 @@ class ContentNormalizer:
|
|||||||
"heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
|
"heading_space": re.compile(r"^(#+)([^ \n#])", re.MULTILINE),
|
||||||
# Table: | col1 | col2 -> | col1 | col2 |
|
# Table: | col1 | col2 -> | col1 | col2 |
|
||||||
"table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
|
"table_pipe": re.compile(r"^(\|.*[^|\n])$", re.MULTILINE),
|
||||||
|
# Emphasis spacing: ** text ** -> **text**, __ text __ -> __text__
|
||||||
|
# Matches emphasis blocks within a single line. We use a recursive approach
|
||||||
|
# in _fix_emphasis_spacing to handle nesting and spaces correctly.
|
||||||
|
# NOTE: We use [^\n] instead of . to prevent cross-line matching.
|
||||||
|
# Supports: * (italic), ** (bold), *** (bold+italic), _ (italic), __ (bold), ___ (bold+italic)
|
||||||
|
"emphasis_spacing": re.compile(
|
||||||
|
r"(?<!\*|_)(\*{1,3}|_{1,3})(?P<inner>[^\n]*?)(\1)(?!\*|_)"
|
||||||
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, config: Optional[NormalizerConfig] = None):
|
def __init__(self, config: Optional[NormalizerConfig] = None):
|
||||||
@@ -122,7 +141,14 @@ class ContentNormalizer:
|
|||||||
if content != original:
|
if content != original:
|
||||||
self.applied_fixes.append("Normalize Thought Tags")
|
self.applied_fixes.append("Normalize Thought Tags")
|
||||||
|
|
||||||
# 3. Code block formatting fix
|
# 3. Details tag normalization (must be before heading fix)
|
||||||
|
if self.config.enable_details_tag_fix:
|
||||||
|
original = content
|
||||||
|
content = self._fix_details_tags(content)
|
||||||
|
if content != original:
|
||||||
|
self.applied_fixes.append("Normalize Details Tags")
|
||||||
|
|
||||||
|
# 4. Code block formatting fix
|
||||||
if self.config.enable_code_block_fix:
|
if self.config.enable_code_block_fix:
|
||||||
original = content
|
original = content
|
||||||
content = self._fix_code_blocks(content)
|
content = self._fix_code_blocks(content)
|
||||||
@@ -185,6 +211,13 @@ class ContentNormalizer:
|
|||||||
if content != original:
|
if content != original:
|
||||||
self.applied_fixes.append("Cleanup XML Tags")
|
self.applied_fixes.append("Cleanup XML Tags")
|
||||||
|
|
||||||
|
# 12. Emphasis spacing fix
|
||||||
|
if self.config.enable_emphasis_spacing_fix:
|
||||||
|
original = content
|
||||||
|
content = self._fix_emphasis_spacing(content)
|
||||||
|
if content != original:
|
||||||
|
self.applied_fixes.append("Fix Emphasis Spacing")
|
||||||
|
|
||||||
# 9. Custom cleaners
|
# 9. Custom cleaners
|
||||||
for cleaner in self.config.custom_cleaners:
|
for cleaner in self.config.custom_cleaners:
|
||||||
original = content
|
original = content
|
||||||
@@ -209,12 +242,27 @@ class ContentNormalizer:
|
|||||||
return content
|
return content
|
||||||
|
|
||||||
def _fix_escape_characters(self, content: str) -> str:
|
def _fix_escape_characters(self, content: str) -> str:
|
||||||
"""Fix excessive escape characters"""
|
"""修复过度的转义字符
|
||||||
content = content.replace("\\r\\n", "\n")
|
|
||||||
content = content.replace("\\n", "\n")
|
如果 enable_escape_fix_in_code_blocks 为 False (默认),此方法将仅修复代码块外部的转义字符,
|
||||||
content = content.replace("\\t", "\t")
|
以避免破坏有效的代码示例 (例如,带有 \\n 的 JSON 字符串、正则表达式模式等)。
|
||||||
content = content.replace("\\\\", "\\")
|
"""
|
||||||
return content
|
if self.config.enable_escape_fix_in_code_blocks:
|
||||||
|
# 全局应用 (原始行为)
|
||||||
|
content = content.replace("\\r\\n", "\n")
|
||||||
|
content = content.replace("\\n", "\n")
|
||||||
|
content = content.replace("\\t", "\t")
|
||||||
|
content = content.replace("\\\\", "\\")
|
||||||
|
return content
|
||||||
|
else:
|
||||||
|
# 仅在代码块外部应用 (安全模式)
|
||||||
|
parts = content.split("```")
|
||||||
|
for i in range(0, len(parts), 2): # 偶数索引是 Markdown 文本 (非代码)
|
||||||
|
parts[i] = parts[i].replace("\\r\\n", "\n")
|
||||||
|
parts[i] = parts[i].replace("\\n", "\n")
|
||||||
|
parts[i] = parts[i].replace("\\t", "\t")
|
||||||
|
parts[i] = parts[i].replace("\\\\", "\\")
|
||||||
|
return "```".join(parts)
|
||||||
|
|
||||||
def _fix_thought_tags(self, content: str) -> str:
|
def _fix_thought_tags(self, content: str) -> str:
|
||||||
"""Normalize thought tags: unify naming and fix spacing"""
|
"""Normalize thought tags: unify naming and fix spacing"""
|
||||||
@@ -223,10 +271,26 @@ class ContentNormalizer:
|
|||||||
# 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
|
# 2. Standardize end tag and ensure newlines: </think> -> </thought>\n\n
|
||||||
return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)
|
return self._PATTERNS["thought_end"].sub("</thought>\n\n", content)
|
||||||
|
|
||||||
|
def _fix_details_tags(self, content: str) -> str:
|
||||||
|
"""规范化 <details> 标签:确保闭合标签后的正确间距
|
||||||
|
|
||||||
|
处理两种情况:
|
||||||
|
1. </details> 后跟内容 -> 确保有双换行
|
||||||
|
2. <details .../> (自闭合) 后跟内容 -> 确保有换行
|
||||||
|
|
||||||
|
注意:仅在代码块外部应用,以避免破坏代码示例。
|
||||||
|
"""
|
||||||
|
parts = content.split("```")
|
||||||
|
for i in range(0, len(parts), 2): # 偶数索引是 Markdown 文本
|
||||||
|
# 1. 确保 </details> 后有双换行
|
||||||
|
parts[i] = self._PATTERNS["details_end"].sub("</details>\n\n", parts[i])
|
||||||
|
# 2. 确保自闭合 <details ... /> 后有换行
|
||||||
|
parts[i] = self._PATTERNS["details_self_closing"].sub(r"\1\n", parts[i])
|
||||||
|
|
||||||
|
return "```".join(parts)
|
||||||
|
|
||||||
def _fix_code_blocks(self, content: str) -> str:
|
def _fix_code_blocks(self, content: str) -> str:
|
||||||
"""Fix code block formatting (prefixes, suffixes, indentation)"""
|
"""Fix code block formatting (prefixes, suffixes, indentation)"""
|
||||||
# Remove indentation before code blocks
|
|
||||||
content = self._PATTERNS["code_block_indent"].sub(r"\1", content)
|
|
||||||
# Ensure newline before ```
|
# Ensure newline before ```
|
||||||
content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
|
content = self._PATTERNS["code_block_prefix"].sub(r"\n\1", content)
|
||||||
# Ensure newline after ```lang
|
# Ensure newline after ```lang
|
||||||
@@ -390,6 +454,61 @@ class ContentNormalizer:
|
|||||||
"""Remove leftover XML tags"""
|
"""Remove leftover XML tags"""
|
||||||
return self._PATTERNS["xml_artifacts"].sub("", content)
|
return self._PATTERNS["xml_artifacts"].sub("", content)
|
||||||
|
|
||||||
|
def _fix_emphasis_spacing(self, content: str) -> str:
|
||||||
|
"""Fix spaces inside **emphasis** or _emphasis_
|
||||||
|
Example: ** text ** -> **text**, **text ** -> **text**, ** text** -> **text**
|
||||||
|
"""
|
||||||
|
|
||||||
|
def replacer(match):
|
||||||
|
symbol = match.group(1)
|
||||||
|
inner = match.group("inner")
|
||||||
|
|
||||||
|
# Recursive step: Fix emphasis spacing INSIDE the current block first
|
||||||
|
# This ensures that ** _ italic _ ** becomes ** _italic_ ** before we strip outer spaces.
|
||||||
|
inner = self._PATTERNS["emphasis_spacing"].sub(replacer, inner)
|
||||||
|
|
||||||
|
# If no leading/trailing whitespace, nothing to fix at this level
|
||||||
|
stripped_inner = inner.strip()
|
||||||
|
if stripped_inner == inner:
|
||||||
|
return f"{symbol}{inner}{symbol}"
|
||||||
|
|
||||||
|
# Safeguard: If inner content is just whitespace, don't touch it
|
||||||
|
if not stripped_inner:
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Safeguard: If it looks like a math expression or list of variables (e.g. " * 3 * " or " _ b _ ")
|
||||||
|
# If the symbol is surrounded by spaces in the original text, it's likely an operator.
|
||||||
|
if inner.startswith(" ") and inner.endswith(" "):
|
||||||
|
# If it's single '*' or '_', and both sides have spaces, it's almost certainly an operator.
|
||||||
|
if symbol in ["*", "_"]:
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Safeguard: List marker protection
|
||||||
|
# If symbol is single '*' and inner content starts with whitespace followed by emphasis markers,
|
||||||
|
# this is likely a list item like "* **bold**" - don't merge them.
|
||||||
|
# Pattern: "* **text**" should NOT become "***text**"
|
||||||
|
if symbol == "*" and inner.lstrip().startswith(("*", "_")):
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
# Extended list marker protection:
|
||||||
|
# If symbol is single '*' and inner starts with multiple spaces (list indentation pattern),
|
||||||
|
# this is likely a list item like "* text" - don't strip the spaces.
|
||||||
|
# Pattern: "* U16 forward **Kuang**" should NOT become "*U16 forward **Kuang**"
|
||||||
|
if symbol == "*" and inner.startswith(" "):
|
||||||
|
return match.group(0)
|
||||||
|
|
||||||
|
return f"{symbol}{stripped_inner}{symbol}"
|
||||||
|
|
||||||
|
parts = content.split("```")
|
||||||
|
for i in range(0, len(parts), 2): # Even indices are markdown text
|
||||||
|
# We use a while loop to handle overlapping or multiple occurrences at the top level
|
||||||
|
while True:
|
||||||
|
new_part = self._PATTERNS["emphasis_spacing"].sub(replacer, parts[i])
|
||||||
|
if new_part == parts[i]:
|
||||||
|
break
|
||||||
|
parts[i] = new_part
|
||||||
|
return "```".join(parts)
|
||||||
|
|
||||||
|
|
||||||
class Filter:
|
class Filter:
|
||||||
class Valves(BaseModel):
|
class Valves(BaseModel):
|
||||||
@@ -400,9 +519,17 @@ class Filter:
|
|||||||
enable_escape_fix: bool = Field(
|
enable_escape_fix: bool = Field(
|
||||||
default=True, description="修复过度的转义字符 (\\n, \\t 等)"
|
default=True, description="修复过度的转义字符 (\\n, \\t 等)"
|
||||||
)
|
)
|
||||||
|
enable_escape_fix_in_code_blocks: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description="在代码块内部应用转义修复 (⚠️ 警告:可能会破坏有效的代码,如 JSON 字符串或正则模式。默认:关闭,以确保安全)",
|
||||||
|
)
|
||||||
enable_thought_tag_fix: bool = Field(
|
enable_thought_tag_fix: bool = Field(
|
||||||
default=True, description="规范化思维链标签 (<think> -> <thought>)"
|
default=True, description="规范化思维链标签 (<think> -> <thought>)"
|
||||||
)
|
)
|
||||||
|
enable_details_tag_fix: bool = Field(
|
||||||
|
default=True,
|
||||||
|
description="规范化 <details> 标签 (在 </details> 后添加空行,处理自闭合标签)",
|
||||||
|
)
|
||||||
enable_code_block_fix: bool = Field(
|
enable_code_block_fix: bool = Field(
|
||||||
default=True,
|
default=True,
|
||||||
description="修复代码块格式 (缩进、换行)",
|
description="修复代码块格式 (缩进、换行)",
|
||||||
@@ -433,6 +560,10 @@ class Filter:
|
|||||||
enable_xml_tag_cleanup: bool = Field(
|
enable_xml_tag_cleanup: bool = Field(
|
||||||
default=True, description="清理残留的 XML 标签"
|
default=True, description="清理残留的 XML 标签"
|
||||||
)
|
)
|
||||||
|
enable_emphasis_spacing_fix: bool = Field(
|
||||||
|
default=False,
|
||||||
|
description="修复强调语法中的多余空格 (例如 ** 文本 ** -> **文本**)",
|
||||||
|
)
|
||||||
show_status: bool = Field(default=True, description="应用修复时显示状态通知")
|
show_status: bool = Field(default=True, description="应用修复时显示状态通知")
|
||||||
show_debug_log: bool = Field(
|
show_debug_log: bool = Field(
|
||||||
default=True, description="在浏览器控制台打印调试日志 (F12)"
|
default=True, description="在浏览器控制台打印调试日志 (F12)"
|
||||||
@@ -494,6 +625,7 @@ class Filter:
|
|||||||
fix_map = {
|
fix_map = {
|
||||||
"Fix Escape Chars": "转义字符",
|
"Fix Escape Chars": "转义字符",
|
||||||
"Normalize Thought Tags": "思维标签",
|
"Normalize Thought Tags": "思维标签",
|
||||||
|
"Normalize Details Tags": "Details标签",
|
||||||
"Fix Code Blocks": "代码块",
|
"Fix Code Blocks": "代码块",
|
||||||
"Normalize LaTeX": "LaTeX公式",
|
"Normalize LaTeX": "LaTeX公式",
|
||||||
"Fix List Format": "列表格式",
|
"Fix List Format": "列表格式",
|
||||||
@@ -503,6 +635,7 @@ class Filter:
|
|||||||
"Fix Headings": "标题格式",
|
"Fix Headings": "标题格式",
|
||||||
"Fix Tables": "表格格式",
|
"Fix Tables": "表格格式",
|
||||||
"Cleanup XML Tags": "XML清理",
|
"Cleanup XML Tags": "XML清理",
|
||||||
|
"Fix Emphasis Spacing": "强调空格",
|
||||||
"Custom Cleaner": "自定义清理",
|
"Custom Cleaner": "自定义清理",
|
||||||
}
|
}
|
||||||
translated_fixes = [fix_map.get(fix, fix) for fix in applied_fixes]
|
translated_fixes = [fix_map.get(fix, fix) for fix in applied_fixes]
|
||||||
@@ -571,14 +704,25 @@ class Filter:
|
|||||||
content = last.get("content", "") or ""
|
content = last.get("content", "") or ""
|
||||||
|
|
||||||
if last.get("role") == "assistant" and isinstance(content, str):
|
if last.get("role") == "assistant" and isinstance(content, str):
|
||||||
# Skip if content looks like HTML to avoid breaking it
|
# 如果内容看起来像 HTML,则跳过以避免破坏它
|
||||||
if self._contains_html(content):
|
if self._contains_html(content):
|
||||||
return body
|
return body
|
||||||
|
|
||||||
# Configure normalizer based on valves
|
# 如果内容包含工具输出标记 (原生函数调用),则跳过
|
||||||
|
# 模式:"""...""" 或 tool_call_id 或 <details type="tool_calls"...>
|
||||||
|
if (
|
||||||
|
'"""' in content
|
||||||
|
or "tool_call_id" in content
|
||||||
|
or '<details type="tool_calls"' in content
|
||||||
|
):
|
||||||
|
return body
|
||||||
|
|
||||||
|
# 根据 Valves 配置 Normalizer
|
||||||
config = NormalizerConfig(
|
config = NormalizerConfig(
|
||||||
enable_escape_fix=self.valves.enable_escape_fix,
|
enable_escape_fix=self.valves.enable_escape_fix,
|
||||||
|
enable_escape_fix_in_code_blocks=self.valves.enable_escape_fix_in_code_blocks,
|
||||||
enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
|
enable_thought_tag_fix=self.valves.enable_thought_tag_fix,
|
||||||
|
enable_details_tag_fix=self.valves.enable_details_tag_fix,
|
||||||
enable_code_block_fix=self.valves.enable_code_block_fix,
|
enable_code_block_fix=self.valves.enable_code_block_fix,
|
||||||
enable_latex_fix=self.valves.enable_latex_fix,
|
enable_latex_fix=self.valves.enable_latex_fix,
|
||||||
enable_list_fix=self.valves.enable_list_fix,
|
enable_list_fix=self.valves.enable_list_fix,
|
||||||
@@ -588,6 +732,7 @@ class Filter:
|
|||||||
enable_heading_fix=self.valves.enable_heading_fix,
|
enable_heading_fix=self.valves.enable_heading_fix,
|
||||||
enable_table_fix=self.valves.enable_table_fix,
|
enable_table_fix=self.valves.enable_table_fix,
|
||||||
enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
|
enable_xml_tag_cleanup=self.valves.enable_xml_tag_cleanup,
|
||||||
|
enable_emphasis_spacing_fix=self.valves.enable_emphasis_spacing_fix,
|
||||||
)
|
)
|
||||||
|
|
||||||
normalizer = ContentNormalizer(config)
|
normalizer = ContentNormalizer(config)
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ class TestMarkdownNormalizer(unittest.TestCase):
|
|||||||
self.config = NormalizerConfig(
|
self.config = NormalizerConfig(
|
||||||
enable_escape_fix=True,
|
enable_escape_fix=True,
|
||||||
enable_thought_tag_fix=True,
|
enable_thought_tag_fix=True,
|
||||||
|
enable_details_tag_fix=True,
|
||||||
enable_code_block_fix=True,
|
enable_code_block_fix=True,
|
||||||
enable_latex_fix=True,
|
enable_latex_fix=True,
|
||||||
enable_list_fix=True,
|
enable_list_fix=True,
|
||||||
@@ -21,6 +22,7 @@ class TestMarkdownNormalizer(unittest.TestCase):
|
|||||||
enable_fullwidth_symbol_fix=True,
|
enable_fullwidth_symbol_fix=True,
|
||||||
enable_mermaid_fix=True,
|
enable_mermaid_fix=True,
|
||||||
enable_xml_tag_cleanup=True,
|
enable_xml_tag_cleanup=True,
|
||||||
|
enable_heading_fix=True,
|
||||||
)
|
)
|
||||||
self.normalizer = ContentNormalizer(self.config)
|
self.normalizer = ContentNormalizer(self.config)
|
||||||
|
|
||||||
@@ -42,6 +44,32 @@ class TestMarkdownNormalizer(unittest.TestCase):
|
|||||||
self.normalizer.normalize(input_text_deepseek), expected_deepseek
|
self.normalizer.normalize(input_text_deepseek), expected_deepseek
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def test_details_tag_fix(self):
|
||||||
|
# Case 1: </details> followed by content without blank line
|
||||||
|
input_text = (
|
||||||
|
"<details><summary>Thought</summary>\n> Thinking\n</details>Next paragraph"
|
||||||
|
)
|
||||||
|
expected = "<details><summary>Thought</summary>\n> Thinking\n</details>\n\nNext paragraph"
|
||||||
|
self.assertEqual(self.normalizer.normalize(input_text), expected)
|
||||||
|
|
||||||
|
# Case 2: Self-closing <details /> followed by heading
|
||||||
|
input_text_self_closing = '<details id="__DETAIL_0__"/>#Heading'
|
||||||
|
result = self.normalizer.normalize(input_text_self_closing)
|
||||||
|
self.assertIn("# Heading", result) # Heading should be fixed
|
||||||
|
self.assertIn(
|
||||||
|
'<details id="__DETAIL_0__"/>\n', result
|
||||||
|
) # Should have newline after
|
||||||
|
|
||||||
|
# Case 3: </details> already has proper spacing (should not add extra)
|
||||||
|
input_already_good = "</details>\n\nNext"
|
||||||
|
self.assertEqual(
|
||||||
|
self.normalizer.normalize(input_already_good), input_already_good
|
||||||
|
)
|
||||||
|
|
||||||
|
# Case 4: Details tag inside code block (should NOT be modified)
|
||||||
|
input_code_block = "```html\n<details>\n</details>\n```"
|
||||||
|
self.assertEqual(self.normalizer.normalize(input_code_block), input_code_block)
|
||||||
|
|
||||||
def test_code_block_fix(self):
|
def test_code_block_fix(self):
|
||||||
# Case 1: Indentation
|
# Case 1: Indentation
|
||||||
self.assertEqual(self.normalizer._fix_code_blocks(" ```python"), "```python")
|
self.assertEqual(self.normalizer._fix_code_blocks(" ```python"), "```python")
|
||||||
|
|||||||
37
plugins/filters/markdown_normalizer/test_side_effects.py
Normal file
37
plugins/filters/markdown_normalizer/test_side_effects.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
from markdown_normalizer import ContentNormalizer, NormalizerConfig
|
||||||
|
|
||||||
|
|
||||||
|
def test_side_effects():
|
||||||
|
normalizer = ContentNormalizer(NormalizerConfig(enable_details_tag_fix=True))
|
||||||
|
|
||||||
|
# Scenario 1: HTML code block
|
||||||
|
code_block = """```html
|
||||||
|
<details>
|
||||||
|
<summary>Click</summary>
|
||||||
|
Content
|
||||||
|
</details>
|
||||||
|
```"""
|
||||||
|
|
||||||
|
# Scenario 2: Python string
|
||||||
|
python_code = """```python
|
||||||
|
html = "</details>"
|
||||||
|
print(html)
|
||||||
|
```"""
|
||||||
|
|
||||||
|
print("--- Scenario 1: HTML Code Block ---")
|
||||||
|
res1 = normalizer.normalize(code_block)
|
||||||
|
print(repr(res1))
|
||||||
|
if "</details>\n\n" in res1 and "```" in res1:
|
||||||
|
print("WARNING: Modified inside HTML code block")
|
||||||
|
|
||||||
|
print("\n--- Scenario 2: Python String ---")
|
||||||
|
res2 = normalizer.normalize(python_code)
|
||||||
|
print(repr(res2))
|
||||||
|
if 'html = "</details>\n\n"' in res2:
|
||||||
|
print("CRITICAL: Broke Python string literal")
|
||||||
|
else:
|
||||||
|
print("OK")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
test_side_effects()
|
||||||
1
plugins/filters/markdown_normalizer/tests/__init__.py
Normal file
1
plugins/filters/markdown_normalizer/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Markdown Normalizer Test Suite
|
||||||
75
plugins/filters/markdown_normalizer/tests/conftest.py
Normal file
75
plugins/filters/markdown_normalizer/tests/conftest.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
"""
|
||||||
|
Shared fixtures for Markdown Normalizer tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Add the parent directory to sys.path for imports
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from markdown_normalizer import ContentNormalizer, NormalizerConfig
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def normalizer():
|
||||||
|
"""Default normalizer with all fixes enabled."""
|
||||||
|
config = NormalizerConfig(
|
||||||
|
enable_escape_fix=True,
|
||||||
|
enable_thought_tag_fix=True,
|
||||||
|
enable_details_tag_fix=True,
|
||||||
|
enable_code_block_fix=True,
|
||||||
|
enable_latex_fix=True,
|
||||||
|
enable_list_fix=False, # Experimental, keep off by default
|
||||||
|
enable_unclosed_block_fix=True,
|
||||||
|
enable_fullwidth_symbol_fix=False,
|
||||||
|
enable_mermaid_fix=True,
|
||||||
|
enable_heading_fix=True,
|
||||||
|
enable_table_fix=True,
|
||||||
|
enable_xml_tag_cleanup=True,
|
||||||
|
enable_emphasis_spacing_fix=True,
|
||||||
|
)
|
||||||
|
return ContentNormalizer(config)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def emphasis_only_normalizer():
|
||||||
|
"""Normalizer with only emphasis spacing fix enabled."""
|
||||||
|
config = NormalizerConfig(
|
||||||
|
enable_escape_fix=False,
|
||||||
|
enable_thought_tag_fix=False,
|
||||||
|
enable_details_tag_fix=False,
|
||||||
|
enable_code_block_fix=False,
|
||||||
|
enable_latex_fix=False,
|
||||||
|
enable_list_fix=False,
|
||||||
|
enable_unclosed_block_fix=False,
|
||||||
|
enable_fullwidth_symbol_fix=False,
|
||||||
|
enable_mermaid_fix=False,
|
||||||
|
enable_heading_fix=False,
|
||||||
|
enable_table_fix=False,
|
||||||
|
enable_xml_tag_cleanup=False,
|
||||||
|
enable_emphasis_spacing_fix=True,
|
||||||
|
)
|
||||||
|
return ContentNormalizer(config)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mermaid_only_normalizer():
|
||||||
|
"""Normalizer with only Mermaid fix enabled."""
|
||||||
|
config = NormalizerConfig(
|
||||||
|
enable_escape_fix=False,
|
||||||
|
enable_thought_tag_fix=False,
|
||||||
|
enable_details_tag_fix=False,
|
||||||
|
enable_code_block_fix=False,
|
||||||
|
enable_latex_fix=False,
|
||||||
|
enable_list_fix=False,
|
||||||
|
enable_unclosed_block_fix=False,
|
||||||
|
enable_fullwidth_symbol_fix=False,
|
||||||
|
enable_mermaid_fix=True,
|
||||||
|
enable_heading_fix=False,
|
||||||
|
enable_table_fix=False,
|
||||||
|
enable_xml_tag_cleanup=False,
|
||||||
|
enable_emphasis_spacing_fix=False,
|
||||||
|
)
|
||||||
|
return ContentNormalizer(config)
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
"""
|
||||||
|
Tests for code block formatting fixes.
|
||||||
|
Covers: prefix, suffix, indentation preservation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
class TestCodeBlockFix:
|
||||||
|
"""Test code block formatting normalization."""
|
||||||
|
|
||||||
|
def test_code_block_indentation_preserved(self, normalizer):
|
||||||
|
"""Indented code blocks (e.g., in lists) should preserve indentation."""
|
||||||
|
input_str = """
|
||||||
|
* List item 1
|
||||||
|
```python
|
||||||
|
def foo():
|
||||||
|
print("bar")
|
||||||
|
```
|
||||||
|
* List item 2
|
||||||
|
"""
|
||||||
|
# Indentation should be preserved
|
||||||
|
assert " ```python" in normalizer.normalize(input_str)
|
||||||
|
|
||||||
|
def test_inline_code_block_prefix(self, normalizer):
|
||||||
|
"""Code block that follows text on same line should be modified."""
|
||||||
|
input_str = "text```python\ncode\n```"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
# Just verify the code block markers are present
|
||||||
|
assert "```" in result
|
||||||
|
|
||||||
|
def test_code_block_suffix_fix(self, normalizer):
|
||||||
|
"""Code block with content on same line after lang should be fixed."""
|
||||||
|
input_str = "```python code\nmore code\n```"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
# Content should be on new line
|
||||||
|
assert "```python\n" in result or "```python " in result
|
||||||
|
|
||||||
|
|
||||||
|
class TestUnclosedCodeBlock:
|
||||||
|
"""Test auto-closing of unclosed code blocks."""
|
||||||
|
|
||||||
|
def test_unclosed_code_block_is_closed(self, normalizer):
|
||||||
|
"""Unclosed code blocks should be automatically closed."""
|
||||||
|
input_str = "```python\ncode here"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
# Should have closing ```
|
||||||
|
assert result.endswith("```") or result.count("```") == 2
|
||||||
|
|
||||||
|
def test_balanced_code_blocks_unchanged(self, normalizer):
|
||||||
|
"""Already balanced code blocks should not get extra closing."""
|
||||||
|
input_str = "```python\ncode\n```"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
assert result.count("```") == 2
|
||||||
@@ -0,0 +1,48 @@
|
|||||||
|
"""
|
||||||
|
Tests for details tag normalization.
|
||||||
|
Covers: </details> spacing, self-closing tags.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
class TestDetailsTagFix:
|
||||||
|
"""Test details tag normalization."""
|
||||||
|
|
||||||
|
def test_details_end_gets_newlines(self, normalizer):
|
||||||
|
"""</details> should be followed by double newline."""
|
||||||
|
input_str = "</details>Content after"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
assert "</details>\n\n" in result
|
||||||
|
|
||||||
|
def test_self_closing_details_gets_newline(self, normalizer):
|
||||||
|
"""Self-closing <details .../> should get newline after."""
|
||||||
|
input_str = "<details open />## Heading"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
# Should have newline between tag and heading
|
||||||
|
assert "/>\n" in result or "/> \n" in result
|
||||||
|
|
||||||
|
def test_details_in_code_block_unchanged(self, normalizer):
|
||||||
|
"""Details tags inside code blocks should not be modified."""
|
||||||
|
input_str = "```html\n<details>content</details>more\n```"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
# Content inside code block should be unchanged
|
||||||
|
assert "</details>more" in result
|
||||||
|
|
||||||
|
|
||||||
|
class TestThoughtTagFix:
|
||||||
|
"""Test thought tag normalization."""
|
||||||
|
|
||||||
|
def test_think_tag_normalized(self, normalizer):
|
||||||
|
"""<think> should be normalized to <thought>."""
|
||||||
|
input_str = "<think>content</think>"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
assert "<thought>" in result
|
||||||
|
assert "</thought>" in result
|
||||||
|
|
||||||
|
def test_thinking_tag_normalized(self, normalizer):
|
||||||
|
"""<thinking> should be normalized to <thought>."""
|
||||||
|
input_str = "<thinking>content</thinking>"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
assert "<thought>" in result
|
||||||
|
assert "</thought>" in result
|
||||||
@@ -0,0 +1,138 @@
|
|||||||
|
"""
|
||||||
|
Tests for emphasis spacing fix.
|
||||||
|
Covers: *, **, ***, _, __, ___ with spaces inside.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
class TestEmphasisSpacingFix:
|
||||||
|
"""Test emphasis spacing normalization."""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"input_str,expected",
|
||||||
|
[
|
||||||
|
# Double asterisk (bold)
|
||||||
|
("** bold **", "**bold**"),
|
||||||
|
("** bold text **", "**bold text**"),
|
||||||
|
("**text **", "**text**"),
|
||||||
|
("** text**", "**text**"),
|
||||||
|
# Triple asterisk (bold+italic)
|
||||||
|
("*** bold italic ***", "***bold italic***"),
|
||||||
|
# Double underscore (bold)
|
||||||
|
("__ bold __", "__bold__"),
|
||||||
|
("__ bold text __", "__bold text__"),
|
||||||
|
("__text __", "__text__"),
|
||||||
|
("__ text__", "__text__"),
|
||||||
|
# Triple underscore (bold+italic)
|
||||||
|
("___ bold italic ___", "___bold italic___"),
|
||||||
|
# Mixed markers
|
||||||
|
("** bold ** and __ also __", "**bold** and __also__"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_emphasis_with_spaces_fixed(
|
||||||
|
self, emphasis_only_normalizer, input_str, expected
|
||||||
|
):
|
||||||
|
"""Test that emphasis with spaces is correctly fixed."""
|
||||||
|
assert emphasis_only_normalizer.normalize(input_str) == expected
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"input_str",
|
||||||
|
[
|
||||||
|
# Single * and _ with spaces on both sides - treated as operator (safeguard)
|
||||||
|
"* italic *",
|
||||||
|
"_ italic _",
|
||||||
|
# Already correct emphasis
|
||||||
|
"**bold**",
|
||||||
|
"__bold__",
|
||||||
|
"*italic*",
|
||||||
|
"_italic_",
|
||||||
|
"***bold italic***",
|
||||||
|
"___bold italic___",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_safeguard_and_correct_emphasis_unchanged(
|
||||||
|
self, emphasis_only_normalizer, input_str
|
||||||
|
):
|
||||||
|
"""Test that safeguard cases and already correct emphasis are not modified."""
|
||||||
|
assert emphasis_only_normalizer.normalize(input_str) == input_str
|
||||||
|
|
||||||
|
|
||||||
|
class TestEmphasisSideEffects:
|
||||||
|
"""Test that emphasis fix does NOT affect unrelated content."""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"input_str,description",
|
||||||
|
[
|
||||||
|
# URLs with underscores
|
||||||
|
("https://example.com/path_with_underscore", "URL"),
|
||||||
|
("Visit https://api.example.com/get_user_info for info", "URL in text"),
|
||||||
|
# Variable names (snake_case)
|
||||||
|
("The `my_variable_name` is important", "Variable in backticks"),
|
||||||
|
("Use `get_user_data()` function", "Function name"),
|
||||||
|
# File names
|
||||||
|
("Edit the `config_file_name.py` file", "File name"),
|
||||||
|
("See `my_script__v2.py` for details", "Double underscore in filename"),
|
||||||
|
# Math-like subscripts
|
||||||
|
("The variable a_1 and b_2 are defined", "Math subscripts"),
|
||||||
|
# Single underscores not matching emphasis pattern
|
||||||
|
("word_with_underscore", "Underscore in word"),
|
||||||
|
("a_b_c_d", "Multiple underscores"),
|
||||||
|
# Horizontal rules
|
||||||
|
("---", "HR with dashes"),
|
||||||
|
("***", "HR with asterisks"),
|
||||||
|
("___", "HR with underscores"),
|
||||||
|
# List items
|
||||||
|
("- item_one\n- item_two", "List items"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_no_side_effects(self, emphasis_only_normalizer, input_str, description):
|
||||||
|
"""Test that various content types are NOT modified by emphasis fix."""
|
||||||
|
assert (
|
||||||
|
emphasis_only_normalizer.normalize(input_str) == input_str
|
||||||
|
), f"Failed for: {description}"
|
||||||
|
|
||||||
|
def test_list_marker_not_merged_with_emphasis(self, emphasis_only_normalizer):
|
||||||
|
"""Test that list markers (*) are not merged with emphasis (**).
|
||||||
|
|
||||||
|
Regression test for: "* **Yes**" should NOT become "***Yes**"
|
||||||
|
"""
|
||||||
|
input_str = """1. **Start**: The user opens the login page.
|
||||||
|
* **Yes**: Login successful.
|
||||||
|
* **No**: Show error message."""
|
||||||
|
result = emphasis_only_normalizer.normalize(input_str)
|
||||||
|
assert (
|
||||||
|
"* **Yes**" in result
|
||||||
|
), "List marker was incorrectly merged with emphasis"
|
||||||
|
assert (
|
||||||
|
"* **No**" in result
|
||||||
|
), "List marker was incorrectly merged with emphasis"
|
||||||
|
assert "***Yes**" not in result, "BUG: List marker merged with emphasis"
|
||||||
|
assert "***No**" not in result, "BUG: List marker merged with emphasis"
|
||||||
|
|
||||||
|
def test_list_marker_with_plain_text_then_emphasis(self, emphasis_only_normalizer):
|
||||||
|
"""Test that list items with plain text before emphasis are preserved.
|
||||||
|
|
||||||
|
Regression test for: "* U16 forward **Kuang**" should NOT become "*U16 forward **Kuang**"
|
||||||
|
"""
|
||||||
|
input_str = "* U16 China forward **Kuang Zhaolei**"
|
||||||
|
result = emphasis_only_normalizer.normalize(input_str)
|
||||||
|
assert "* U16" in result, "List marker spaces were incorrectly stripped"
|
||||||
|
assert (
|
||||||
|
"*U16" not in result or "* U16" in result
|
||||||
|
), "BUG: List marker spaces stripped"
|
||||||
|
|
||||||
|
|
||||||
|
class TestEmphasisInCodeBlocks:
|
||||||
|
"""Test that emphasis inside code blocks is NOT modified."""
|
||||||
|
|
||||||
|
def test_emphasis_in_code_block_unchanged(self, emphasis_only_normalizer):
|
||||||
|
"""Code blocks should be completely skipped."""
|
||||||
|
input_str = "```python\nmy_var = get_data__from_api()\n```"
|
||||||
|
assert emphasis_only_normalizer.normalize(input_str) == input_str
|
||||||
|
|
||||||
|
def test_mixed_emphasis_and_code(self, emphasis_only_normalizer):
|
||||||
|
"""Text outside code blocks should be fixed, inside should not."""
|
||||||
|
input_str = "** bold ** text\n```python\n** not bold **\n```"
|
||||||
|
expected = "**bold** text\n```python\n** not bold **\n```"
|
||||||
|
assert emphasis_only_normalizer.normalize(input_str) == expected
|
||||||
@@ -0,0 +1,51 @@
|
|||||||
|
"""
|
||||||
|
Tests for heading fix.
|
||||||
|
Covers: Missing space after # in headings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
class TestHeadingFix:
|
||||||
|
"""Test heading space normalization."""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"input_str,expected",
|
||||||
|
[
|
||||||
|
("#Heading", "# Heading"),
|
||||||
|
("##Heading", "## Heading"),
|
||||||
|
("###Heading", "### Heading"),
|
||||||
|
("#中文标题", "# 中文标题"),
|
||||||
|
("#123", "# 123"), # Numbers after # also get space
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_missing_space_added(self, normalizer, input_str, expected):
|
||||||
|
"""Headings missing space after # should be fixed."""
|
||||||
|
assert normalizer.normalize(input_str) == expected
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"input_str",
|
||||||
|
[
|
||||||
|
"# Heading",
|
||||||
|
"## Already Correct",
|
||||||
|
"###", # Just hashes
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_correct_headings_unchanged(self, normalizer, input_str):
|
||||||
|
"""Already correct headings should not be modified."""
|
||||||
|
assert normalizer.normalize(input_str) == input_str
|
||||||
|
|
||||||
|
|
||||||
|
class TestTableFix:
|
||||||
|
"""Test table pipe normalization."""
|
||||||
|
|
||||||
|
def test_missing_closing_pipe_added(self, normalizer):
|
||||||
|
"""Tables missing closing | should have it added."""
|
||||||
|
input_str = "| col1 | col2"
|
||||||
|
result = normalizer.normalize(input_str)
|
||||||
|
assert result.endswith("|") or "col2 |" in result
|
||||||
|
|
||||||
|
def test_already_closed_table_unchanged(self, normalizer):
|
||||||
|
"""Tables with closing | should not be modified."""
|
||||||
|
input_str = "| col1 | col2 |"
|
||||||
|
assert normalizer.normalize(input_str) == input_str
|
||||||
@@ -1,221 +0,0 @@
|
|||||||
"""
|
|
||||||
title: Multi-Model Context Merger
|
|
||||||
author: Fu-Jie
|
|
||||||
author_url: https://github.com/Fu-Jie/awesome-openwebui
|
|
||||||
funding_url: https://github.com/Fu-Jie/awesome-openwebui
|
|
||||||
version: 0.1.0
|
|
||||||
description: Automatically merges context from multiple model responses in the previous turn.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
from typing import List, Optional, Dict
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
from fastapi import Request
|
|
||||||
|
|
||||||
from open_webui.models.chats import Chats
|
|
||||||
|
|
||||||
|
|
||||||
class Filter:
|
|
||||||
class Valves(BaseModel):
|
|
||||||
# 注入的系统消息的前缀
|
|
||||||
CONTEXT_PREFIX: str = Field(
|
|
||||||
default="下面是多个匿名AI模型给出的回答,使用<response>标签包裹:\n\n",
|
|
||||||
description="Prefix for the injected system message containing the raw merged context.",
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.valves = self.Valves()
|
|
||||||
self.toggle = True
|
|
||||||
self.type = "filter"
|
|
||||||
self.name = "合并回答"
|
|
||||||
self.description = "在用户提问时,自动注入之前多个模型回答的上下文。"
|
|
||||||
|
|
||||||
async def inlet(
|
|
||||||
self,
|
|
||||||
body: Dict,
|
|
||||||
__user__: Dict,
|
|
||||||
__metadata__: Dict,
|
|
||||||
__request__: Request,
|
|
||||||
__event_emitter__,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
此方法是过滤器的入口点。它会检查上一回合是否为多模型响应,
|
|
||||||
如果是,则将这些响应直接格式化,并将格式化后的上下文作为系统消息注入到当前请求中。
|
|
||||||
"""
|
|
||||||
print(f"*********** Filter '{self.name}' triggered ***********")
|
|
||||||
chat_id = __metadata__.get("chat_id")
|
|
||||||
if not chat_id:
|
|
||||||
print(
|
|
||||||
f"DEBUG: Filter '{self.name}' skipped: chat_id not found in metadata."
|
|
||||||
)
|
|
||||||
return body
|
|
||||||
|
|
||||||
print(f"DEBUG: Chat ID found: {chat_id}")
|
|
||||||
|
|
||||||
# 1. 从数据库获取完整的聊天历史
|
|
||||||
try:
|
|
||||||
chat = await asyncio.to_thread(Chats.get_chat_by_id, chat_id)
|
|
||||||
|
|
||||||
if (
|
|
||||||
not chat
|
|
||||||
or not hasattr(chat, "chat")
|
|
||||||
or not chat.chat.get("history")
|
|
||||||
or not chat.chat.get("history").get("messages")
|
|
||||||
):
|
|
||||||
print(
|
|
||||||
f"DEBUG: Filter '{self.name}' skipped: Chat history not found or empty for chat_id: {chat_id}"
|
|
||||||
)
|
|
||||||
return body
|
|
||||||
|
|
||||||
messages_map = chat.chat["history"]["messages"]
|
|
||||||
print(
|
|
||||||
f"DEBUG: Successfully loaded {len(messages_map)} messages from history."
|
|
||||||
)
|
|
||||||
|
|
||||||
# Count the number of user messages in the history
|
|
||||||
user_message_count = sum(
|
|
||||||
1 for msg in messages_map.values() if msg.get("role") == "user"
|
|
||||||
)
|
|
||||||
|
|
||||||
# If there are less than 2 user messages, there's no previous turn to merge.
|
|
||||||
if user_message_count < 2:
|
|
||||||
print(
|
|
||||||
f"DEBUG: Filter '{self.name}' skipped: Not enough user messages in history to have a previous turn (found {user_message_count}, required >= 2)."
|
|
||||||
)
|
|
||||||
return body
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(
|
|
||||||
f"ERROR: Filter '{self.name}' failed to get chat history from DB: {e}"
|
|
||||||
)
|
|
||||||
return body
|
|
||||||
|
|
||||||
# This filter rebuilds the entire chat history to consolidate all multi-response turns.
|
|
||||||
|
|
||||||
# 1. Get all messages from history and sort by timestamp
|
|
||||||
all_messages = list(messages_map.values())
|
|
||||||
all_messages.sort(key=lambda x: x.get("timestamp", 0))
|
|
||||||
|
|
||||||
# 2. Pre-group all assistant messages by their parentId for efficient lookup
|
|
||||||
assistant_groups = {}
|
|
||||||
for msg in all_messages:
|
|
||||||
if msg.get("role") == "assistant":
|
|
||||||
parent_id = msg.get("parentId")
|
|
||||||
if parent_id:
|
|
||||||
if parent_id not in assistant_groups:
|
|
||||||
assistant_groups[parent_id] = []
|
|
||||||
assistant_groups[parent_id].append(msg)
|
|
||||||
|
|
||||||
final_messages = []
|
|
||||||
processed_parent_ids = set()
|
|
||||||
|
|
||||||
# 3. Iterate through the sorted historical messages to build the final, clean list
|
|
||||||
for msg in all_messages:
|
|
||||||
msg_id = msg.get("id")
|
|
||||||
role = msg.get("role")
|
|
||||||
parent_id = msg.get("parentId")
|
|
||||||
|
|
||||||
if role == "user":
|
|
||||||
# Add user messages directly
|
|
||||||
final_messages.append(msg)
|
|
||||||
|
|
||||||
elif role == "assistant":
|
|
||||||
# If this assistant's parent group has already been processed, skip it
|
|
||||||
if parent_id in processed_parent_ids:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Process the group of siblings for this parent_id
|
|
||||||
if parent_id in assistant_groups:
|
|
||||||
siblings = assistant_groups[parent_id]
|
|
||||||
|
|
||||||
# Only perform a merge if there are multiple siblings
|
|
||||||
if len(siblings) > 1:
|
|
||||||
print(
|
|
||||||
f"DEBUG: Found a group of {len(siblings)} siblings for parent_id {parent_id}. Merging..."
|
|
||||||
)
|
|
||||||
|
|
||||||
# --- MERGE LOGIC ---
|
|
||||||
merged_content = None
|
|
||||||
merged_message_id = None
|
|
||||||
# Sort siblings by timestamp before processing
|
|
||||||
siblings.sort(key=lambda s: s.get("timestamp", 0))
|
|
||||||
merged_message_timestamp = siblings[0].get("timestamp", 0)
|
|
||||||
|
|
||||||
# Case A: Check for system pre-merged content (merged.status: true and content not empty)
|
|
||||||
merged_content_msg = next(
|
|
||||||
(
|
|
||||||
s
|
|
||||||
for s in siblings
|
|
||||||
if s.get("merged", {}).get("status")
|
|
||||||
and s.get("merged", {}).get("content")
|
|
||||||
),
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
|
|
||||||
if merged_content_msg:
|
|
||||||
merged_content = merged_content_msg["merged"]["content"]
|
|
||||||
merged_message_id = merged_content_msg["id"]
|
|
||||||
merged_message_timestamp = merged_content_msg.get(
|
|
||||||
"timestamp", merged_message_timestamp
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
f"DEBUG: Using pre-merged content from message ID: {merged_message_id}"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Case B: Manually merge content
|
|
||||||
combined_content = []
|
|
||||||
first_sibling_id = None
|
|
||||||
counter = 0
|
|
||||||
|
|
||||||
for s in siblings:
|
|
||||||
if not first_sibling_id:
|
|
||||||
first_sibling_id = s["id"]
|
|
||||||
|
|
||||||
content = s.get("content", "")
|
|
||||||
if (
|
|
||||||
content
|
|
||||||
and content
|
|
||||||
!= "The requested model is not supported."
|
|
||||||
):
|
|
||||||
response_id = chr(ord("a") + counter)
|
|
||||||
combined_content.append(
|
|
||||||
f'<response id="{response_id}">\n{content}\n</response>'
|
|
||||||
)
|
|
||||||
counter += 1
|
|
||||||
|
|
||||||
if combined_content:
|
|
||||||
merged_content = "\n\n".join(combined_content)
|
|
||||||
merged_message_id = first_sibling_id or parent_id
|
|
||||||
|
|
||||||
if merged_content:
|
|
||||||
merged_message = {
|
|
||||||
"id": merged_message_id,
|
|
||||||
"parentId": parent_id,
|
|
||||||
"role": "assistant",
|
|
||||||
"content": f"{self.valves.CONTEXT_PREFIX}{merged_content}",
|
|
||||||
"timestamp": merged_message_timestamp,
|
|
||||||
}
|
|
||||||
final_messages.append(merged_message)
|
|
||||||
else:
|
|
||||||
# If there's only one sibling, add it directly
|
|
||||||
final_messages.append(siblings[0])
|
|
||||||
|
|
||||||
# Mark this group as processed
|
|
||||||
processed_parent_ids.add(parent_id)
|
|
||||||
|
|
||||||
# 4. The new user message from the current request is not in the historical messages_map,
|
|
||||||
# so we need to append it to our newly constructed message list.
|
|
||||||
if body.get("messages"):
|
|
||||||
new_user_message_from_body = body["messages"][-1]
|
|
||||||
# Ensure we don't add a historical message that might be in the body for context
|
|
||||||
if new_user_message_from_body.get("id") not in messages_map:
|
|
||||||
final_messages.append(new_user_message_from_body)
|
|
||||||
|
|
||||||
# 5. Replace the original message list with the new, cleaned-up list
|
|
||||||
body["messages"] = final_messages
|
|
||||||
print(
|
|
||||||
f"DEBUG: Rebuilt message history with {len(final_messages)} messages, consolidating all multi-response turns."
|
|
||||||
)
|
|
||||||
|
|
||||||
print(f"*********** Filter '{self.name}' finished successfully ***********")
|
|
||||||
return body
|
|
||||||
@@ -63,7 +63,3 @@ When adding a new pipe plugin, please follow these steps:
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
@@ -63,7 +63,3 @@
|
|||||||
|
|
||||||
Fu-Jie
|
Fu-Jie
|
||||||
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
GitHub: [Fu-Jie/awesome-openwebui](https://github.com/Fu-Jie/awesome-openwebui)
|
||||||
|
|
||||||
## 许可证
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|||||||
6
pytest.ini
Normal file
6
pytest.ini
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
[pytest]
|
||||||
|
testpaths = plugins
|
||||||
|
python_files = test_*.py
|
||||||
|
python_classes = Test*
|
||||||
|
python_functions = test_*
|
||||||
|
addopts = -v --tb=short
|
||||||
Reference in New Issue
Block a user