从openwebui 显示 R1 思维链 [更新硅基流动支持][支持火山引擎][支持腾讯云]说起
介绍
佬的原始版本只支持填入一个模型,对于有多个模型的需求无法支持。
因此,对函数进行了修改,主要修改了DEEPSEEK_MODELS变量,使其支持配置多个模型。
代码
函数,适用于0.5.6以上版本
"""
title: DeepSeek R1
author: zgccrui
description: 在OpenWebUI中显示DeepSeek R1模型的思维链 - 仅支持0.5.6及以上版本
version: 1.2.6
licence: MIT
"""
import json
import httpx
import re
from typing import AsyncGenerator, Callable, Awaitable
from pydantic import BaseModel, Field
import asyncio
class Pipe:
    """OpenWebUI pipe that streams DeepSeek R1 chat completions and renders the
    model's chain-of-thought (``reasoning_content``) between ``<think>`` and
    ``</think>`` markers.

    Multiple models are supported via the comma-separated ``DEEPSEEK_MODELS``
    valve; each configured model is exposed as its own pipe entry.
    """

    class Valves(BaseModel):
        # User-configurable settings shown in the OpenWebUI function panel.
        DEEPSEEK_API_BASE_URL: str = Field(
            default="https://api.deepseek.com/v1",
            description="DeepSeek API的基础请求地址",
        )
        DEEPSEEK_API_KEY: str = Field(
            default="", description="用于身份验证的DeepSeek API密钥,可从控制台获取"
        )
        DEEPSEEK_MODELS: str = Field(
            default="deepseek-r1,deepseek-reasoner,deepseek-r1-search",
            description="支持的DeepSeek模型列表,使用逗号分隔",
        )

    def __init__(self):
        self.valves = self.Valves()
        # SSE line prefix emitted by OpenAI-compatible streaming endpoints.
        self.data_prefix = "data: "
        # Thinking state machine: -1 = not started, 0 = thinking, 1 = answering.
        # NOTE(review): this is per-instance mutable state; if OpenWebUI shares
        # one Pipe instance across concurrent requests, states may interleave —
        # confirm the framework's call model before relying on this.
        self.thinking = -1
        self.emitter = None

    def _get_supported_models(self) -> list:
        """Parse the comma-separated DEEPSEEK_MODELS valve into a clean list.

        Blank entries (e.g. trailing commas) are dropped.
        """
        return [
            model.strip()
            for model in self.valves.DEEPSEEK_MODELS.split(",")
            if model.strip()
        ]

    def pipes(self):
        """Expose one pipe entry per configured model.

        Re-reads the valve on every call so valve edits take effect immediately.
        """
        return [
            {
                "id": model_name,
                "name": model_name,
            }
            for model_name in self._get_supported_models()
        ]

    @staticmethod
    def _normalize_message_roles(messages: list) -> None:
        """Insert placeholder turns so no two consecutive messages share a role.

        The DeepSeek API rejects consecutive same-role messages; this mutates
        ``messages`` in place, inserting an opposite-role placeholder between
        any adjacent pair with the same role.
        """
        i = 0
        while i < len(messages) - 1:
            if messages[i]["role"] == messages[i + 1]["role"]:
                alternate_role = (
                    "assistant" if messages[i]["role"] == "user" else "user"
                )
                messages.insert(
                    i + 1,
                    {"role": alternate_role, "content": "[Unfinished thinking]"},
                )
            i += 1

    async def pipe(
        self, body: dict, __event_emitter__: Callable[[dict], Awaitable[None]] = None
    ) -> AsyncGenerator[str, None]:
        """Main processing pipeline.

        Validates configuration and the requested model, normalizes the message
        roles, streams the chat completion, and yields content chunks with
        ``<think>``/``</think>`` markers wrapped around the reasoning phase.
        Errors are yielded as JSON strings rather than raised.
        """
        self.thinking = -1
        self.emitter = __event_emitter__

        # Validate configuration.
        if not self.valves.DEEPSEEK_API_KEY:
            yield json.dumps({"error": "未配置API密钥"}, ensure_ascii=False)
            return

        # Strip the OpenWebUI function-name prefix ("<function>.<model>")
        # and reject models not present in the configured list.
        model_id = body["model"].split(".", 1)[-1]
        if model_id not in self._get_supported_models():
            yield json.dumps({"error": f"不支持的模型: {model_id}"}, ensure_ascii=False)
            return

        headers = {
            "Authorization": f"Bearer {self.valves.DEEPSEEK_API_KEY}",
            "Content-Type": "application/json",
        }
        try:
            payload = {**body, "model": model_id}
            self._normalize_message_roles(payload["messages"])

            # Issue the streaming API request.
            async with httpx.AsyncClient(http2=True) as client:
                async with client.stream(
                    "POST",
                    f"{self.valves.DEEPSEEK_API_BASE_URL}/chat/completions",
                    json=payload,
                    headers=headers,
                    timeout=300,
                ) as response:
                    # Non-200: read the body and surface a formatted error.
                    if response.status_code != 200:
                        error = await response.aread()
                        yield self._format_error(response.status_code, error)
                        return

                    # Stream SSE lines, forwarding parsed deltas.
                    async for line in response.aiter_lines():
                        if not line.startswith(self.data_prefix):
                            continue
                        json_str = line[len(self.data_prefix):]
                        try:
                            data = json.loads(json_str)
                        except json.JSONDecodeError as e:
                            error_detail = f"解析失败 - 内容:{json_str},原因:{e}"
                            yield self._format_error("JSONDecodeError", error_detail)
                            return

                        # FIX: some providers send keep-alive chunks with an
                        # empty "choices" list; the old [{}] default still
                        # raised IndexError on "choices": [].
                        choices = data.get("choices")
                        if not choices:
                            continue
                        choice = choices[0]

                        if choice.get("finish_reason"):
                            return

                        # FIX: use .get() consistently — choice["delta"] could
                        # raise KeyError on chunks without a delta.
                        delta = choice.get("delta", {})

                        # State machine: emit <think> / </think> transitions.
                        state_output = await self._update_thinking_state(delta)
                        if state_output:
                            yield state_output
                            if state_output == "<think>":
                                yield "\n"

                        # Forward content; some providers inline the literal
                        # tags, so re-emit them separately for the UI.
                        content = self._process_content(delta)
                        if content:
                            if content.startswith("<think>"):
                                # FIX: dropped the redundant re.match/re.sub —
                                # startswith already guarantees the prefix.
                                content = content[len("<think>"):]
                                yield "<think>"
                                await asyncio.sleep(0.1)
                                yield "\n"
                            elif content.startswith("</think>"):
                                content = content[len("</think>"):]
                                yield "</think>"
                                await asyncio.sleep(0.1)
                                yield "\n"
                            yield content
        except Exception as e:
            yield self._format_exception(e)

    async def _update_thinking_state(self, delta: dict) -> str:
        """Advance the thinking state machine and return the marker to emit.

        Returns "<think>" when reasoning starts, a closing "</think>" block
        when answering starts, and "" otherwise.
        """
        state_output = ""
        # Transition: not started -> thinking.
        if self.thinking == -1 and delta.get("reasoning_content"):
            self.thinking = 0
            state_output = "<think>"
        # Transition: thinking -> answering.
        elif (
            self.thinking == 0
            and not delta.get("reasoning_content")
            and delta.get("content")
        ):
            self.thinking = 1
            state_output = "\n</think>\n\n"
        return state_output

    def _process_content(self, delta: dict) -> str:
        """Return the delta's reasoning content, falling back to its content."""
        return delta.get("reasoning_content", "") or delta.get("content", "")

    def _format_error(self, status_code, error) -> str:
        """Format an HTTP status (or error tag) and body as a JSON error string.

        ``error`` may be bytes (raw HTTP body) or str (pre-formatted detail) —
        the pipeline calls this with both.
        """
        # FIX: the old fallback called error.decode() even when a str was
        # passed in (the JSONDecodeError path), raising AttributeError.
        if isinstance(error, bytes):
            error_str = error.decode(errors="ignore")
        else:
            error_str = str(error)
        try:
            parsed = json.loads(error_str)
            # FIX: only .get() on dicts — a non-dict JSON body (e.g. a bare
            # string) previously raised AttributeError.
            if isinstance(parsed, dict):
                err_msg = str(parsed.get("message", error_str))[:200]
            else:
                err_msg = error_str[:200]
        except json.JSONDecodeError:  # FIX: was a bare `except:`
            err_msg = error_str[:200]
        return json.dumps(
            {"error": f"HTTP {status_code}: {err_msg}"}, ensure_ascii=False
        )

    def _format_exception(self, e: Exception) -> str:
        """Format an unexpected exception as a JSON error string."""
        err_type = type(e).__name__
        return json.dumps({"error": f"{err_type}: {str(e)}"}, ensure_ascii=False)
用法
-
添加函数
如图所示,复制上述代码,添加函数
命名仅用于辨识,此处为DeepSeek R1_multi
-
函数设置
输入BaseUrl、API Key以及Models。支持的Models以逗号进行分隔。例如
deepseek-r1,deepseek-reasoner,deepseek-r1-search,test
设置如图
-
开启模型
到管理员设置中的模型界面,查找模型,前缀为函数名称即是要的模型,本文设置函数名称为DeepSeek R1_multi,因此,图中DeepSeek R1_multi前缀的即是目标。
-
效果



