Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions astrbot/core/config/default.py
Original file line number Diff line number Diff line change
Expand Up @@ -1206,6 +1206,19 @@
"custom_headers": {"User-Agent": "claude-code/0.1.0"},
"anth_thinking_config": {"type": "", "budget": 0, "effort": ""},
},
"OpenCode Go": {
"id": "opencode-go",
"provider": "opencode-go",
"type": "opencode_go_chat_completion",
"provider_type": "chat_completion",
"enable": True,
"key": [],
"api_base": "https://opencode.ai/zen/go/v1",
"model": "opencode-go/kimi-k2.6",
"timeout": 120,
"proxy": "",
"custom_headers": {},
},
"Moonshot": {
"id": "moonshot",
"provider": "moonshot",
Expand Down
4 changes: 4 additions & 0 deletions astrbot/core/provider/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,10 @@ def dynamic_import_provider(self, type: str) -> None:
from .sources.openai_source import (
ProviderOpenAIOfficial as ProviderOpenAIOfficial,
)
case "opencode_go_chat_completion":
from .sources.opencode_go_source import (
ProviderOpenCodeGo as ProviderOpenCodeGo,
)
case "longcat_chat_completion":
from .sources.longcat_source import ProviderLongCat as ProviderLongCat
case "minimax_token_plan":
Expand Down
43 changes: 43 additions & 0 deletions astrbot/core/provider/sources/openai_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -515,6 +515,47 @@ def _apply_provider_specific_extra_body_overrides(
extra_body.pop("think", None)
extra_body["reasoning_effort"] = "none"

def _requires_tool_call_reasoning_content(
self,
payloads: dict,
extra_body: dict[str, Any],
) -> bool:
thinking = extra_body.get("thinking")
if isinstance(thinking, dict) and thinking.get("type") == "disabled":
return False

provider = str(self.provider_config.get("provider", "")).lower()
api_base = str(self.provider_config.get("api_base", "")).lower()
model = str(payloads.get("model", "")).lower()

return (
provider in {"moonshot", "opencode-go"}
or "moonshot" in api_base
or "api.kimi" in api_base
or model.startswith(("kimi-k2.5", "kimi-k2.6", "kimi-k2-thinking"))
)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

为了提高代码的可维护性和可扩展性,建议将判断是否需要 reasoning_content 的逻辑从硬编码的字符串匹配改为通过提供商配置中的一个标志位来控制。
当前实现硬编码了 `moonshot`、`opencode-go` 等提供商名称和 `api.kimi` 等 URL 片段,当未来需要支持更多有类似需求的提供商时,都需要修改此处的代码,这违反了开闭原则。

建议在提供商配置中增加一个布尔类型的配置项,例如 force_tool_call_reasoning_content: true。然后 _requires_tool_call_reasoning_content 方法可以直接检查此配置项。

例如,在 astrbot/core/config/default.py 中为 Moonshot 和 OpenCode Go 添加该配置:

"Moonshot": {
    ...
    "force_tool_call_reasoning_content": True,
},
"OpenCode Go": {
    ...
    "force_tool_call_reasoning_content": True,
}

然后 _requires_tool_call_reasoning_content 方法可以简化为:

def _requires_tool_call_reasoning_content(
    self,
    payloads: dict,
    extra_body: dict[str, Any],
) -> bool:
    thinking = extra_body.get("thinking")
    if isinstance(thinking, dict) and thinking.get("type") == "disabled":
        return False

    return self.provider_config.get("force_tool_call_reasoning_content", False)

这样修改后,未来添加新的提供商时,只需在配置中声明即可,无需改动 openai_source.py 的代码。


def _ensure_tool_call_reasoning_content(
self,
payloads: dict,
extra_body: dict[str, Any],
) -> None:
if not self._requires_tool_call_reasoning_content(payloads, extra_body):
return

messages = payloads.get("messages")
if not isinstance(messages, list):
return

for message in messages:
if not isinstance(message, dict):
continue
if message.get("role") != "assistant" or not message.get("tool_calls"):
continue
reasoning_content = message.get("reasoning_content")
if not isinstance(reasoning_content, str) or not reasoning_content.strip():
message["reasoning_content"] = " "

async def get_models(self):
try:
models_str = []
Expand Down Expand Up @@ -591,6 +632,7 @@ async def _query(self, payloads: dict, tools: ToolSet | None) -> LLMResponse:

model = payloads.get("model", "").lower()

self._ensure_tool_call_reasoning_content(payloads, extra_body)
self._sanitize_assistant_messages(payloads)

completion = await self.client.chat.completions.create(
Expand Down Expand Up @@ -643,6 +685,7 @@ async def _query_stream(
del payloads[key]
self._apply_provider_specific_extra_body_overrides(extra_body)

self._ensure_tool_call_reasoning_content(payloads, extra_body)
self._sanitize_assistant_messages(payloads)

stream = await self.client.chat.completions.create(
Expand Down
146 changes: 146 additions & 0 deletions astrbot/core/provider/sources/opencode_go_source.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
from collections.abc import AsyncGenerator
from typing import Literal

from astrbot.api.provider import Provider
from astrbot.core.agent.message import ContentPart, Message
from astrbot.core.agent.tool import ToolSet
from astrbot.core.provider.entities import LLMResponse, ToolCallsResult

from ..register import register_provider_adapter
from .openai_source import ProviderOpenAIOfficial

# Default endpoint handed to the delegate OpenAI-compatible provider.
OPENCODE_GO_API_BASE = "https://opencode.ai/zen/go/v1"
# Prefix used in configured model ids; stripped before hitting the API.
OPENCODE_GO_MODEL_PREFIX = "opencode-go/"
# Model assumed when the provider config does not specify one.
OPENCODE_GO_DEFAULT_MODEL = "kimi-k2.6"
# Models served via /v1/messages, which this adapter rejects (it only
# supports /v1/chat/completions — see _ensure_chat_completions_model).
OPENCODE_GO_MESSAGES_ONLY_MODELS = {"minimax-m2.5", "minimax-m2.7"}


@register_provider_adapter(
"opencode_go_chat_completion",
"OpenCode Go Subscription Provider Adapter",
)
class ProviderOpenCodeGo(Provider):
def __init__(self, provider_config: dict, provider_settings: dict) -> None:
    """Set up the OpenCode Go adapter.

    Normalises ``api_base`` (trailing slash stripped), coerces a string
    ``timeout`` to int, resolves the configured model to its API-facing
    name, and builds the delegate OpenAI-compatible provider that performs
    the actual requests.
    """
    super().__init__(provider_config, provider_settings)
    self.api_base = provider_config.get("api_base", OPENCODE_GO_API_BASE).rstrip(
        "/"
    )
    self.timeout = provider_config.get("timeout", 120)
    if isinstance(self.timeout, str):
        self.timeout = int(self.timeout)

    # Normalise once: `model` is already the API-facing name afterwards,
    # so it is passed to the delegate config directly (no second
    # _to_api_model call — it was redundant).
    model = self._to_api_model(
        provider_config.get("model", OPENCODE_GO_DEFAULT_MODEL)
    )
    self.set_model(model)

    self.openai_provider = ProviderOpenAIOfficial(
        self._build_delegate_config(model=model),
        provider_settings,
    )

def _build_delegate_config(self, *, model: str) -> dict:
config = dict(self.provider_config)
config["api_base"] = self.api_base
config["model"] = model
return config

@classmethod
def _to_api_model(cls, model: str | None) -> str:
resolved_model = (model or OPENCODE_GO_DEFAULT_MODEL).strip()
if resolved_model.startswith(OPENCODE_GO_MODEL_PREFIX):
return resolved_model.removeprefix(OPENCODE_GO_MODEL_PREFIX)
return resolved_model

@classmethod
def _ensure_chat_completions_model(cls, model: str | None) -> str:
api_model = cls._to_api_model(model)
if api_model in OPENCODE_GO_MESSAGES_ONLY_MODELS:
raise ValueError(
f"OpenCode Go model {OPENCODE_GO_MODEL_PREFIX}{api_model} uses "
"/v1/messages. This adapter currently supports "
"/v1/chat/completions models only."
)
return api_model

def get_current_key(self) -> str:
    """Return the API key currently selected by the delegate provider."""
    delegate = self.openai_provider
    return delegate.get_current_key()

def get_keys(self) -> list[str]:
    """Return every API key known to the delegate provider."""
    delegate = self.openai_provider
    return delegate.get_keys()

def set_key(self, key: str) -> None:
    """Forward the active-key selection to the delegate provider."""
    delegate = self.openai_provider
    delegate.set_key(key)

async def get_models(self) -> list[str]:
    """Return the sorted list of chat-completion-capable model names.

    Model ids from the delegate are normalised via ``_to_api_model``
    exactly once each (the previous comprehension called it twice per
    model); blank entries and /v1/messages-only models are filtered out.
    """
    raw_models = await self.openai_provider.get_models()
    api_models: list[str] = []
    for raw in raw_models:
        if not raw.strip():
            continue
        api_model = self._to_api_model(raw)
        if api_model not in OPENCODE_GO_MESSAGES_ONLY_MODELS:
            api_models.append(api_model)
    return sorted(api_models)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

get_models 方法中,列表推导式对每个 model 都调用了两次 self._to_api_model(model),这既影响性能也降低了代码的可读性。
建议进行重构,避免重复调用。可以使用 for 循环或者 Python 3.8+ 的海象运算符(:=)来优化。

        api_models = []
        for model in models:
            if not model.strip():
                continue
            api_model = self._to_api_model(model)
            if api_model not in OPENCODE_GO_MESSAGES_ONLY_MODELS:
                api_models.append(api_model)
        return sorted(api_models)


async def text_chat(
    self,
    prompt: str | None = None,
    session_id: str | None = None,
    image_urls: list[str] | None = None,
    audio_urls: list[str] | None = None,
    func_tool: ToolSet | None = None,
    contexts: list[Message] | list[dict] | None = None,
    system_prompt: str | None = None,
    tool_calls_result: ToolCallsResult | list[ToolCallsResult] | None = None,
    model: str | None = None,
    extra_user_content_parts: list[ContentPart] | None = None,
    tool_choice: Literal["auto", "required"] = "auto",
    **kwargs,
) -> LLMResponse:
    """Run one chat-completion round trip via the delegate provider.

    The requested model (or the configured default when ``model`` is
    ``None``) is validated first: models served only via /v1/messages
    raise ``ValueError`` before any request is made.
    """
    target_model = self._ensure_chat_completions_model(model or self.get_model())
    return await self.openai_provider.text_chat(
        prompt=prompt,
        session_id=session_id,
        image_urls=image_urls,
        audio_urls=audio_urls,
        func_tool=func_tool,
        contexts=contexts,
        system_prompt=system_prompt,
        tool_calls_result=tool_calls_result,
        model=target_model,
        extra_user_content_parts=extra_user_content_parts,
        tool_choice=tool_choice,
        **kwargs,
    )

async def text_chat_stream(
    self,
    prompt: str | None = None,
    session_id: str | None = None,
    image_urls: list[str] | None = None,
    audio_urls: list[str] | None = None,
    func_tool: ToolSet | None = None,
    contexts: list[Message] | list[dict] | None = None,
    system_prompt: str | None = None,
    tool_calls_result: ToolCallsResult | list[ToolCallsResult] | None = None,
    model: str | None = None,
    tool_choice: Literal["auto", "required"] = "auto",
    **kwargs,
) -> AsyncGenerator[LLMResponse, None]:
    """Stream chat-completion responses via the delegate provider.

    The requested model (or the configured default) is validated against
    the /v1/chat/completions allow-list before the stream is opened.
    """
    target_model = self._ensure_chat_completions_model(model or self.get_model())
    stream = self.openai_provider.text_chat_stream(
        prompt=prompt,
        session_id=session_id,
        image_urls=image_urls,
        audio_urls=audio_urls,
        func_tool=func_tool,
        contexts=contexts,
        system_prompt=system_prompt,
        tool_calls_result=tool_calls_result,
        model=target_model,
        tool_choice=tool_choice,
        **kwargs,
    )
    async for chunk in stream:
        yield chunk
Comment on lines +94 to +151
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

`text_chat` 和 `text_chat_stream` 方法有重复的逻辑来解析和验证模型名称。为了遵循 DRY (Don't Repeat Yourself) 原则并提高代码可维护性,建议将这部分逻辑提取到一个辅助方法中。

例如,可以创建一个 _resolve_model 方法:

def _resolve_model(self, model: str | None) -> str:
    requested_model = model or self.get_model()
    return self._ensure_chat_completions_model(requested_model)

此外,新功能的实现(如该 Provider 的核心逻辑)应当伴随相应的单元测试以确保稳定性。

References
  1. When implementing similar functionality for different cases, refactor the logic into a shared helper function to avoid code duplication.
  2. New functionality, such as handling attachments, should be accompanied by corresponding unit tests.


async def terminate(self) -> None:
    """Shut down by delegating termination to the wrapped provider."""
    delegate = self.openai_provider
    await delegate.terminate()
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
4 changes: 2 additions & 2 deletions dashboard/src/composables/useProviderSources.ts
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ export function useProviderSources(options: UseProviderSourcesOptions) {
types.push({
value: templateName,
label: templateName,
icon: getProviderIcon(template.provider)
icon: getProviderIcon(template.provider || template.id || template.type || templateName)
})
}
}
Expand Down Expand Up @@ -272,7 +272,7 @@ export function useProviderSources(options: UseProviderSourcesOptions) {

function resolveSourceIcon(source: any) {
  // No source object at all → no icon.
  if (!source) return ''
  // Fall back through the common identifying fields when `provider`
  // is absent on the source entry.
  const iconKey = source.provider || source.id || source.type || source.templateKey
  return getProviderIcon(iconKey) || ''
}

function getSourceDisplayName(source: any) {
Expand Down
7 changes: 6 additions & 1 deletion dashboard/src/utils/providerUtils.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,15 @@
* 提供商相关的工具函数
*/

const opencodeGoIcon = new URL('@/assets/images/provider_logos/opencode-go.png', import.meta.url).href;

/**
* 获取提供商类型对应的图标
* @param {string} type - 提供商类型
* @returns {string} 图标 URL
*/
export function getProviderIcon(type) {
const providerType = type?.toString().trim().toLowerCase();
const icons = {
'openai': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/openai.svg',
'azure': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/azure.svg',
Expand All @@ -23,6 +26,8 @@ export function getProviderIcon(type) {
'moonshot': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/kimi.svg',
'kimi': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/kimi.svg',
'kimi-code': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/kimi.svg',
'opencode-go': opencodeGoIcon,
'opencode_go_chat_completion': opencodeGoIcon,
'longcat': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/longcat-color.svg',
'ppio': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/ppio.svg',
'dify': 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/dify-color.svg',
Expand All @@ -47,7 +52,7 @@ export function getProviderIcon(type) {
"bailian": "https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/bailian-color.svg",
"volcengine": 'https://cdn.jsdelivr.net/npm/@lobehub/icons-static-svg@latest/icons/volcengine-color.svg',
};
return icons[type] || '';
return icons[providerType] || '';
}

/**
Expand Down