Feat: OpenCode Go Subscription as Provider #8179
base: master
@@ -0,0 +1,146 @@
from collections.abc import AsyncGenerator
from typing import Literal

from astrbot.api.provider import Provider
from astrbot.core.agent.message import ContentPart, Message
from astrbot.core.agent.tool import ToolSet
from astrbot.core.provider.entities import LLMResponse, ToolCallsResult

from ..register import register_provider_adapter
from .openai_source import ProviderOpenAIOfficial

OPENCODE_GO_API_BASE = "https://opencode.ai/zen/go/v1"
OPENCODE_GO_MODEL_PREFIX = "opencode-go/"
OPENCODE_GO_DEFAULT_MODEL = "kimi-k2.6"
OPENCODE_GO_MESSAGES_ONLY_MODELS = {"minimax-m2.5", "minimax-m2.7"}


@register_provider_adapter(
    "opencode_go_chat_completion",
    "OpenCode Go Subscription Provider Adapter",
)
class ProviderOpenCodeGo(Provider):
    def __init__(self, provider_config: dict, provider_settings: dict) -> None:
        super().__init__(provider_config, provider_settings)
        self.api_base = provider_config.get("api_base", OPENCODE_GO_API_BASE).rstrip(
            "/"
        )
        self.timeout = provider_config.get("timeout", 120)
        if isinstance(self.timeout, str):
            self.timeout = int(self.timeout)

        model = self._to_api_model(
            provider_config.get("model", OPENCODE_GO_DEFAULT_MODEL)
        )
        self.set_model(model)

        self.openai_provider = ProviderOpenAIOfficial(
            self._build_delegate_config(model=self._to_api_model(model)),
            provider_settings,
        )

    def _build_delegate_config(self, *, model: str) -> dict:
        config = dict(self.provider_config)
        config["api_base"] = self.api_base
        config["model"] = model
        return config

    @classmethod
    def _to_api_model(cls, model: str | None) -> str:
        resolved_model = (model or OPENCODE_GO_DEFAULT_MODEL).strip()
        if resolved_model.startswith(OPENCODE_GO_MODEL_PREFIX):
            return resolved_model.removeprefix(OPENCODE_GO_MODEL_PREFIX)
        return resolved_model

    @classmethod
    def _ensure_chat_completions_model(cls, model: str | None) -> str:
        api_model = cls._to_api_model(model)
        if api_model in OPENCODE_GO_MESSAGES_ONLY_MODELS:
            raise ValueError(
                f"OpenCode Go model {OPENCODE_GO_MODEL_PREFIX}{api_model} uses "
                "/v1/messages. This adapter currently supports "
                "/v1/chat/completions models only."
            )
        return api_model

    def get_current_key(self) -> str:
        return self.openai_provider.get_current_key()

    def get_keys(self) -> list[str]:
        return self.openai_provider.get_keys()

    def set_key(self, key: str) -> None:
        self.openai_provider.set_key(key)

    async def get_models(self) -> list[str]:
        models = await self.openai_provider.get_models()
        return sorted(
            self._to_api_model(model)
            for model in models
            if model.strip()
            and self._to_api_model(model) not in OPENCODE_GO_MESSAGES_ONLY_MODELS
        )
Contributor
In get_models, the generator expression calls _to_api_model twice for every model; an explicit loop computes it once and reads more clearly, for example:

api_models = []
for model in models:
    if not model.strip():
        continue
    api_model = self._to_api_model(model)
    if api_model not in OPENCODE_GO_MESSAGES_ONLY_MODELS:
        api_models.append(api_model)
return sorted(api_models)
    async def text_chat(
        self,
        prompt: str | None = None,
        session_id: str | None = None,
        image_urls: list[str] | None = None,
        audio_urls: list[str] | None = None,
        func_tool: ToolSet | None = None,
        contexts: list[Message] | list[dict] | None = None,
        system_prompt: str | None = None,
        tool_calls_result: ToolCallsResult | list[ToolCallsResult] | None = None,
        model: str | None = None,
        extra_user_content_parts: list[ContentPart] | None = None,
        tool_choice: Literal["auto", "required"] = "auto",
        **kwargs,
    ) -> LLMResponse:
        requested_model = model or self.get_model()
        return await self.openai_provider.text_chat(
            prompt=prompt,
            session_id=session_id,
            image_urls=image_urls,
            audio_urls=audio_urls,
            func_tool=func_tool,
            contexts=contexts,
            system_prompt=system_prompt,
            tool_calls_result=tool_calls_result,
            model=self._ensure_chat_completions_model(requested_model),
            extra_user_content_parts=extra_user_content_parts,
            tool_choice=tool_choice,
            **kwargs,
        )

    async def text_chat_stream(
        self,
        prompt: str | None = None,
        session_id: str | None = None,
        image_urls: list[str] | None = None,
        audio_urls: list[str] | None = None,
        func_tool: ToolSet | None = None,
        contexts: list[Message] | list[dict] | None = None,
        system_prompt: str | None = None,
        tool_calls_result: ToolCallsResult | list[ToolCallsResult] | None = None,
        model: str | None = None,
        tool_choice: Literal["auto", "required"] = "auto",
        **kwargs,
    ) -> AsyncGenerator[LLMResponse, None]:
        requested_model = model or self.get_model()
        async for response in self.openai_provider.text_chat_stream(
            prompt=prompt,
            session_id=session_id,
            image_urls=image_urls,
            audio_urls=audio_urls,
            func_tool=func_tool,
            contexts=contexts,
            system_prompt=system_prompt,
            tool_calls_result=tool_calls_result,
            model=self._ensure_chat_completions_model(requested_model),
            tool_choice=tool_choice,
            **kwargs,
        ):
            yield response
Comment on lines +94 to +151
Contributor
The model resolution (requested_model plus the _ensure_chat_completions_model check) is duplicated in text_chat and text_chat_stream. For example, a _resolve_model helper could be introduced:

def _resolve_model(self, model: str | None) -> str:
    requested_model = model or self.get_model()
    return self._ensure_chat_completions_model(requested_model)

In addition, new functionality such as this provider's core logic should be accompanied by unit tests to ensure stability.
    async def terminate(self) -> None:
        await self.openai_provider.terminate()
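Following up on the unit-test suggestion above, a minimal pytest sketch for the pure model-mapping helpers could look like the following. It is only an illustration: the module path opencode_go_source is an assumption (the new file's path is not shown in this view), and the project's actual test layout may differ. Because _to_api_model and _ensure_chat_completions_model are classmethods, no provider instance and no network access are needed.

# Hypothetical test module; the import path is assumed, not taken from this PR.
import pytest

from astrbot.core.provider.sources.opencode_go_source import (
    OPENCODE_GO_MODEL_PREFIX,
    ProviderOpenCodeGo,
)


def test_to_api_model_strips_prefix():
    # "opencode-go/kimi-k2.6" and "kimi-k2.6" should resolve to the same API model.
    assert ProviderOpenCodeGo._to_api_model("opencode-go/kimi-k2.6") == "kimi-k2.6"
    assert ProviderOpenCodeGo._to_api_model("kimi-k2.6") == "kimi-k2.6"


def test_to_api_model_falls_back_to_default():
    # None should resolve to the default model declared in the adapter.
    assert ProviderOpenCodeGo._to_api_model(None) == "kimi-k2.6"


def test_messages_only_model_is_rejected():
    # Models served via /v1/messages must raise, since this adapter only
    # supports /v1/chat/completions.
    with pytest.raises(ValueError):
        ProviderOpenCodeGo._ensure_chat_completions_model(
            OPENCODE_GO_MODEL_PREFIX + "minimax-m2.5"
        )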
To improve maintainability and extensibility, consider replacing the hard-coded string matching that decides whether reasoning_content is required with a flag in the provider configuration. The current implementation hard-codes provider names such as moonshot and opencode-go and URL fragments such as api.kimi, so every future provider with a similar requirement forces another edit here, which violates the open/closed principle. Instead, add a boolean option to the provider configuration, for example force_tool_call_reasoning_content: true, and let the _requires_tool_call_reasoning_content method check that flag directly. For example, declare the option for Moonshot and OpenCode Go in astrbot/core/config/default.py; _requires_tool_call_reasoning_content then reduces to a simple config lookup. With this change, supporting a new provider only requires declaring the flag in its configuration, with no further edits to openai_source.py.
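A minimal sketch of the flag-based check being suggested; the exact schema in default.py and the current signature of _requires_tool_call_reasoning_content are not shown in this diff, so both are assumptions:

# Hypothetical provider config entry in astrbot/core/config/default.py
# (surrounding schema assumed; only the new key matters here):
#   "force_tool_call_reasoning_content": True   # set for Moonshot and OpenCode Go

def _requires_tool_call_reasoning_content(self) -> bool:
    # Assumed simplification: read the flag from the provider config instead of
    # matching provider names or API URL fragments.
    return bool(self.provider_config.get("force_tool_call_reasoning_content", False))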