Mirror of https://github.com/YspCoder/clawgo.git, synced 2026-05-11 22:28:58 +08:00
Compare commits
2 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 78d546989c |  |
|  | c1cbec551b |  |
@@ -108,6 +108,8 @@ clawgo provider login codex --manual
 - Automatically falls back to the OAuth account pool on quota or rate-limit failures
 - Multi-account rotation and background refresh are still retained
 
+If an OpenAI-compatible provider only supports `POST /v1/chat/completions`, set `responses.api: "chat_completions"` in that provider's config; the default is `responses`.
+
 ### 4. Start
 
 Interactive mode:
@@ -119,6 +119,8 @@ If you have both an `API key` and OAuth accounts for the same upstream, prefer c
 - the provider runtime panel shows current candidate ordering, the most recent successful credential, and recent hit/error history
 - to persist runtime history across restarts, configure `runtime_persist`, `runtime_history_file`, and `runtime_history_max` on the provider
 
+If an OpenAI-compatible provider only supports `POST /v1/chat/completions`, set `responses.api: "chat_completions"` on that provider. The default remains `responses`.
+
 ### 4. Start
 
 Interactive mode:
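The README note above describes the new `responses.api` switch. As a minimal sketch of flipping it programmatically — assuming the exported `config.DefaultConfig` and `config.Validate` that the test hunks further down rely on — a caller might write:

```go
package main

import (
	"fmt"

	"github.com/YspCoder/clawgo/pkg/config"
)

func main() {
	cfg := config.DefaultConfig()

	// Route the "openai" provider through POST /v1/chat/completions
	// instead of the default Responses API.
	pc := cfg.Models.Providers["openai"]
	pc.Responses.API = "chat_completions"
	cfg.Models.Providers["openai"] = pc

	// Validation accepts only "responses" and "chat_completions";
	// anything else is rejected (see the validateProviderConfig hunk).
	fmt.Println(len(config.Validate(cfg))) // expect 0 errors
}
```

The read-modify-write on the providers map mirrors how the validation test below mutates the same config.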
@@ -4,9 +4,12 @@ import (
 	"context"
 	"os"
 	"path/filepath"
+	"reflect"
 	"sync/atomic"
 	"testing"
 	"time"
+
+	"github.com/YspCoder/clawgo/pkg/config"
 )
 
 func TestConfigFileFingerprintSameContentIgnoresTouch(t *testing.T) {
@@ -115,3 +118,43 @@ func TestGatewayConfigWatcherTouchDoesNotReload(t *testing.T) {
 		t.Fatalf("expected touch-only update to skip reload, got %d", got)
 	}
 }
+
+func TestNormalizeHotReloadChannelsConfigIgnoresWeixinRuntimeState(t *testing.T) {
+	t.Parallel()
+
+	base := config.ChannelsConfig{
+		Weixin: config.WeixinConfig{
+			Enabled:      true,
+			BaseURL:      "https://ilinkai.weixin.qq.com",
+			DefaultBotID: "bot-a",
+			Accounts: []config.WeixinAccountConfig{
+				{
+					BotID:         "bot-a",
+					BotToken:      "token-a",
+					IlinkUserID:   "u-1",
+					ContextToken:  "ctx-a",
+					GetUpdatesBuf: "buf-a",
+				},
+			},
+			ContextToken:  "root-ctx",
+			GetUpdatesBuf: "root-buf",
+		},
+	}
+	next := base
+	next.Weixin.ContextToken = "root-ctx-next"
+	next.Weixin.GetUpdatesBuf = "root-buf-next"
+	next.Weixin.Accounts[0].ContextToken = "ctx-b"
+	next.Weixin.Accounts[0].GetUpdatesBuf = "buf-b"
+
+	left := normalizeHotReloadChannelsConfig(base)
+	right := normalizeHotReloadChannelsConfig(next)
+	if !reflect.DeepEqual(left, right) {
+		t.Fatalf("expected weixin runtime state changes to be ignored during hot reload comparison")
+	}
+
+	next.Weixin.BaseURL = "https://redirect.example"
+	right = normalizeHotReloadChannelsConfig(next)
+	if reflect.DeepEqual(left, right) {
+		t.Fatalf("expected durable weixin config changes to remain visible to hot reload comparison")
+	}
+}
@@ -67,10 +67,12 @@ func (r *gatewayReloader) trigger(source string, forceRuntimeReload bool) error
 			r.state.cfg.Gateway.Host, r.state.cfg.Gateway.Port, newCfg.Gateway.Host, newCfg.Gateway.Port)
 	}
 
+	currentChannels := normalizeHotReloadChannelsConfig(r.state.cfg.Channels)
+	nextChannels := normalizeHotReloadChannelsConfig(newCfg.Channels)
 	runtimeSame := reflect.DeepEqual(r.state.cfg.Agents, newCfg.Agents) &&
 		reflect.DeepEqual(r.state.cfg.Models, newCfg.Models) &&
 		reflect.DeepEqual(r.state.cfg.Tools, newCfg.Tools) &&
-		reflect.DeepEqual(r.state.cfg.Channels, newCfg.Channels)
+		reflect.DeepEqual(currentChannels, nextChannels)
 
 	if runtimeSame && !forceRuntimeReload {
 		configureLogging(newCfg)
@@ -146,6 +148,16 @@ func (r *gatewayReloader) bindWeixinChannel() {
 	}
 }
 
+func normalizeHotReloadChannelsConfig(cfg config.ChannelsConfig) config.ChannelsConfig {
+	cfg.Weixin.ContextToken = ""
+	cfg.Weixin.GetUpdatesBuf = ""
+	for i := range cfg.Weixin.Accounts {
+		cfg.Weixin.Accounts[i].ContextToken = ""
+		cfg.Weixin.Accounts[i].GetUpdatesBuf = ""
+	}
+	return cfg
+}
+
 type configFileFingerprint struct {
 	Size        int64
 	ModUnixNano int64
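The hunk above adds `normalizeHotReloadChannelsConfig`, which blanks runtime-only Weixin fields before the `reflect.DeepEqual` comparison in `trigger`, so token churn alone no longer forces a runtime reload. A self-contained sketch of the same zero-then-compare pattern, using a hypothetical stand-in struct rather than the real `config.WeixinConfig`:

```go
package main

import (
	"fmt"
	"reflect"
)

// weixinLike is an illustrative stand-in: BaseURL is durable
// configuration, ContextToken is runtime state the channel rewrites
// on its own.
type weixinLike struct {
	BaseURL      string
	ContextToken string
}

// normalize blanks runtime-only fields so reflect.DeepEqual compares
// durable configuration only. The parameter is a value copy, so the
// caller's struct is left untouched.
func normalize(c weixinLike) weixinLike {
	c.ContextToken = ""
	return c
}

func main() {
	old := weixinLike{BaseURL: "https://a.example", ContextToken: "t1"}
	next := old
	next.ContextToken = "t2" // runtime churn only

	// true: runtime churn is invisible, so the reloader can skip a reload.
	fmt.Println(reflect.DeepEqual(normalize(old), normalize(next)))

	next.BaseURL = "https://b.example" // a durable change
	// false: durable edits still reach the runtime reload path.
	fmt.Println(reflect.DeepEqual(normalize(old), normalize(next)))
}
```

Passing the config by value keeps the normalization side-effect-free, which is exactly what the new test exercises with its account-level token mutations.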
@@ -180,6 +180,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -208,6 +209,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -237,6 +239,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -253,6 +256,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -280,6 +284,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -306,6 +311,7 @@
       "max_tokens": 8192,
       "temperature": 0.7,
       "responses": {
+        "api": "responses",
         "web_search_enabled": false,
         "web_search_context_size": "",
         "file_search_vector_store_ids": [],
@@ -260,6 +260,7 @@ type ProviderOAuthConfig struct {
 }
 
 type ProviderResponsesConfig struct {
+	API                      string   `json:"api,omitempty"`
 	WebSearchEnabled         bool     `json:"web_search_enabled"`
 	WebSearchContextSize     string   `json:"web_search_context_size"`
 	FileSearchVectorStoreIDs []string `json:"file_search_vector_store_ids"`
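The new `API` field carries `json:"api,omitempty"`, so an unset value disappears from serialized config entirely, while the snapshot hunks above pin `"api": "responses"` — presumably because the default config populates it. A small illustration of the tag's behavior, using a hypothetical stand-in struct rather than the repo's type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// responsesLike mirrors the tag on the new API field for illustration.
type responsesLike struct {
	API              string `json:"api,omitempty"`
	WebSearchEnabled bool   `json:"web_search_enabled"`
}

func main() {
	// An empty API is dropped entirely thanks to omitempty.
	a, _ := json.Marshal(responsesLike{})
	fmt.Println(string(a)) // {"web_search_enabled":false}

	// A populated value serializes as "api", matching the snapshots.
	b, _ := json.Marshal(responsesLike{API: "responses"})
	fmt.Println(string(b)) // {"api":"responses","web_search_enabled":false}
}
```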
@@ -11,6 +11,9 @@ func TestNormalizedViewProjectsCoreAndRuntime(t *testing.T) {
 		MaxTokens:   12288,
 		Temperature: 0.35,
 		TimeoutSec:  90,
+		Responses: ProviderResponsesConfig{
+			API: "chat_completions",
+		},
 	}
 	cfg.Agents.Subagents["coder"] = SubagentConfig{
 		Enabled: true,
@@ -40,4 +43,7 @@
 	if got := view.Runtime.Providers["openai"].Temperature; got != 0.35 {
 		t.Fatalf("expected provider temperature in normalized runtime view, got %v", got)
 	}
+	if got := view.Runtime.Providers["openai"].Responses.API; got != "chat_completions" {
+		t.Fatalf("expected provider responses.api in normalized runtime view, got %q", got)
+	}
 }
@@ -515,6 +515,13 @@ func validateProviderConfig(path string, p ProviderConfig) []error {
 	if p.OAuth.CooldownSec < 0 {
 		errs = append(errs, fmt.Errorf("%s.oauth.cooldown_sec must be >= 0", path))
 	}
+	if p.Responses.API != "" {
+		switch strings.TrimSpace(p.Responses.API) {
+		case "responses", "chat_completions":
+		default:
+			errs = append(errs, fmt.Errorf("%s.responses.api must be one of: responses, chat_completions", path))
+		}
+	}
 	if p.Responses.WebSearchContextSize != "" {
 		switch p.Responses.WebSearchContextSize {
 		case "low", "medium", "high":
@@ -247,3 +247,27 @@ func TestValidateProviderHybridRequiresOAuthProvider(t *testing.T) {
 		t.Fatalf("expected oauth.provider validation error, got %v", errs)
 	}
 }
+
+func TestValidateProviderResponsesAPIRejectsUnknownValue(t *testing.T) {
+	t.Parallel()
+
+	cfg := DefaultConfig()
+	pc := cfg.Models.Providers["openai"]
+	pc.Responses.API = "legacy"
+	cfg.Models.Providers["openai"] = pc
+
+	errs := Validate(cfg)
+	if len(errs) == 0 {
+		t.Fatalf("expected validation errors")
+	}
+	found := false
+	for _, err := range errs {
+		if strings.Contains(err.Error(), "models.providers.openai.responses.api") {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("expected responses.api validation error, got %v", errs)
+	}
+}
@@ -31,6 +31,7 @@ type HTTPProvider struct {
 	apiBase                  string
 	defaultModel             string
 	supportsResponsesCompact bool
+	responsesAPI             string
 	authMode                 string
 	timeout                  time.Duration
 	httpClient               *http.Client
@@ -48,6 +49,7 @@ func NewHTTPProvider(providerName, apiKey, apiBase, defaultModel string, support
 		apiBase:                  normalizedBase,
 		defaultModel:             strings.TrimSpace(defaultModel),
 		supportsResponsesCompact: supportsResponsesCompact,
+		responsesAPI:             "responses",
 		authMode:                 authMode,
 		timeout:                  timeout,
 		httpClient:               &http.Client{Timeout: timeout},
@@ -79,7 +81,7 @@ func (p *HTTPProvider) Chat(ctx context.Context, messages []Message, tools []Too
 	if !json.Valid(body) {
 		return nil, fmt.Errorf("API error (status %d, content-type %q): non-JSON response: %s", statusCode, contentType, previewResponseBody(body))
 	}
-	if p.useOpenAICompatChatUpstream() {
+	if p.useOpenAICompatChatUpstream() || p.useConfiguredOpenAICompatChat() {
 		return parseOpenAICompatResponse(body)
 	}
 	return parseResponsesAPIResponse(body)
@@ -102,7 +104,7 @@ func (p *HTTPProvider) ChatStream(ctx context.Context, messages []Message, tools
 	if !json.Valid(body) {
 		return nil, fmt.Errorf("API error (status %d, content-type %q): non-JSON response: %s", status, ctype, previewResponseBody(body))
 	}
-	if p.useOpenAICompatChatUpstream() {
+	if p.useOpenAICompatChatUpstream() || p.useConfiguredOpenAICompatChat() {
 		return parseOpenAICompatResponse(body)
 	}
 	return parseResponsesAPIResponse(body)
@@ -112,6 +112,18 @@ func (p *HTTPProvider) useOpenAICompatChatUpstream() bool {
 	}
 }
 
+func (p *HTTPProvider) useConfiguredOpenAICompatChat() bool {
+	if p == nil {
+		return false
+	}
+	switch strings.ToLower(strings.TrimSpace(p.responsesAPI)) {
+	case "chat_completions":
+		return true
+	default:
+		return false
+	}
+}
+
 func (p *HTTPProvider) compatBase() string {
 	switch p.oauthProvider() {
 	case defaultQwenOAuthProvider:
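Extracted into standalone form, the new predicate behaves as below — a stand-in function, not the repo's code, reproducing the trim-and-lowercase switch from the hunk above:

```go
package main

import (
	"fmt"
	"strings"
)

// useConfiguredChat mirrors useConfiguredOpenAICompatChat: only an
// explicit "chat_completions" (after trimming and lowercasing) opts
// in to the chat-completions upstream.
func useConfiguredChat(responsesAPI string) bool {
	switch strings.ToLower(strings.TrimSpace(responsesAPI)) {
	case "chat_completions":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(useConfiguredChat("chat_completions"))    // true
	fmt.Println(useConfiguredChat("  Chat_Completions ")) // true: tolerant of case and padding
	fmt.Println(useConfiguredChat("responses"))           // false: default Responses API path
	fmt.Println(useConfiguredChat(""))                    // false: unset keeps the old behavior
}
```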
@@ -1,6 +1,7 @@
 package providers
 
 import (
 	"encoding/json"
+	"net/http"
 	"net/http/httptest"
 	"strings"
@@ -180,3 +181,37 @@ func TestBuildOpenAICompatChatRequestStripsKimiPrefixAndSuffix(t *testing.T) {
 		t.Fatalf("reasoning_effort = %#v, want auto", got)
 	}
 }
+
+func TestHTTPProviderChatUsesConfiguredChatCompletionsAPI(t *testing.T) {
+	var gotPath string
+	var gotBody map[string]interface{}
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		gotPath = r.URL.Path
+		if err := json.NewDecoder(r.Body).Decode(&gotBody); err != nil {
+			t.Fatalf("decode request: %v", err)
+		}
+		w.Header().Set("Content-Type", "application/json")
+		_, _ = w.Write([]byte(`{"choices":[{"message":{"content":"hello from chat"},"finish_reason":"stop"}],"usage":{"prompt_tokens":1,"completion_tokens":2,"total_tokens":3}}`))
+	}))
+	defer server.Close()
+
+	provider := NewHTTPProvider("openai", "token", server.URL+"/v1", "gpt-5", false, "api_key", 5*time.Second, nil)
+	provider.responsesAPI = "chat_completions"
+
+	resp, err := provider.Chat(t.Context(), []Message{{Role: "user", Content: "hi"}}, nil, "gpt-5", nil)
+	if err != nil {
+		t.Fatalf("Chat error: %v", err)
+	}
+	if gotPath != "/v1/chat/completions" {
+		t.Fatalf("path = %q, want /v1/chat/completions", gotPath)
+	}
+	if gotBody["model"] != "gpt-5" {
+		t.Fatalf("model = %#v, want gpt-5", gotBody["model"])
+	}
+	if resp.Content != "hello from chat" {
+		t.Fatalf("content = %q, want hello from chat", resp.Content)
+	}
+	if resp.Usage == nil || resp.Usage.TotalTokens != 3 {
+		t.Fatalf("usage = %#v, want total_tokens=3", resp.Usage)
+	}
+}
@@ -116,7 +116,11 @@ func CreateProviderByName(cfg *config.Config, name string) (LLMProvider, error)
 	if oauthProvider == defaultIFlowOAuthProvider || strings.EqualFold(routeName, defaultIFlowOAuthProvider) {
 		return NewIFlowProvider(routeName, pc.APIKey, pc.APIBase, defaultModel, pc.SupportsResponsesCompact, pc.Auth, time.Duration(pc.TimeoutSec)*time.Second, oauth), nil
 	}
-	return NewHTTPProvider(routeName, pc.APIKey, pc.APIBase, defaultModel, pc.SupportsResponsesCompact, pc.Auth, time.Duration(pc.TimeoutSec)*time.Second, oauth), nil
+	provider := NewHTTPProvider(routeName, pc.APIKey, pc.APIBase, defaultModel, pc.SupportsResponsesCompact, pc.Auth, time.Duration(pc.TimeoutSec)*time.Second, oauth)
+	if api := strings.TrimSpace(pc.Responses.API); api != "" {
+		provider.responsesAPI = api
+	}
+	return provider, nil
 }
 
 func ProviderSupportsResponsesCompact(cfg *config.Config, name string) bool {
@@ -44,7 +44,7 @@ func (p *HTTPProvider) callResponses(ctx context.Context, messages []Message, to
 	if prevID, ok := stringOption(options, "responses_previous_response_id"); ok && prevID != "" {
 		requestBody["previous_response_id"] = prevID
 	}
-	if p.useOpenAICompatChatUpstream() {
+	if p.useOpenAICompatChatUpstream() || p.useConfiguredOpenAICompatChat() {
 		chatBody := p.buildOpenAICompatChatRequest(messages, tools, model, options)
 		return p.postJSON(ctx, endpointFor(p.compatBase(), "/chat/completions"), chatBody)
 	}
@@ -309,7 +309,7 @@ func (p *HTTPProvider) callResponsesStream(ctx context.Context, messages []Messa
 	if streamOpts, ok := mapOption(options, "responses_stream_options"); ok && len(streamOpts) > 0 {
 		requestBody["stream_options"] = streamOpts
 	}
-	if p.useOpenAICompatChatUpstream() {
+	if p.useOpenAICompatChatUpstream() || p.useConfiguredOpenAICompatChat() {
 		chatBody := p.buildOpenAICompatChatRequest(messages, tools, model, options)
 		chatBody["stream"] = true
 		streamOptions := map[string]interface{}{"include_usage": true}