This commit is contained in:
lpf
2026-02-18 23:13:17 +08:00
parent ddca0605c4
commit d47b6428c8
9 changed files with 231 additions and 37 deletions

View File

@@ -11,7 +11,7 @@
- **自主协作能力**:支持自然语言驱动的自主执行、自动学习与启动自检。
- **多智能体编排**:支持 Pipeline 协议(`role + goal + depends_on + shared_state`)。
- **记忆与上下文治理**:支持分层记忆、`memory_search` 与自动上下文压缩。
- **可靠性增强**模型请求支持 fallback覆盖配额、路由、网关瞬时错误等场景。
- **可靠性增强**:支持代理内模型切换与跨代理切换(`proxy_fallbacks`),覆盖配额、路由、网关瞬时错误等场景。
- **安全防护**:Shell Risk Gate、Sentinel 巡检与自动修复能力。
- **技能扩展**:支持内置技能与 GitHub 技能安装,支持原子脚本执行。
@@ -136,6 +136,7 @@ clawgo channel test --channel telegram --to <chat_id> -m "ping"
- 启动会读取 `AGENTS.md`、`SOUL.md`、`USER.md` 作为行为约束与语义上下文。
- 网关启动后会执行一次自检任务,结合历史会话与 `memory/HEARTBEAT.md` 判断是否继续未完成任务。
- 上下文压缩同时按消息数量阈值和上下文体积阈值触发,控制 token 成本与长会话稳定性。
- 上下文压缩模式支持 `summary`、`responses_compact`、`hybrid`;`responses_compact` 需要代理配置 `protocol=responses` 且 `supports_responses_compact=true`。
- 分层记忆支持 `profile / project / procedures / recent notes`
上下文压缩配置示例:
@@ -145,6 +146,7 @@ clawgo channel test --channel telegram --to <chat_id> -m "ping"
"defaults": {
"context_compaction": {
"enabled": true,
"mode": "summary",
"trigger_messages": 60,
"keep_recent_messages": 20,
"max_summary_chars": 6000,
@@ -167,7 +169,7 @@ clawgo channel test --channel telegram --to <chat_id> -m "ping"
## 🛡️ 风险防护与稳定性
- **Model fallback**主模型失败时可回退到候选模型,覆盖限流、配额、网关瞬时异常、上游路由异常
- **Proxy/Model fallback**:先在当前代理中按 `models` 顺序切换,全部失败后再按 `proxy_fallbacks` 切换代理。
- **HTTP 兼容处理**:可识别非 JSON 错页并给出响应预览;兼容从 `<function_call>` 文本块提取工具调用。
- **Shell Risk Gate**:高风险命令默认阻断,支持 dry-run 与 force 策略。
- **Sentinel**:周期巡检配置/内存/日志目录,支持自动修复与告警转发。

View File

@@ -11,7 +11,7 @@
- **Autonomous collaboration**: natural-language autonomy, auto-learning, and startup self-check.
- **Multi-agent orchestration**: built-in Pipeline protocol (`role + goal + depends_on + shared_state`).
- **Memory and context governance**: layered memory, `memory_search`, and automatic context compaction.
- **Reliability enhancements**: model fallback for quota, routing, and transient gateway failures.
- **Reliability enhancements**: in-proxy model switching and cross-proxy fallback (`proxy_fallbacks`) for quota, routing, and transient gateway failures.
- **Safety controls**: Shell Risk Gate, Sentinel inspection, and auto-heal support.
- **Skill extensibility**: built-in skills plus GitHub skill installation and atomic script execution.
@@ -136,6 +136,7 @@ clawgo channel test --channel telegram --to <chat_id> -m "ping"
- On startup, the agent loads `AGENTS.md`, `SOUL.md`, and `USER.md` as behavior and semantic constraints.
- Gateway startup triggers a self-check task using history and `memory/HEARTBEAT.md` to decide whether unfinished tasks should continue.
- Context compaction is triggered by both message-count and transcript-size thresholds.
- Compaction modes are `summary`, `responses_compact`, and `hybrid`; `responses_compact` requires `protocol=responses` and `supports_responses_compact=true` on the active proxy.
- Layered memory supports `profile / project / procedures / recent notes`.
Context compaction config example:
@@ -145,6 +146,7 @@ Context compaction config example:
"defaults": {
"context_compaction": {
"enabled": true,
"mode": "summary",
"trigger_messages": 60,
"keep_recent_messages": 20,
"max_summary_chars": 6000,
@@ -167,7 +169,7 @@ Useful for complex task decomposition, role-based execution, and shared state wo
## 🛡️ Safety and Reliability
- **Model fallback**: retries with fallback models on quota/rate limits, transient gateway failures, and upstream auth-routing errors.
- **Proxy/model fallback**: retries models in the current proxy first, then switches proxies in `proxy_fallbacks` when all models fail.
- **HTTP compatibility handling**: detects non-JSON error pages with body preview; parses tool calls from `<function_call>` blocks.
- **Shell Risk Gate**: blocks destructive operations by default; supports dry-run and force policies.
- **Sentinel**: periodic checks for config/memory/log resources with optional auto-heal and notifications.

View File

@@ -296,6 +296,7 @@ func onboard() {
fmt.Println("\nNext steps:")
fmt.Println(" 1. Configure CLIProxyAPI at", configPath)
fmt.Println(" Ensure CLIProxyAPI is running: https://github.com/router-for-me/CLIProxyAPI")
fmt.Println(" Set providers.<name>.protocol/models; use supports_responses_compact=true only with protocol=responses")
fmt.Println(" 2. Chat: clawgo agent -m \"Hello!\"")
}
@@ -1509,9 +1510,11 @@ func statusCmd() {
if _, err := os.Stat(configPath); err == nil {
activeProvider := cfg.Providers.Proxy
activeProxyName := "proxy"
if name := strings.TrimSpace(cfg.Agents.Defaults.Proxy); name != "" && name != "proxy" {
if p, ok := cfg.Providers.Proxies[name]; ok {
activeProvider = p
activeProxyName = name
}
}
activeModel := ""
@@ -1522,7 +1525,9 @@ func statusCmd() {
}
}
fmt.Printf("Model: %s\n", activeModel)
fmt.Printf("Proxy: %s\n", activeProxyName)
fmt.Printf("CLIProxyAPI Base: %s\n", cfg.Providers.Proxy.APIBase)
fmt.Printf("Supports /v1/responses/compact: %v\n", providers.ProviderSupportsResponsesCompact(cfg, activeProxyName))
hasKey := cfg.Providers.Proxy.APIKey != ""
status := "not set"
if hasKey {

View File

@@ -9,6 +9,7 @@
"max_tool_iterations": 20,
"context_compaction": {
"enabled": true,
"mode": "summary",
"trigger_messages": 60,
"keep_recent_messages": 20,
"max_summary_chars": 6000,
@@ -59,6 +60,7 @@
"api_base": "http://localhost:8080/v1",
"protocol": "chat_completions",
"models": ["glm-4.7", "gpt-4o-mini"],
"supports_responses_compact": false,
"auth": "bearer",
"timeout_sec": 90
},
@@ -68,6 +70,7 @@
"api_base": "http://localhost:8081/v1",
"protocol": "responses",
"models": ["gpt-4o-mini", "deepseek-chat"],
"supports_responses_compact": true,
"auth": "bearer",
"timeout_sec": 90
}

View File

@@ -2051,7 +2051,7 @@ func (al *AgentLoop) maybeCompactContext(ctx context.Context, sessionKey string)
compactUntil := len(history) - cfg.KeepRecentMessages
compactCtx, cancel := context.WithTimeout(ctx, 25*time.Second)
defer cancel()
newSummary, err := al.buildCompactedSummary(compactCtx, summary, history[:compactUntil], cfg.MaxTranscriptChars)
newSummary, err := al.buildCompactedSummary(compactCtx, summary, history[:compactUntil], cfg.MaxTranscriptChars, cfg.MaxSummaryChars, cfg.Mode)
if err != nil {
return err
}
@@ -2081,12 +2081,34 @@ func (al *AgentLoop) buildCompactedSummary(
existingSummary string,
messages []providers.Message,
maxTranscriptChars int,
maxSummaryChars int,
mode string,
) (string, error) {
mode = normalizeCompactionMode(mode)
transcript := formatCompactionTranscript(messages, maxTranscriptChars)
if strings.TrimSpace(transcript) == "" {
return strings.TrimSpace(existingSummary), nil
}
if mode == "responses_compact" || mode == "hybrid" {
if compactor, ok := al.provider.(providers.ResponsesCompactor); ok && compactor.SupportsResponsesCompact() {
compactSummary, err := compactor.BuildSummaryViaResponsesCompact(ctx, al.model, existingSummary, messages, maxSummaryChars)
if err == nil && strings.TrimSpace(compactSummary) != "" {
if mode == "responses_compact" {
return compactSummary, nil
}
existingSummary = strings.TrimSpace(existingSummary + "\n\n" + compactSummary)
} else if mode == "responses_compact" {
if err != nil {
return "", err
}
return "", fmt.Errorf("responses_compact produced empty summary")
}
} else if mode == "responses_compact" {
return "", fmt.Errorf("responses_compact mode requires provider support and protocol=responses")
}
}
systemPrompt := al.withBootstrapPolicy(`You are a conversation compactor. Merge prior summary and transcript into a concise, factual memory for future turns. Keep user preferences, constraints, decisions, unresolved tasks, and key technical context. Do not include speculative content.`)
userPrompt := fmt.Sprintf("Existing summary:\n%s\n\nTranscript to compact:\n%s\n\nReturn a compact markdown summary with sections: Key Facts, Decisions, Open Items, Next Steps.",
strings.TrimSpace(existingSummary), transcript)
@@ -2104,6 +2126,19 @@ func (al *AgentLoop) buildCompactedSummary(
return resp.Content, nil
}
// normalizeCompactionMode maps a raw config value to one of the three
// supported context-compaction modes. Whitespace is trimmed first; blank
// or unrecognized values fall back to the default "summary" mode.
func normalizeCompactionMode(raw string) string {
	mode := strings.TrimSpace(raw)
	if mode == "responses_compact" || mode == "hybrid" {
		return mode
	}
	return "summary"
}
func formatCompactionTranscript(messages []providers.Message, maxChars int) string {
if maxChars <= 0 || len(messages) == 0 {
return ""
@@ -2571,9 +2606,21 @@ func (al *AgentLoop) handleSlashCommand(ctx context.Context, msg bus.InboundMess
if err != nil {
return true, "", fmt.Errorf("status failed: %w", err)
}
return true, fmt.Sprintf("Model: %s\nAPI Base: %s\nLogging: %v\nConfig: %s",
activeProxy := strings.TrimSpace(al.proxy)
if activeProxy == "" {
activeProxy = "proxy"
}
activeBase := cfg.Providers.Proxy.APIBase
if activeProxy != "proxy" {
if p, ok := cfg.Providers.Proxies[activeProxy]; ok {
activeBase = p.APIBase
}
}
return true, fmt.Sprintf("Model: %s\nProxy: %s\nAPI Base: %s\nResponses Compact: %v\nLogging: %v\nConfig: %s",
al.model,
cfg.Providers.Proxy.APIBase,
activeProxy,
activeBase,
providers.ProviderSupportsResponsesCompact(cfg, activeProxy),
cfg.Logging.Enabled,
al.getConfigPathForCommands(),
), nil

View File

@@ -38,11 +38,12 @@ type AgentDefaults struct {
}
type ContextCompactionConfig struct {
Enabled bool `json:"enabled" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_ENABLED"`
TriggerMessages int `json:"trigger_messages" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_TRIGGER_MESSAGES"`
KeepRecentMessages int `json:"keep_recent_messages" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_KEEP_RECENT_MESSAGES"`
MaxSummaryChars int `json:"max_summary_chars" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_MAX_SUMMARY_CHARS"`
MaxTranscriptChars int `json:"max_transcript_chars" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_MAX_TRANSCRIPT_CHARS"`
Enabled bool `json:"enabled" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_ENABLED"`
Mode string `json:"mode" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_MODE"`
TriggerMessages int `json:"trigger_messages" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_TRIGGER_MESSAGES"`
KeepRecentMessages int `json:"keep_recent_messages" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_KEEP_RECENT_MESSAGES"`
MaxSummaryChars int `json:"max_summary_chars" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_MAX_SUMMARY_CHARS"`
MaxTranscriptChars int `json:"max_transcript_chars" env:"CLAWGO_AGENTS_DEFAULTS_CONTEXT_COMPACTION_MAX_TRANSCRIPT_CHARS"`
}
type ChannelsConfig struct {
@@ -109,12 +110,13 @@ type ProvidersConfig struct {
}
type ProviderConfig struct {
APIKey string `json:"api_key" env:"CLAWGO_PROVIDERS_{{.Name}}_API_KEY"`
APIBase string `json:"api_base" env:"CLAWGO_PROVIDERS_{{.Name}}_API_BASE"`
Protocol string `json:"protocol" env:"CLAWGO_PROVIDERS_{{.Name}}_PROTOCOL"`
Models []string `json:"models" env:"CLAWGO_PROVIDERS_{{.Name}}_MODELS"`
Auth string `json:"auth" env:"CLAWGO_PROVIDERS_{{.Name}}_AUTH"`
TimeoutSec int `json:"timeout_sec" env:"CLAWGO_PROVIDERS_PROXY_TIMEOUT_SEC"`
APIKey string `json:"api_key" env:"CLAWGO_PROVIDERS_{{.Name}}_API_KEY"`
APIBase string `json:"api_base" env:"CLAWGO_PROVIDERS_{{.Name}}_API_BASE"`
Protocol string `json:"protocol" env:"CLAWGO_PROVIDERS_{{.Name}}_PROTOCOL"`
Models []string `json:"models" env:"CLAWGO_PROVIDERS_{{.Name}}_MODELS"`
SupportsResponsesCompact bool `json:"supports_responses_compact" env:"CLAWGO_PROVIDERS_{{.Name}}_SUPPORTS_RESPONSES_COMPACT"`
Auth string `json:"auth" env:"CLAWGO_PROVIDERS_{{.Name}}_AUTH"`
TimeoutSec int `json:"timeout_sec" env:"CLAWGO_PROVIDERS_PROXY_TIMEOUT_SEC"`
}
type GatewayConfig struct {
@@ -238,6 +240,7 @@ func DefaultConfig() *Config {
MaxToolIterations: 20,
ContextCompaction: ContextCompactionConfig{
Enabled: true,
Mode: "summary",
TriggerMessages: 60,
KeepRecentMessages: 20,
MaxSummaryChars: 6000,

View File

@@ -1,6 +1,9 @@
package config
import "fmt"
import (
"fmt"
"strings"
)
// Validate returns configuration problems found in cfg.
// It does not mutate cfg.
@@ -16,6 +19,13 @@ func Validate(cfg *Config) []error {
}
if cfg.Agents.Defaults.ContextCompaction.Enabled {
cc := cfg.Agents.Defaults.ContextCompaction
if cc.Mode != "" {
switch cc.Mode {
case "summary", "responses_compact", "hybrid":
default:
errs = append(errs, fmt.Errorf("agents.defaults.context_compaction.mode must be one of: summary, responses_compact, hybrid"))
}
}
if cc.TriggerMessages <= 0 {
errs = append(errs, fmt.Errorf("agents.defaults.context_compaction.trigger_messages must be > 0 when enabled=true"))
}
@@ -50,6 +60,15 @@ func Validate(cfg *Config) []error {
errs = append(errs, fmt.Errorf("agents.defaults.proxy_fallbacks contains unknown proxy %q", name))
}
}
if cfg.Agents.Defaults.ContextCompaction.Enabled && cfg.Agents.Defaults.ContextCompaction.Mode == "responses_compact" {
active := cfg.Agents.Defaults.Proxy
if active == "" {
active = "proxy"
}
if pc, ok := providerConfigByName(cfg, active); !ok || !pc.SupportsResponsesCompact || pc.Protocol != "responses" {
errs = append(errs, fmt.Errorf("context_compaction.mode=responses_compact requires active proxy %q with protocol=responses and supports_responses_compact=true", active))
}
}
if cfg.Gateway.Port <= 0 || cfg.Gateway.Port > 65535 {
errs = append(errs, fmt.Errorf("gateway.port must be in 1..65535"))
@@ -150,6 +169,9 @@ func validateProviderConfig(path string, p ProviderConfig) []error {
errs = append(errs, fmt.Errorf("%s.protocol must be one of: chat_completions, responses", path))
}
}
if p.SupportsResponsesCompact && p.Protocol != "responses" {
errs = append(errs, fmt.Errorf("%s.supports_responses_compact=true requires protocol=responses", path))
}
if p.TimeoutSec <= 0 {
errs = append(errs, fmt.Errorf("%s.timeout_sec must be > 0", path))
}
@@ -169,3 +191,11 @@ func providerExists(cfg *Config, name string) bool {
_, ok := cfg.Providers.Proxies[name]
return ok
}
// providerConfigByName resolves a provider configuration by proxy name.
// The reserved name "proxy" (after trimming whitespace) refers to the
// default provider and always resolves; any other name is looked up in
// Providers.Proxies, with the second result reporting whether it exists.
func providerConfigByName(cfg *Config, name string) (ProviderConfig, bool) {
	if strings.TrimSpace(name) != "proxy" {
		pc, ok := cfg.Providers.Proxies[name]
		return pc, ok
	}
	return cfg.Providers.Proxy, true
}

View File

@@ -32,17 +32,18 @@ const (
)
type HTTPProvider struct {
apiKey string
apiBase string
protocol string
defaultModel string
authMode string
timeout time.Duration
httpClient *http.Client
client openai.Client
apiKey string
apiBase string
protocol string
defaultModel string
supportsResponsesCompact bool
authMode string
timeout time.Duration
httpClient *http.Client
client openai.Client
}
func NewHTTPProvider(apiKey, apiBase, protocol, defaultModel, authMode string, timeout time.Duration) *HTTPProvider {
func NewHTTPProvider(apiKey, apiBase, protocol, defaultModel string, supportsResponsesCompact bool, authMode string, timeout time.Duration) *HTTPProvider {
normalizedBase := normalizeAPIBase(apiBase)
resolvedProtocol := normalizeProtocol(protocol)
resolvedDefaultModel := strings.TrimSpace(defaultModel)
@@ -64,14 +65,15 @@ func NewHTTPProvider(apiKey, apiBase, protocol, defaultModel, authMode string, t
}
return &HTTPProvider{
apiKey: apiKey,
apiBase: normalizedBase,
protocol: resolvedProtocol,
defaultModel: resolvedDefaultModel,
authMode: authMode,
timeout: timeout,
httpClient: httpClient,
client: openai.NewClient(clientOpts...),
apiKey: apiKey,
apiBase: normalizedBase,
protocol: resolvedProtocol,
defaultModel: resolvedDefaultModel,
supportsResponsesCompact: supportsResponsesCompact,
authMode: authMode,
timeout: timeout,
httpClient: httpClient,
client: openai.NewClient(clientOpts...),
}
}
@@ -575,6 +577,80 @@ func (p *HTTPProvider) GetDefaultModel() string {
return p.defaultModel
}
// SupportsResponsesCompact reports whether this provider can serve the
// /v1/responses/compact endpoint: the supports_responses_compact flag must
// be set and the provider must speak the responses protocol. It is safe to
// call on a nil receiver.
func (p *HTTPProvider) SupportsResponsesCompact() bool {
	if p == nil {
		return false
	}
	return p.supportsResponsesCompact && p.protocol == ProtocolResponses
}
// BuildSummaryViaResponsesCompact compacts the conversation through the
// OpenAI /v1/responses/compact endpoint, then issues a second request asking
// the model to turn the compacted payload into a markdown summary with
// sections Key Facts, Decisions, Open Items, and Next Steps.
//
// A non-empty existingSummary is prepended as a system message so server-side
// compaction can merge it with the transcript. The returned summary is
// truncated when maxSummaryChars is positive. It fails fast when
// SupportsResponsesCompact() reports false.
func (p *HTTPProvider) BuildSummaryViaResponsesCompact(
	ctx context.Context,
	model string,
	existingSummary string,
	messages []Message,
	maxSummaryChars int,
) (string, error) {
	if !p.SupportsResponsesCompact() {
		return "", fmt.Errorf("responses compact is not enabled for this provider")
	}
	// Assemble the compact request input: the optional prior summary first,
	// then every message converted into responses-API input items.
	inputItems := make(responses.ResponseInputParam, 0, len(messages)+1)
	if strings.TrimSpace(existingSummary) != "" {
		inputItems = append(inputItems, responses.ResponseInputItemParamOfMessage(
			"Existing summary:\n"+strings.TrimSpace(existingSummary),
			responses.EasyInputMessageRoleSystem,
		))
	}
	for _, msg := range messages {
		inputItems = append(inputItems, toResponsesInputItems(msg)...)
	}
	// Nothing to compact: return whatever summary we already had.
	if len(inputItems) == 0 {
		return strings.TrimSpace(existingSummary), nil
	}
	compacted, err := p.client.Responses.Compact(ctx, responses.ResponseCompactParams{
		Model: responses.ResponseCompactParamsModel(model),
		Input: responses.ResponseCompactParamsInputUnion{
			OfResponseInputItemArray: inputItems,
		},
	})
	if err != nil {
		return "", fmt.Errorf("responses compact request failed: %w", err)
	}
	// Serialize the compacted output so it can be embedded verbatim in the
	// follow-up summarization prompt.
	payload, err := json.Marshal(compacted.Output)
	if err != nil {
		return "", fmt.Errorf("failed to serialize compact output: %w", err)
	}
	compactedPayload := strings.TrimSpace(string(payload))
	if compactedPayload == "" {
		return "", fmt.Errorf("empty compact output")
	}
	// Cap the embedded payload to keep the follow-up request small.
	// NOTE(review): byte-based slicing can split a multi-byte UTF-8 rune;
	// confirm whether rune-safe truncation is needed here.
	if len(compactedPayload) > 12000 {
		compactedPayload = compactedPayload[:12000] + "..."
	}
	summaryPrompt := fmt.Sprintf(
		"Compacted conversation JSON:\n%s\n\nReturn a concise markdown summary with sections: Key Facts, Decisions, Open Items, Next Steps.",
		compactedPayload,
	)
	// Second round-trip: ask the model for a human-readable summary of the
	// compacted JSON.
	summaryResp, err := p.client.Responses.New(ctx, responses.ResponseNewParams{
		Model: model,
		Input: responses.ResponseNewParamsInputUnion{
			OfString: param.NewOpt(summaryPrompt),
		},
	})
	if err != nil {
		return "", fmt.Errorf("responses summary request failed: %w", err)
	}
	summary := strings.TrimSpace(summaryResp.OutputText())
	if summary == "" {
		return "", fmt.Errorf("empty summary after responses compact")
	}
	// Enforce the configured size cap (same byte-truncation caveat as above).
	if maxSummaryChars > 0 && len(summary) > maxSummaryChars {
		summary = summary[:maxSummaryChars]
	}
	return summary, nil
}
func CreateProvider(cfg *config.Config) (LLMProvider, error) {
name := strings.TrimSpace(cfg.Agents.Defaults.Proxy)
if name == "" {
@@ -598,7 +674,15 @@ func CreateProviderByName(cfg *config.Config, name string) (LLMProvider, error)
if len(pc.Models) > 0 {
defaultModel = pc.Models[0]
}
return NewHTTPProvider(pc.APIKey, pc.APIBase, pc.Protocol, defaultModel, pc.Auth, time.Duration(pc.TimeoutSec)*time.Second), nil
return NewHTTPProvider(
pc.APIKey,
pc.APIBase,
pc.Protocol,
defaultModel,
pc.SupportsResponsesCompact,
pc.Auth,
time.Duration(pc.TimeoutSec)*time.Second,
), nil
}
func CreateProviders(cfg *config.Config) (map[string]LLMProvider, error) {
@@ -635,6 +719,17 @@ func GetProviderModels(cfg *config.Config, name string) []string {
return out
}
// ProviderSupportsResponsesCompact reports whether the named provider is
// configured for the /v1/responses/compact endpoint: the provider must both
// opt in via supports_responses_compact and use the responses protocol.
// Unknown provider names report false.
func ProviderSupportsResponsesCompact(cfg *config.Config, name string) bool {
	pc, err := getProviderConfigByName(cfg, name)
	switch {
	case err != nil:
		return false
	case !pc.SupportsResponsesCompact:
		return false
	default:
		return normalizeProtocol(pc.Protocol) == ProtocolResponses
	}
}
func ListProviderNames(cfg *config.Config) []string {
configs := getAllProviderConfigs(cfg)
if len(configs) == 0 {

View File

@@ -40,6 +40,13 @@ type LLMProvider interface {
GetDefaultModel() string
}
// ResponsesCompactor is an optional capability interface.
// Providers that support OpenAI /v1/responses/compact can implement this.
type ResponsesCompactor interface {
	// SupportsResponsesCompact reports whether the provider is currently able
	// to serve compact requests (feature enabled and protocol compatible).
	SupportsResponsesCompact() bool
	// BuildSummaryViaResponsesCompact compacts messages (merged with any
	// existingSummary) server-side and returns a markdown summary; a positive
	// maxSummaryChars caps the summary length.
	BuildSummaryViaResponsesCompact(ctx context.Context, model string, existingSummary string, messages []Message, maxSummaryChars int) (string, error)
}
type ToolDefinition struct {
Type string `json:"type"`
Function ToolFunctionDefinition `json:"function"`