refactor(agent): extract reusable tool loop and make subagents independent
Extract core LLM tool loop logic into a shared RunToolLoop function that can be used by both the main agent and subagents. Subagents now run their own tool loop with a dedicated tool registry, enabling full independence.

Key changes:
- New pkg/tools/toolloop.go with reusable tool execution logic
- Subagents use the message tool to communicate directly with users
- Heartbeat processing is now stateless via ProcessHeartbeat
- Simplified system message routing without result forwarding
- Shared tool registry creation for consistency between agents

This architecture follows openclaw's design, where async tools notify via the bus and subagents handle their own user communication.
This commit is contained in:
@@ -28,6 +28,8 @@ type SubagentManager struct {
|
||||
defaultModel string
|
||||
bus *bus.MessageBus
|
||||
workspace string
|
||||
tools *ToolRegistry
|
||||
maxIterations int
|
||||
nextID int
|
||||
}
|
||||
|
||||
@@ -38,10 +40,27 @@ func NewSubagentManager(provider providers.LLMProvider, defaultModel, workspace
|
||||
defaultModel: defaultModel,
|
||||
bus: bus,
|
||||
workspace: workspace,
|
||||
tools: NewToolRegistry(),
|
||||
maxIterations: 10,
|
||||
nextID: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// SetTools replaces the tool registry used for subagent execution.
// If never called, subagents use the registry created by NewSubagentManager.
// Safe for concurrent use: the swap is guarded by sm.mu.
func (sm *SubagentManager) SetTools(tools *ToolRegistry) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	sm.tools = tools
}
|
||||
|
||||
// RegisterTool registers a single tool for subagent execution.
// Safe for concurrent use: registration is guarded by sm.mu.
func (sm *SubagentManager) RegisterTool(tool Tool) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	sm.tools.Register(tool)
}
|
||||
|
||||
func (sm *SubagentManager) Spawn(ctx context.Context, task, label, originChannel, originChatID string, callback AsyncCallback) (string, error) {
|
||||
sm.mu.Lock()
|
||||
defer sm.mu.Unlock()
|
||||
@@ -73,10 +92,15 @@ func (sm *SubagentManager) runTask(ctx context.Context, task *SubagentTask, call
|
||||
task.Status = "running"
|
||||
task.Created = time.Now().UnixMilli()
|
||||
|
||||
// Build system prompt for subagent
|
||||
systemPrompt := `You are a subagent. Complete the given task independently and report the result.
|
||||
You have access to tools - use them as needed to complete your task.
|
||||
After completing the task, provide a clear summary of what was done.`
|
||||
|
||||
messages := []providers.Message{
|
||||
{
|
||||
Role: "system",
|
||||
Content: "You are a subagent. Complete the given task independently and report the result.",
|
||||
Content: systemPrompt,
|
||||
},
|
||||
{
|
||||
Role: "user",
|
||||
@@ -95,9 +119,22 @@ func (sm *SubagentManager) runTask(ctx context.Context, task *SubagentTask, call
|
||||
default:
|
||||
}
|
||||
|
||||
response, err := sm.provider.Chat(ctx, messages, nil, sm.defaultModel, map[string]interface{}{
|
||||
"max_tokens": 4096,
|
||||
})
|
||||
// Run tool loop with access to tools
|
||||
sm.mu.RLock()
|
||||
tools := sm.tools
|
||||
maxIter := sm.maxIterations
|
||||
sm.mu.RUnlock()
|
||||
|
||||
loopResult, err := RunToolLoop(ctx, ToolLoopConfig{
|
||||
Provider: sm.provider,
|
||||
Model: sm.defaultModel,
|
||||
Tools: tools,
|
||||
MaxIterations: maxIter,
|
||||
LLMOptions: map[string]any{
|
||||
"max_tokens": 4096,
|
||||
"temperature": 0.7,
|
||||
},
|
||||
}, messages, task.OriginChannel, task.OriginChatID)
|
||||
|
||||
sm.mu.Lock()
|
||||
var result *ToolResult
|
||||
@@ -127,10 +164,10 @@ func (sm *SubagentManager) runTask(ctx context.Context, task *SubagentTask, call
|
||||
}
|
||||
} else {
|
||||
task.Status = "completed"
|
||||
task.Result = response.Content
|
||||
task.Result = loopResult.Content
|
||||
result = &ToolResult{
|
||||
ForLLM: fmt.Sprintf("Subagent '%s' completed: %s", task.Label, response.Content),
|
||||
ForUser: response.Content,
|
||||
ForLLM: fmt.Sprintf("Subagent '%s' completed (iterations: %d): %s", task.Label, loopResult.Iterations, loopResult.Content),
|
||||
ForUser: loopResult.Content,
|
||||
Silent: false,
|
||||
IsError: false,
|
||||
Async: false,
|
||||
|
||||
165
pkg/tools/toolloop.go
Normal file
165
pkg/tools/toolloop.go
Normal file
@@ -0,0 +1,165 @@
|
||||
// PicoClaw - Ultra-lightweight personal AI agent
|
||||
// Inspired by and based on nanobot: https://github.com/HKUDS/nanobot
|
||||
// License: MIT
|
||||
//
|
||||
// Copyright (c) 2026 PicoClaw contributors
|
||||
|
||||
package tools
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/sipeed/picoclaw/pkg/logger"
|
||||
"github.com/sipeed/picoclaw/pkg/providers"
|
||||
"github.com/sipeed/picoclaw/pkg/utils"
|
||||
)
|
||||
|
||||
// ToolLoopConfig configures the tool execution loop.
type ToolLoopConfig struct {
	// Provider is the LLM backend used for each chat completion call.
	Provider providers.LLMProvider
	// Model is the model identifier passed to the provider.
	Model string
	// Tools is the registry of callable tools; may be nil, in which case
	// no tool definitions are offered to the LLM.
	Tools *ToolRegistry
	// MaxIterations bounds the number of LLM round-trips in one loop run.
	MaxIterations int
	// LLMOptions holds provider options (e.g. "max_tokens", "temperature");
	// nil selects the loop's built-in defaults.
	LLMOptions map[string]any
}
|
||||
|
||||
// ToolLoopResult contains the result of running the tool loop.
type ToolLoopResult struct {
	// Content is the final assistant text. It is empty when the iteration
	// budget ran out before the LLM produced a tool-free answer.
	Content string
	// Iterations is the number of LLM calls actually performed.
	Iterations int
}
||||
|
||||
// RunToolLoop executes the LLM + tool call iteration loop.
|
||||
// This is the core agent logic that can be reused by both main agent and subagents.
|
||||
func RunToolLoop(ctx context.Context, config ToolLoopConfig, messages []providers.Message, channel, chatID string) (*ToolLoopResult, error) {
|
||||
iteration := 0
|
||||
var finalContent string
|
||||
|
||||
for iteration < config.MaxIterations {
|
||||
iteration++
|
||||
|
||||
logger.DebugCF("toolloop", "LLM iteration",
|
||||
map[string]any{
|
||||
"iteration": iteration,
|
||||
"max": config.MaxIterations,
|
||||
})
|
||||
|
||||
// 1. Build tool definitions
|
||||
var providerToolDefs []providers.ToolDefinition
|
||||
if config.Tools != nil {
|
||||
toolDefs := config.Tools.GetDefinitions()
|
||||
providerToolDefs = make([]providers.ToolDefinition, 0, len(toolDefs))
|
||||
for _, td := range toolDefs {
|
||||
providerToolDefs = append(providerToolDefs, providers.ToolDefinition{
|
||||
Type: td["type"].(string),
|
||||
Function: providers.ToolFunctionDefinition{
|
||||
Name: td["function"].(map[string]any)["name"].(string),
|
||||
Description: td["function"].(map[string]any)["description"].(string),
|
||||
Parameters: td["function"].(map[string]any)["parameters"].(map[string]any),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Set default LLM options
|
||||
llmOpts := config.LLMOptions
|
||||
if llmOpts == nil {
|
||||
llmOpts = map[string]any{
|
||||
"max_tokens": 4096,
|
||||
"temperature": 0.7,
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Call LLM
|
||||
response, err := config.Provider.Chat(ctx, messages, providerToolDefs, config.Model, llmOpts)
|
||||
if err != nil {
|
||||
logger.ErrorCF("toolloop", "LLM call failed",
|
||||
map[string]any{
|
||||
"iteration": iteration,
|
||||
"error": err.Error(),
|
||||
})
|
||||
return nil, fmt.Errorf("LLM call failed: %w", err)
|
||||
}
|
||||
|
||||
// 4. If no tool calls, we're done
|
||||
if len(response.ToolCalls) == 0 {
|
||||
finalContent = response.Content
|
||||
logger.InfoCF("toolloop", "LLM response without tool calls (direct answer)",
|
||||
map[string]any{
|
||||
"iteration": iteration,
|
||||
"content_chars": len(finalContent),
|
||||
})
|
||||
break
|
||||
}
|
||||
|
||||
// 5. Log tool calls
|
||||
toolNames := make([]string, 0, len(response.ToolCalls))
|
||||
for _, tc := range response.ToolCalls {
|
||||
toolNames = append(toolNames, tc.Name)
|
||||
}
|
||||
logger.InfoCF("toolloop", "LLM requested tool calls",
|
||||
map[string]any{
|
||||
"tools": toolNames,
|
||||
"count": len(response.ToolCalls),
|
||||
"iteration": iteration,
|
||||
})
|
||||
|
||||
// 6. Build assistant message with tool calls
|
||||
assistantMsg := providers.Message{
|
||||
Role: "assistant",
|
||||
Content: response.Content,
|
||||
}
|
||||
for _, tc := range response.ToolCalls {
|
||||
argumentsJSON, _ := json.Marshal(tc.Arguments)
|
||||
assistantMsg.ToolCalls = append(assistantMsg.ToolCalls, providers.ToolCall{
|
||||
ID: tc.ID,
|
||||
Type: "function",
|
||||
Function: &providers.FunctionCall{
|
||||
Name: tc.Name,
|
||||
Arguments: string(argumentsJSON),
|
||||
},
|
||||
})
|
||||
}
|
||||
messages = append(messages, assistantMsg)
|
||||
|
||||
// 7. Execute tool calls
|
||||
for _, tc := range response.ToolCalls {
|
||||
argsJSON, _ := json.Marshal(tc.Arguments)
|
||||
argsPreview := utils.Truncate(string(argsJSON), 200)
|
||||
logger.InfoCF("toolloop", fmt.Sprintf("Tool call: %s(%s)", tc.Name, argsPreview),
|
||||
map[string]any{
|
||||
"tool": tc.Name,
|
||||
"iteration": iteration,
|
||||
})
|
||||
|
||||
// Execute tool (no async callback for subagents - they run independently)
|
||||
var toolResult *ToolResult
|
||||
if config.Tools != nil {
|
||||
toolResult = config.Tools.ExecuteWithContext(ctx, tc.Name, tc.Arguments, channel, chatID, nil)
|
||||
} else {
|
||||
toolResult = ErrorResult("No tools available")
|
||||
}
|
||||
|
||||
// Determine content for LLM
|
||||
contentForLLM := toolResult.ForLLM
|
||||
if contentForLLM == "" && toolResult.Err != nil {
|
||||
contentForLLM = toolResult.Err.Error()
|
||||
}
|
||||
|
||||
// Add tool result message
|
||||
toolResultMsg := providers.Message{
|
||||
Role: "tool",
|
||||
Content: contentForLLM,
|
||||
ToolCallID: tc.ID,
|
||||
}
|
||||
messages = append(messages, toolResultMsg)
|
||||
}
|
||||
}
|
||||
|
||||
return &ToolLoopResult{
|
||||
Content: finalContent,
|
||||
Iterations: iteration,
|
||||
}, nil
|
||||
}
|
||||
Reference in New Issue
Block a user