Extract core LLM tool loop logic into shared RunToolLoop function that can be used by both main agent and subagents. Subagents now run their own tool loop with dedicated tool registry, enabling full independence. Key changes: - New pkg/tools/toolloop.go with reusable tool execution logic - Subagents use message tool to communicate directly with users - Heartbeat processing is now stateless via ProcessHeartbeat - Simplified system message routing without result forwarding - Shared tool registry creation for consistency between agents This architecture follows openclaw's design where async tools notify via bus and subagents handle their own user communication.
166 lines
4.6 KiB
Go
166 lines
4.6 KiB
Go
// PicoClaw - Ultra-lightweight personal AI agent
|
|
// Inspired by and based on nanobot: https://github.com/HKUDS/nanobot
|
|
// License: MIT
|
|
//
|
|
// Copyright (c) 2026 PicoClaw contributors
|
|
|
|
package tools
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
|
|
"github.com/sipeed/picoclaw/pkg/logger"
|
|
"github.com/sipeed/picoclaw/pkg/providers"
|
|
"github.com/sipeed/picoclaw/pkg/utils"
|
|
)
|
|
|
|
// ToolLoopConfig configures the tool execution loop.
type ToolLoopConfig struct {
	// Provider is the LLM backend used for every chat completion call.
	Provider providers.LLMProvider
	// Model is the model identifier passed to the provider on each call.
	Model string
	// Tools is the registry of callable tools. May be nil, in which case
	// the loop runs with no tool definitions and any requested tool call
	// is answered with an error result.
	Tools *ToolRegistry
	// MaxIterations bounds the number of LLM round-trips before the loop
	// gives up (prevents infinite tool-call cycles).
	MaxIterations int
	// LLMOptions are provider options (e.g. "max_tokens", "temperature").
	// When nil, RunToolLoop applies its own defaults.
	LLMOptions map[string]any
}
|
|
|
|
// ToolLoopResult contains the result of running the tool loop.
type ToolLoopResult struct {
	// Content is the final assistant text. It is empty when MaxIterations
	// was reached while the LLM was still requesting tool calls.
	Content string
	// Iterations is the number of LLM calls actually performed.
	Iterations int
}
|
|
|
|
// RunToolLoop executes the LLM + tool call iteration loop.
|
|
// This is the core agent logic that can be reused by both main agent and subagents.
|
|
func RunToolLoop(ctx context.Context, config ToolLoopConfig, messages []providers.Message, channel, chatID string) (*ToolLoopResult, error) {
|
|
iteration := 0
|
|
var finalContent string
|
|
|
|
for iteration < config.MaxIterations {
|
|
iteration++
|
|
|
|
logger.DebugCF("toolloop", "LLM iteration",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"max": config.MaxIterations,
|
|
})
|
|
|
|
// 1. Build tool definitions
|
|
var providerToolDefs []providers.ToolDefinition
|
|
if config.Tools != nil {
|
|
toolDefs := config.Tools.GetDefinitions()
|
|
providerToolDefs = make([]providers.ToolDefinition, 0, len(toolDefs))
|
|
for _, td := range toolDefs {
|
|
providerToolDefs = append(providerToolDefs, providers.ToolDefinition{
|
|
Type: td["type"].(string),
|
|
Function: providers.ToolFunctionDefinition{
|
|
Name: td["function"].(map[string]any)["name"].(string),
|
|
Description: td["function"].(map[string]any)["description"].(string),
|
|
Parameters: td["function"].(map[string]any)["parameters"].(map[string]any),
|
|
},
|
|
})
|
|
}
|
|
}
|
|
|
|
// 2. Set default LLM options
|
|
llmOpts := config.LLMOptions
|
|
if llmOpts == nil {
|
|
llmOpts = map[string]any{
|
|
"max_tokens": 4096,
|
|
"temperature": 0.7,
|
|
}
|
|
}
|
|
|
|
// 3. Call LLM
|
|
response, err := config.Provider.Chat(ctx, messages, providerToolDefs, config.Model, llmOpts)
|
|
if err != nil {
|
|
logger.ErrorCF("toolloop", "LLM call failed",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"error": err.Error(),
|
|
})
|
|
return nil, fmt.Errorf("LLM call failed: %w", err)
|
|
}
|
|
|
|
// 4. If no tool calls, we're done
|
|
if len(response.ToolCalls) == 0 {
|
|
finalContent = response.Content
|
|
logger.InfoCF("toolloop", "LLM response without tool calls (direct answer)",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"content_chars": len(finalContent),
|
|
})
|
|
break
|
|
}
|
|
|
|
// 5. Log tool calls
|
|
toolNames := make([]string, 0, len(response.ToolCalls))
|
|
for _, tc := range response.ToolCalls {
|
|
toolNames = append(toolNames, tc.Name)
|
|
}
|
|
logger.InfoCF("toolloop", "LLM requested tool calls",
|
|
map[string]any{
|
|
"tools": toolNames,
|
|
"count": len(response.ToolCalls),
|
|
"iteration": iteration,
|
|
})
|
|
|
|
// 6. Build assistant message with tool calls
|
|
assistantMsg := providers.Message{
|
|
Role: "assistant",
|
|
Content: response.Content,
|
|
}
|
|
for _, tc := range response.ToolCalls {
|
|
argumentsJSON, _ := json.Marshal(tc.Arguments)
|
|
assistantMsg.ToolCalls = append(assistantMsg.ToolCalls, providers.ToolCall{
|
|
ID: tc.ID,
|
|
Type: "function",
|
|
Function: &providers.FunctionCall{
|
|
Name: tc.Name,
|
|
Arguments: string(argumentsJSON),
|
|
},
|
|
})
|
|
}
|
|
messages = append(messages, assistantMsg)
|
|
|
|
// 7. Execute tool calls
|
|
for _, tc := range response.ToolCalls {
|
|
argsJSON, _ := json.Marshal(tc.Arguments)
|
|
argsPreview := utils.Truncate(string(argsJSON), 200)
|
|
logger.InfoCF("toolloop", fmt.Sprintf("Tool call: %s(%s)", tc.Name, argsPreview),
|
|
map[string]any{
|
|
"tool": tc.Name,
|
|
"iteration": iteration,
|
|
})
|
|
|
|
// Execute tool (no async callback for subagents - they run independently)
|
|
var toolResult *ToolResult
|
|
if config.Tools != nil {
|
|
toolResult = config.Tools.ExecuteWithContext(ctx, tc.Name, tc.Arguments, channel, chatID, nil)
|
|
} else {
|
|
toolResult = ErrorResult("No tools available")
|
|
}
|
|
|
|
// Determine content for LLM
|
|
contentForLLM := toolResult.ForLLM
|
|
if contentForLLM == "" && toolResult.Err != nil {
|
|
contentForLLM = toolResult.Err.Error()
|
|
}
|
|
|
|
// Add tool result message
|
|
toolResultMsg := providers.Message{
|
|
Role: "tool",
|
|
Content: contentForLLM,
|
|
ToolCallID: tc.ID,
|
|
}
|
|
messages = append(messages, toolResultMsg)
|
|
}
|
|
}
|
|
|
|
return &ToolLoopResult{
|
|
Content: finalContent,
|
|
Iterations: iteration,
|
|
}, nil
|
|
}
|