- Add constants package with IsInternalChannel helper to centralize internal channel checks across agent, channels, and heartbeat services - Add ToProviderDefs method to ToolRegistry to consolidate tool definition conversion logic used in agent loop and tool loop - Refactor SubagentTool.Execute to use RunToolLoop for consistent tool execution with iteration tracking - Remove duplicate inline map definitions and type assertion code throughout codebase
155 lines
4.1 KiB
Go
// PicoClaw - Ultra-lightweight personal AI agent
// Inspired by and based on nanobot: https://github.com/HKUDS/nanobot
// License: MIT
//
// Copyright (c) 2026 PicoClaw contributors
package tools
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
|
|
"github.com/sipeed/picoclaw/pkg/logger"
|
|
"github.com/sipeed/picoclaw/pkg/providers"
|
|
"github.com/sipeed/picoclaw/pkg/utils"
|
|
)
|
|
|
|
// ToolLoopConfig configures the tool execution loop.
type ToolLoopConfig struct {
	// Provider is the LLM backend invoked once per loop iteration.
	Provider providers.LLMProvider
	// Model is the model identifier passed to Provider.Chat.
	Model string
	// Tools supplies tool definitions and executes tool calls.
	// May be nil, in which case no tool definitions are sent and any
	// requested tool call yields a "No tools available" error result.
	Tools *ToolRegistry
	// MaxIterations bounds the number of LLM calls in the loop.
	MaxIterations int
	// LLMOptions are per-call provider options. When nil, RunToolLoop
	// uses defaults (max_tokens=4096, temperature=0.7).
	LLMOptions map[string]any
}
|
|
|
|
// ToolLoopResult contains the result of running the tool loop.
type ToolLoopResult struct {
	// Content is the LLM's final direct answer (the first response without
	// tool calls). Empty if MaxIterations was reached before such a
	// response arrived.
	Content string
	// Iterations is the number of LLM calls actually performed.
	Iterations int
}
|
|
|
|
// RunToolLoop executes the LLM + tool call iteration loop.
|
|
// This is the core agent logic that can be reused by both main agent and subagents.
|
|
func RunToolLoop(ctx context.Context, config ToolLoopConfig, messages []providers.Message, channel, chatID string) (*ToolLoopResult, error) {
|
|
iteration := 0
|
|
var finalContent string
|
|
|
|
for iteration < config.MaxIterations {
|
|
iteration++
|
|
|
|
logger.DebugCF("toolloop", "LLM iteration",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"max": config.MaxIterations,
|
|
})
|
|
|
|
// 1. Build tool definitions
|
|
var providerToolDefs []providers.ToolDefinition
|
|
if config.Tools != nil {
|
|
providerToolDefs = config.Tools.ToProviderDefs()
|
|
}
|
|
|
|
// 2. Set default LLM options
|
|
llmOpts := config.LLMOptions
|
|
if llmOpts == nil {
|
|
llmOpts = map[string]any{
|
|
"max_tokens": 4096,
|
|
"temperature": 0.7,
|
|
}
|
|
}
|
|
|
|
// 3. Call LLM
|
|
response, err := config.Provider.Chat(ctx, messages, providerToolDefs, config.Model, llmOpts)
|
|
if err != nil {
|
|
logger.ErrorCF("toolloop", "LLM call failed",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"error": err.Error(),
|
|
})
|
|
return nil, fmt.Errorf("LLM call failed: %w", err)
|
|
}
|
|
|
|
// 4. If no tool calls, we're done
|
|
if len(response.ToolCalls) == 0 {
|
|
finalContent = response.Content
|
|
logger.InfoCF("toolloop", "LLM response without tool calls (direct answer)",
|
|
map[string]any{
|
|
"iteration": iteration,
|
|
"content_chars": len(finalContent),
|
|
})
|
|
break
|
|
}
|
|
|
|
// 5. Log tool calls
|
|
toolNames := make([]string, 0, len(response.ToolCalls))
|
|
for _, tc := range response.ToolCalls {
|
|
toolNames = append(toolNames, tc.Name)
|
|
}
|
|
logger.InfoCF("toolloop", "LLM requested tool calls",
|
|
map[string]any{
|
|
"tools": toolNames,
|
|
"count": len(response.ToolCalls),
|
|
"iteration": iteration,
|
|
})
|
|
|
|
// 6. Build assistant message with tool calls
|
|
assistantMsg := providers.Message{
|
|
Role: "assistant",
|
|
Content: response.Content,
|
|
}
|
|
for _, tc := range response.ToolCalls {
|
|
argumentsJSON, _ := json.Marshal(tc.Arguments)
|
|
assistantMsg.ToolCalls = append(assistantMsg.ToolCalls, providers.ToolCall{
|
|
ID: tc.ID,
|
|
Type: "function",
|
|
Function: &providers.FunctionCall{
|
|
Name: tc.Name,
|
|
Arguments: string(argumentsJSON),
|
|
},
|
|
})
|
|
}
|
|
messages = append(messages, assistantMsg)
|
|
|
|
// 7. Execute tool calls
|
|
for _, tc := range response.ToolCalls {
|
|
argsJSON, _ := json.Marshal(tc.Arguments)
|
|
argsPreview := utils.Truncate(string(argsJSON), 200)
|
|
logger.InfoCF("toolloop", fmt.Sprintf("Tool call: %s(%s)", tc.Name, argsPreview),
|
|
map[string]any{
|
|
"tool": tc.Name,
|
|
"iteration": iteration,
|
|
})
|
|
|
|
// Execute tool (no async callback for subagents - they run independently)
|
|
var toolResult *ToolResult
|
|
if config.Tools != nil {
|
|
toolResult = config.Tools.ExecuteWithContext(ctx, tc.Name, tc.Arguments, channel, chatID, nil)
|
|
} else {
|
|
toolResult = ErrorResult("No tools available")
|
|
}
|
|
|
|
// Determine content for LLM
|
|
contentForLLM := toolResult.ForLLM
|
|
if contentForLLM == "" && toolResult.Err != nil {
|
|
contentForLLM = toolResult.Err.Error()
|
|
}
|
|
|
|
// Add tool result message
|
|
toolResultMsg := providers.Message{
|
|
Role: "tool",
|
|
Content: contentForLLM,
|
|
ToolCallID: tc.ID,
|
|
}
|
|
messages = append(messages, toolResultMsg)
|
|
}
|
|
}
|
|
|
|
return &ToolLoopResult{
|
|
Content: finalContent,
|
|
Iterations: iteration,
|
|
}, nil
|
|
}
|