feat: make context compression dynamic and add thinking animation

- Implement dynamic context window awareness for compression thresholds
- Add 'Thinking' animation for Telegram channel with auto-edit response
- Refactor summarization to handle multi-part batches and oversized messages
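The Telegram change from the second bullet lives in the other modified file and is not shown in the diff below. As a rough sketch only, not the code from this commit, the send-then-edit pattern it describes might look like this (assuming the go-telegram-bot-api v5 client; bot, chatID, and finalText are placeholder names):

import tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api/v5"

// sendWithThinking posts a "Thinking..." placeholder message, then edits that
// same message in place once the agent's final response is ready.
// Sketch only; the commit's actual Telegram channel code is not shown here.
func sendWithThinking(bot *tgbotapi.BotAPI, chatID int64, finalText string) error {
    sent, err := bot.Send(tgbotapi.NewMessage(chatID, "Thinking..."))
    if err != nil {
        return err
    }
    _, err = bot.Send(tgbotapi.NewEditMessageText(chatID, sent.MessageID, finalText))
    return err
}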
Danieldd28
2026-02-10 02:00:57 +07:00
parent 07e624c8da
commit 2df60b2fa3
2 changed files with 151 additions and 40 deletions


@@ -27,6 +27,7 @@ type AgentLoop struct {
    provider providers.LLMProvider
    workspace string
    model string
+   contextWindow int
    maxIterations int
    sessions *session.SessionManager
    contextBuilder *ContextBuilder
@@ -56,6 +57,7 @@ func NewAgentLoop(cfg *config.Config, bus *bus.MessageBus, provider providers.LL
        provider: provider,
        workspace: workspace,
        model: cfg.Agents.Defaults.Model,
+       contextWindow: cfg.Agents.Defaults.MaxTokens,
        maxIterations: cfg.Agents.Defaults.MaxToolIterations,
        sessions: sessionsManager,
        contextBuilder: NewContextBuilder(workspace),
@@ -198,7 +200,13 @@ func (al *AgentLoop) processMessage(ctx context.Context, msg bus.InboundMessage)
    // Context compression logic
    newHistory := al.sessions.GetHistory(msg.SessionKey)
-   if len(newHistory) > 20 {
+   // Token Awareness (Dynamic)
+   // Trigger if history > 20 messages OR estimated tokens > 75% of context window
+   tokenEstimate := al.estimateTokens(newHistory)
+   threshold := al.contextWindow * 75 / 100
+   if len(newHistory) > 20 || tokenEstimate > threshold {
        if _, loading := al.summarizing.LoadOrStore(msg.SessionKey, true); !loading {
            go func() {
                defer al.summarizing.Delete(msg.SessionKey)
@@ -213,55 +221,104 @@ func (al *AgentLoop) processMessage(ctx context.Context, msg bus.InboundMessage)
}
func (al *AgentLoop) summarizeSession(sessionKey string) {
-   ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+   ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
    defer cancel()
    history := al.sessions.GetHistory(sessionKey)
    summary := al.sessions.GetSummary(sessionKey)
-   // Keep last 4 messages, summarize the rest
+   // Keep last 4 messages for continuity
    if len(history) <= 4 {
        return
    }
    toSummarize := history[:len(history)-4]
-   prompt := "Below is a conversation history and an optional existing summary. " +
-       "Please provide a concise summary of the conversation so far, " +
-       "preserving the core context and key points discussed. " +
-       "If there's an existing summary, incorporate it into the new one.\n\n"
+   // Oversized Message Guard (Dynamic)
+   // Skip messages larger than 50% of context window to prevent summarizer overflow.
+   maxMessageTokens := al.contextWindow / 2
+   validMessages := make([]providers.Message, 0)
+   omitted := false
-   if summary != "" {
-       prompt += "EXISTING SUMMARY: " + summary + "\n\n"
-   }
-   prompt += "CONVERSATION TO SUMMARIZE:\n"
    for _, m := range toSummarize {
-       if m.Role == "user" || m.Role == "assistant" {
-           prompt += fmt.Sprintf("%s: %s\n", m.Role, m.Content)
+       if m.Role != "user" && m.Role != "assistant" {
+           continue
        }
+       // Estimate tokens for this message
+       msgTokens := len(m.Content) / 4
+       if msgTokens > maxMessageTokens {
+           omitted = true
+           continue
+       }
+       validMessages = append(validMessages, m)
    }
-   messages := []providers.Message{
-       {
-           Role: "user",
-           Content: prompt,
-       },
-   }
-   response, err := al.provider.Chat(ctx, messages, nil, al.model, map[string]interface{}{
-       "max_tokens": 1024,
-       "temperature": 0.3,
-   })
-   if err != nil {
-       fmt.Printf("Error summarizing session %s: %v\n", sessionKey, err)
+   if len(validMessages) == 0 {
        return
    }
-   if response.Content != "" {
-       al.sessions.SetSummary(sessionKey, response.Content)
+   // Multi-Part Summarization
+   // Split into two parts if history is significant
+   var finalSummary string
+   if len(validMessages) > 10 {
+       mid := len(validMessages) / 2
+       part1 := validMessages[:mid]
+       part2 := validMessages[mid:]
+       s1, _ := al.summarizeBatch(ctx, part1, "")
+       s2, _ := al.summarizeBatch(ctx, part2, "")
+       // Merge them
+       mergePrompt := fmt.Sprintf("Merge these two conversation summaries into one cohesive summary:\n\n1: %s\n\n2: %s", s1, s2)
+       resp, err := al.provider.Chat(ctx, []providers.Message{{Role: "user", Content: mergePrompt}}, nil, al.model, map[string]interface{}{
+           "max_tokens": 1024,
+           "temperature": 0.3,
+       })
+       if err == nil {
+           finalSummary = resp.Content
+       } else {
+           finalSummary = s1 + " " + s2
+       }
+   } else {
+       finalSummary, _ = al.summarizeBatch(ctx, validMessages, summary)
+   }
+   if omitted && finalSummary != "" {
+       finalSummary += "\n[Note: Some oversized messages were omitted from this summary for efficiency.]"
+   }
+   if finalSummary != "" {
+       al.sessions.SetSummary(sessionKey, finalSummary)
        al.sessions.TruncateHistory(sessionKey, 4)
        al.sessions.Save(al.sessions.GetOrCreate(sessionKey))
    }
}
+func (al *AgentLoop) summarizeBatch(ctx context.Context, batch []providers.Message, existingSummary string) (string, error) {
+   prompt := "Provide a concise summary of this conversation segment, preserving core context and key points.\n"
+   if existingSummary != "" {
+       prompt += "Existing context: " + existingSummary + "\n"
+   }
+   prompt += "\nCONVERSATION:\n"
+   for _, m := range batch {
+       prompt += fmt.Sprintf("%s: %s\n", m.Role, m.Content)
+   }
+   response, err := al.provider.Chat(ctx, []providers.Message{{Role: "user", Content: prompt}}, nil, al.model, map[string]interface{}{
+       "max_tokens": 1024,
+       "temperature": 0.3,
+   })
+   if err != nil {
+       return "", err
+   }
+   return response.Content, nil
+}
+func (al *AgentLoop) estimateTokens(messages []providers.Message) int {
+   total := 0
+   for _, m := range messages {
+       total += len(m.Content) / 4 // Simple heuristic: 4 chars per token
+   }
+   return total
+}
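For a quick sanity check on the thresholds introduced above, here is a standalone sketch of the same arithmetic with an illustrative context window of 8192 tokens (not a value taken from this commit's config): compression triggers at 8192 * 75 / 100 = 6144 estimated tokens, and the summarizer guard skips any single message above 8192 / 2 = 4096 tokens, roughly 16 KB of text under the 4-characters-per-token heuristic.

package main

import "fmt"

// estimateTokens mirrors the heuristic used above: ~4 characters per token.
func estimateTokens(contents []string) int {
    total := 0
    for _, c := range contents {
        total += len(c) / 4
    }
    return total
}

func main() {
    contextWindow := 8192                 // illustrative value, not the commit's config
    threshold := contextWindow * 75 / 100 // 6144 tokens: compression trigger
    maxMessageTokens := contextWindow / 2 // 4096 tokens: per-message summarizer guard
    history := []string{"hello", "run the tests", "here is the output"}
    fmt.Println(estimateTokens(history), threshold, maxMessageTokens)
}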