Implement core AI service infrastructure including agent operations, escalation handling, and asynchronous queue processing. New services: - Agent service: Handles Tier 2 AI operations with action runner integration - Queue service: Asynchronous processing of AI operations with retry logic - Escalation service: Routes complex issues to staff with configurable rules - Notifier service: Sends notifications for AI operation results Additional changes: - Add GitCaddy AI system user (ID: -3) for bot operations - Add AIConfig to repository units - Add AI-specific error codes (rate limiting, service errors, etc.) - Extend AI client with GenerateIssueResponse method - Add AISettingsV2 struct for repository-level AI configuration The queue system enables non-blocking AI operations with proper error handling and rate limiting.
68 lines
2.3 KiB
Go
// Copyright 2026 MarketAlly. All rights reserved.
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
package ai
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
ai_model "code.gitcaddy.com/server/v3/models/ai"
|
|
issues_model "code.gitcaddy.com/server/v3/models/issues"
|
|
repo_model "code.gitcaddy.com/server/v3/models/repo"
|
|
user_model "code.gitcaddy.com/server/v3/models/user"
|
|
"code.gitcaddy.com/server/v3/modules/log"
|
|
issue_service "code.gitcaddy.com/server/v3/services/issue"
|
|
)
|
|
|
|
// escalateToStaff handles escalation when an AI operation fails or has low confidence.
|
|
// It adds the configured escalation label and posts a comment summarizing the situation.
|
|
func escalateToStaff(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) {
|
|
if !aiCfg.EscalateToStaff {
|
|
return
|
|
}
|
|
|
|
issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
|
|
if err != nil {
|
|
log.Error("Escalation: failed to load issue %d: %v", opLog.TargetID, err)
|
|
return
|
|
}
|
|
issue.Repo = repo
|
|
|
|
botUser := user_model.NewAIUser()
|
|
|
|
// Add escalation label if configured
|
|
if aiCfg.EscalationLabel != "" {
|
|
label, err := issues_model.GetLabelInRepoByName(ctx, repo.ID, aiCfg.EscalationLabel)
|
|
if err != nil {
|
|
log.Warn("Escalation: label %q not found in repo %d, skipping label", aiCfg.EscalationLabel, repo.ID)
|
|
} else {
|
|
if err := issue_service.AddLabel(ctx, issue, botUser, label); err != nil {
|
|
log.Error("Escalation: failed to add label %q to issue #%d: %v", aiCfg.EscalationLabel, issue.Index, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Post an escalation comment
|
|
comment := fmt.Sprintf(
|
|
"I attempted to handle this automatically (%s) but was unable to complete the operation successfully. "+
|
|
"A team member should review this.\n\n"+
|
|
"**Operation:** %s\n"+
|
|
"**Status:** %s",
|
|
opLog.Operation, opLog.Operation, opLog.Status,
|
|
)
|
|
if opLog.ErrorMessage != "" {
|
|
comment += fmt.Sprintf("\n**Error:** %s", opLog.ErrorMessage)
|
|
}
|
|
|
|
if _, err := issue_service.CreateIssueComment(ctx, botUser, repo, issue, comment, nil); err != nil {
|
|
log.Error("Escalation: failed to create escalation comment on issue #%d: %v", issue.Index, err)
|
|
}
|
|
|
|
// Update operation log to reflect escalation
|
|
opLog.Status = ai_model.OperationStatusEscalated
|
|
if err := ai_model.UpdateOperationLog(ctx, opLog); err != nil {
|
|
log.Error("Escalation: failed to update operation log: %v", err)
|
|
}
|
|
}
|