2
0

feat(ai): add ai service layer with agent and queue system

Implement core AI service infrastructure including agent operations, escalation handling, and asynchronous queue processing.

New services:
- Agent service: Handles Tier 2 AI operations with action runner integration
- Queue service: Asynchronous processing of AI operations with retry logic
- Escalation service: Routes complex issues to staff with configurable rules
- Notifier service: Sends notifications for AI operation results

Additional changes:
- Add GitCaddy AI system user (ID: -3) for bot operations
- Add AIConfig to repository units
- Add AI-specific error codes (rate limiting, service errors, etc.)
- Extend AI client with GenerateIssueResponse method
- Add AISettingsV2 struct for repository-level AI configuration

The queue system enables non-blocking AI operations with proper error handling and rate limiting.
This commit is contained in:
2026-02-12 00:02:49 -05:00
parent 26793bf898
commit 8ad6664b92
12 changed files with 892 additions and 0 deletions

View File

@@ -468,6 +468,11 @@ func (repo *Repository) MustGetUnit(ctx context.Context, tp unit.Type) *RepoUnit
Type: tp,
Config: new(ActionsConfig),
}
case unit.TypeAI:
return &RepoUnit{
Type: tp,
Config: new(AIConfig),
}
case unit.TypeProjects:
cfg := new(ProjectsConfig)
cfg.ProjectsMode = ProjectsModeNone

View File

@@ -72,5 +72,38 @@ func GetSystemUserByName(name string) *User {
if IsGiteaActionsUserName(name) {
return NewActionsUser()
}
if IsAIUserName(name) {
return NewAIUser()
}
return nil
}
// Identity of the GitCaddy AI system user. Like the other system users it
// lives at a reserved negative ID and is constructed on demand by
// NewAIUser rather than loaded from the database.
const (
	AIUserID    int64 = -3
	AIUserName        = "gitcaddy-ai"
	AIUserEmail       = "ai@gitcaddy.com"
)
// IsAIUserName reports whether name identifies the AI system user,
// ignoring case differences (Unicode simple case folding).
func IsAIUserName(name string) bool {
	return strings.EqualFold(AIUserName, name)
}
// NewAIUser constructs the in-memory system bot account used for AI
// operations. The account is synthetic: it carries the reserved negative
// AIUserID and is returned fresh on every call.
func NewAIUser() *User {
	bot := &User{
		ID:               AIUserID,
		Name:             AIUserName,
		LowerName:        strings.ToLower(AIUserName),
		IsActive:         true,
		FullName:         "GitCaddy AI",
		Email:            AIUserEmail,
		KeepEmailPrivate: true,
		LoginName:        AIUserName,
		Type:             UserTypeBot,
		Visibility:       structs.VisibleTypePublic,
	}
	return bot
}
// IsAI reports whether u is the AI system bot user.
// It is safe to call on a nil receiver.
func (u *User) IsAI() bool {
	if u == nil {
		return false
	}
	return u.ID == AIUserID
}

View File

@@ -190,6 +190,20 @@ func (c *Client) SummarizeChanges(ctx context.Context, req *SummarizeChangesRequ
return &resp, nil
}
// GenerateIssueResponse requests an AI-generated response to an issue
// from the backing AI service. It refuses to run unless both the AI
// subsystem and the auto-respond feature are enabled in settings.
func (c *Client) GenerateIssueResponse(ctx context.Context, req *GenerateIssueResponseRequest) (*GenerateIssueResponseResponse, error) {
	if !IsEnabled() || !setting.AI.AllowAutoRespond {
		return nil, errors.New("AI auto-respond is not enabled")
	}
	resp := new(GenerateIssueResponseResponse)
	err := c.doRequest(ctx, http.MethodPost, "/issues/respond", req, resp)
	if err != nil {
		log.Error("AI GenerateIssueResponse failed: %v", err)
		return nil, err
	}
	return resp, nil
}
// CheckHealth checks the health of the AI service
func (c *Client) CheckHealth(ctx context.Context) (*HealthCheckResponse, error) {
var resp HealthCheckResponse

View File

@@ -187,6 +187,23 @@ type SummarizeChangesResponse struct {
ImpactAssessment string `json:"impact_assessment"`
}
// GenerateIssueResponseRequest is the request for generating an AI response to an issue.
// Title and Body are sent to the AI service verbatim (the queue worker fills
// them from issue.Title and issue.Content).
type GenerateIssueResponseRequest struct {
	RepoID  int64  `json:"repo_id"`
	IssueID int64  `json:"issue_id"`
	Title   string `json:"title"`
	Body    string `json:"body"`
	// CustomInstructions carries the repository-level IssueInstructions, if configured.
	CustomInstructions string `json:"custom_instructions,omitempty"`
}
// GenerateIssueResponseResponse is the response from generating an issue response.
// Response is the comment body the bot posts; InputTokens/OutputTokens are
// copied onto the operation log for accounting.
type GenerateIssueResponseResponse struct {
	Response string `json:"response"`
	// Confidence is the model's self-reported confidence. NOTE(review): not
	// inspected by any caller visible in this diff — confirm intended use.
	Confidence   float64 `json:"confidence"`
	InputTokens  int     `json:"input_tokens"`
	OutputTokens int     `json:"output_tokens"`
}
// HealthCheckResponse is the response from a health check
type HealthCheckResponse struct {
Healthy bool `json:"healthy"`

View File

@@ -170,6 +170,16 @@ const (
WishlistVoteBudget ErrorCode = "WISHLIST_VOTE_BUDGET_EXCEEDED"
)
// AI errors (AI_). Each code maps to a message and HTTP status in
// errorCatalog (e.g. rate limiting → 429, service failure → 502).
const (
	AIDisabled          ErrorCode = "AI_DISABLED"            // AI features globally disabled
	AIUnitNotEnabled    ErrorCode = "AI_UNIT_NOT_ENABLED"    // repo does not have the AI unit
	AIOperationNotFound ErrorCode = "AI_OPERATION_NOT_FOUND" // operation log entry missing
	AIRateLimitExceeded ErrorCode = "AI_RATE_LIMIT_EXCEEDED" // per-hour operation budget hit
	AIServiceError      ErrorCode = "AI_SERVICE_ERROR"       // upstream AI service failed
	AIOperationDisabled ErrorCode = "AI_OPERATION_DISABLED"  // this specific operation is off
)
// errorInfo contains metadata about an error code
type errorInfo struct {
Message string
@@ -299,6 +309,14 @@ var errorCatalog = map[ErrorCode]errorInfo{
WishlistItemNotFound: {"Wishlist item not found", http.StatusNotFound},
WishlistDisabled: {"Wishlist is disabled for this repository", http.StatusForbidden},
WishlistVoteBudget: {"Vote budget exceeded for this repository", http.StatusConflict},
// AI errors
AIDisabled: {"AI features are disabled", http.StatusForbidden},
AIUnitNotEnabled: {"AI unit is not enabled for this repository", http.StatusForbidden},
AIOperationNotFound: {"AI operation not found", http.StatusNotFound},
AIRateLimitExceeded: {"AI operation rate limit exceeded", http.StatusTooManyRequests},
AIServiceError: {"AI service error", http.StatusBadGateway},
AIOperationDisabled: {"This AI operation is not enabled", http.StatusForbidden},
}
// Message returns the human-readable message for an error code

123
modules/structs/repo_ai.go Normal file
View File

@@ -0,0 +1,123 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package structs
import "time"
// AISettingsV2 represents the AI settings for a repository.
type AISettingsV2 struct {
	// Tier 1: Light AI operations
	AutoRespondToIssues  bool `json:"auto_respond_issues"`
	AutoReviewPRs        bool `json:"auto_review_prs"`
	AutoInspectWorkflows bool `json:"auto_inspect_workflows"`
	AutoTriageIssues     bool `json:"auto_triage_issues"`

	// Tier 2: Advanced agent operations
	AgentModeEnabled   bool     `json:"agent_mode_enabled"`
	AgentTriggerLabels []string `json:"agent_trigger_labels"`
	// AgentMaxRunMinutes of 0 (or negative) means "use the default"; the
	// agent service falls back to 30 minutes.
	AgentMaxRunMinutes int `json:"agent_max_run_minutes"`

	// Escalation
	EscalateToStaff bool `json:"escalate_to_staff"`
	// EscalationLabel names a repo label; escalation skips labeling (with a
	// warning) when the label does not exist in the repository.
	EscalationLabel      string `json:"escalation_label"`
	EscalationAssignTeam string `json:"escalation_assign_team"`

	// Provider overrides (empty = inherit from org → system)
	PreferredProvider string `json:"preferred_provider"`
	PreferredModel    string `json:"preferred_model"`

	// Custom instructions
	SystemInstructions string `json:"system_instructions"`
	ReviewInstructions string `json:"review_instructions"`
	IssueInstructions  string `json:"issue_instructions"`

	// Resolved values (read-only, computed from cascade)
	ResolvedProvider string `json:"resolved_provider,omitempty"`
	ResolvedModel    string `json:"resolved_model,omitempty"`
}
// UpdateAISettingsOption represents the options for updating AI settings.
// Fields are pointers so absent values can be distinguished from explicit
// zero values; presumably nil fields leave the current setting unchanged —
// confirm against the settings handler (not shown in this diff).
type UpdateAISettingsOption struct {
	AutoRespondToIssues  *bool    `json:"auto_respond_issues"`
	AutoReviewPRs        *bool    `json:"auto_review_prs"`
	AutoInspectWorkflows *bool    `json:"auto_inspect_workflows"`
	AutoTriageIssues     *bool    `json:"auto_triage_issues"`
	AgentModeEnabled     *bool    `json:"agent_mode_enabled"`
	AgentTriggerLabels   []string `json:"agent_trigger_labels"`
	AgentMaxRunMinutes   *int     `json:"agent_max_run_minutes"`
	EscalateToStaff      *bool    `json:"escalate_to_staff"`
	EscalationLabel      *string  `json:"escalation_label"`
	EscalationAssignTeam *string  `json:"escalation_assign_team"`
	PreferredProvider    *string  `json:"preferred_provider"`
	PreferredModel       *string  `json:"preferred_model"`
	SystemInstructions   *string  `json:"system_instructions"`
	ReviewInstructions   *string  `json:"review_instructions"`
	IssueInstructions    *string  `json:"issue_instructions"`
}
// OrgAISettingsV2 represents the AI settings for an organization.
// Organization settings sit between repository overrides and system
// defaults in the configuration cascade.
type OrgAISettingsV2 struct {
	Provider string `json:"provider"`
	Model    string `json:"model"`
	HasAPIKey bool `json:"has_api_key"` // never expose actual key
	// MaxOpsPerHour, when > 0, overrides the system-wide per-repo rate limit.
	MaxOpsPerHour    int    `json:"max_ops_per_hour"`
	AllowedOps       string `json:"allowed_ops"`
	AgentModeAllowed bool   `json:"agent_mode_allowed"`
}
// UpdateOrgAISettingsOption represents the options for updating org AI settings.
// APIKey is write-only: it can be set here but is never echoed back
// (OrgAISettingsV2 exposes only HasAPIKey).
type UpdateOrgAISettingsOption struct {
	Provider         *string `json:"provider"`
	Model            *string `json:"model"`
	APIKey           *string `json:"api_key"`
	MaxOpsPerHour    *int    `json:"max_ops_per_hour"`
	AllowedOps       *string `json:"allowed_ops"`
	AgentModeAllowed *bool   `json:"agent_mode_allowed"`
}
// AIOperationV2 represents an AI operation log entry — the API-facing view
// of the internal operation log record (status, provenance, token usage,
// timing, and links to the produced comment or action run).
type AIOperationV2 struct {
	ID        int64  `json:"id"`
	RepoID    int64  `json:"repo_id"`
	Operation string `json:"operation"` // e.g. "issue-response", "code-review", "agent-fix"
	Tier      int    `json:"tier"`      // 1 = light ops, 2 = agent ops
	TriggerEvent  string `json:"trigger_event"` // e.g. "issue.created"
	TriggerUserID int64  `json:"trigger_user_id"`
	TargetID      int64  `json:"target_id"`
	TargetType    string `json:"target_type"` // "issue" or "pull"
	Provider      string `json:"provider"`
	Model         string `json:"model"`
	InputTokens   int    `json:"input_tokens"`
	OutputTokens  int    `json:"output_tokens"`
	Status        string `json:"status"`
	// ResultCommentID is set when the operation produced a bot comment.
	ResultCommentID int64 `json:"result_comment_id,omitempty"`
	// ActionRunID is set for Tier 2 operations dispatched to Actions.
	ActionRunID  int64     `json:"action_run_id,omitempty"`
	ErrorMessage string    `json:"error_message,omitempty"`
	DurationMs   int64     `json:"duration_ms"`
	CreatedAt    time.Time `json:"created_at"`
}
// AIOperationListV2 represents a paginated list of AI operations.
// TotalCount is the unpaginated total so clients can compute page counts.
type AIOperationListV2 struct {
	Operations []*AIOperationV2 `json:"operations"`
	TotalCount int64            `json:"total_count"`
}
// AIExplainRequest represents a request to explain code.
// Only FilePath is required (enforced by the binding tag); StartLine/EndLine
// scope the explanation to a line range, and Question optionally focuses it.
type AIExplainRequest struct {
	FilePath  string `json:"file_path" binding:"Required"`
	StartLine int    `json:"start_line"`
	EndLine   int    `json:"end_line"`
	Question  string `json:"question"`
}
// AIServiceStatusV2 represents the AI service health status as reported to
// admins/clients: global enablement, reachability of the backing service,
// and aggregate usage for the current day.
type AIServiceStatusV2 struct {
	Enabled    bool   `json:"enabled"`
	Healthy    bool   `json:"healthy"`
	ServiceURL string `json:"service_url"`
	Version    string `json:"version,omitempty"`
	// ProviderStatus maps provider name → status string, when known.
	ProviderStatus map[string]string `json:"provider_status,omitempty"`
	TotalOpsToday  int64             `json:"total_ops_today"`
}

View File

@@ -43,6 +43,7 @@ import (
"code.gitcaddy.com/server/v3/routers/private"
web_routers "code.gitcaddy.com/server/v3/routers/web"
actions_service "code.gitcaddy.com/server/v3/services/actions"
ai_service "code.gitcaddy.com/server/v3/services/ai"
asymkey_service "code.gitcaddy.com/server/v3/services/asymkey"
"code.gitcaddy.com/server/v3/services/auth"
"code.gitcaddy.com/server/v3/services/auth/source/oauth2"
@@ -195,6 +196,7 @@ func InitWebInstalled(ctx context.Context) {
mustInit(svg.Init)
mustInitCtx(ctx, actions_service.Init)
mustInitCtx(ctx, ai_service.Init)
mustInit(repo_service.InitLicenseClassifier)

62
services/ai/agent.go Normal file
View File

@@ -0,0 +1,62 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package ai
import (
"context"
"fmt"
ai_model "code.gitcaddy.com/server/v3/models/ai"
issues_model "code.gitcaddy.com/server/v3/models/issues"
repo_model "code.gitcaddy.com/server/v3/models/repo"
user_model "code.gitcaddy.com/server/v3/models/user"
"code.gitcaddy.com/server/v3/modules/log"
issue_service "code.gitcaddy.com/server/v3/services/issue"
)
// triggerAgentWorkflow triggers a Tier 2 agent Actions workflow for advanced AI work.
// It posts a comment on the issue explaining that the agent is being dispatched,
// and returns the action run ID for tracking.
//
// NOTE: this is currently a placeholder — no workflow is actually
// dispatched and the returned run ID is always 0 (see the TODO below).
func triggerAgentWorkflow(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) (int64, error) {
	issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
	if err != nil {
		return 0, fmt.Errorf("failed to load issue %d: %w", opLog.TargetID, err)
	}
	// Attach the already-loaded repo to avoid a redundant lookup downstream.
	issue.Repo = repo
	botUser := user_model.NewAIUser()
	// Post a comment that the agent is being dispatched
	comment := fmt.Sprintf(
		"An AI agent has been dispatched to investigate and work on this issue. "+
			"I'll create a pull request with the proposed changes once complete.\n\n"+
			"**Operation:** agent-fix\n"+
			"**Max runtime:** %d minutes",
		getAgentMaxRunMinutes(aiCfg),
	)
	// Comment failure is non-fatal: the dispatch (once implemented) should
	// still proceed, so we only log here.
	if _, err := issue_service.CreateIssueComment(ctx, botUser, repo, issue, comment, nil); err != nil {
		log.Error("Agent: failed to post dispatch comment on issue #%d: %v", issue.Index, err)
	}
	// TODO: Implement actual workflow dispatch via services/actions.DispatchActionWorkflow()
	// This requires:
	//  1. A workflow template stored in the repo or generated dynamically
	//  2. Injecting the resolved API key and model as workflow inputs/secrets
	//  3. Labeling the job to run on ai-runner labeled runners
	//
	// For now, log the intent and return 0 as a placeholder run ID.
	// The full implementation will be completed when Actions runner integration is ready.
	log.Info("Agent: would dispatch workflow for issue #%d in repo %s (placeholder)", issue.Index, repo.FullName())
	return 0, nil
}
// getAgentMaxRunMinutes returns the configured maximum agent runtime in
// minutes. It falls back to the 30-minute default when the configured
// value is unset or non-positive, and — defensively — when aiCfg itself
// is nil, so callers holding a missing repo config cannot trigger a panic.
func getAgentMaxRunMinutes(aiCfg *repo_model.AIConfig) int {
	if aiCfg != nil && aiCfg.AgentMaxRunMinutes > 0 {
		return aiCfg.AgentMaxRunMinutes
	}
	return 30 // default 30 minutes
}

67
services/ai/escalation.go Normal file
View File

@@ -0,0 +1,67 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package ai
import (
"context"
"fmt"
ai_model "code.gitcaddy.com/server/v3/models/ai"
issues_model "code.gitcaddy.com/server/v3/models/issues"
repo_model "code.gitcaddy.com/server/v3/models/repo"
user_model "code.gitcaddy.com/server/v3/models/user"
"code.gitcaddy.com/server/v3/modules/log"
issue_service "code.gitcaddy.com/server/v3/services/issue"
)
// escalateToStaff handles escalation when an AI operation fails or has low confidence.
// It adds the configured escalation label and posts a comment summarizing the
// situation, then marks the operation log as escalated. All failures inside
// this function are logged but not propagated — escalation is best-effort.
func escalateToStaff(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) {
	// Guard kept even though the caller in processOperation already checks
	// EscalateToStaff, so this function stays safe for other call sites.
	if !aiCfg.EscalateToStaff {
		return
	}
	issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
	if err != nil {
		log.Error("Escalation: failed to load issue %d: %v", opLog.TargetID, err)
		return
	}
	issue.Repo = repo
	botUser := user_model.NewAIUser()
	// Add escalation label if configured
	if aiCfg.EscalationLabel != "" {
		label, err := issues_model.GetLabelInRepoByName(ctx, repo.ID, aiCfg.EscalationLabel)
		if err != nil {
			// Missing label is only a warning; the comment below still goes out.
			log.Warn("Escalation: label %q not found in repo %d, skipping label", aiCfg.EscalationLabel, repo.ID)
		} else {
			if err := issue_service.AddLabel(ctx, issue, botUser, label); err != nil {
				log.Error("Escalation: failed to add label %q to issue #%d: %v", aiCfg.EscalationLabel, issue.Index, err)
			}
		}
	}
	// Post an escalation comment. opLog.Operation is deliberately used twice:
	// once inline in the first sentence and once in the bold detail line.
	comment := fmt.Sprintf(
		"I attempted to handle this automatically (%s) but was unable to complete the operation successfully. "+
			"A team member should review this.\n\n"+
			"**Operation:** %s\n"+
			"**Status:** %s",
		opLog.Operation, opLog.Operation, opLog.Status,
	)
	if opLog.ErrorMessage != "" {
		comment += fmt.Sprintf("\n**Error:** %s", opLog.ErrorMessage)
	}
	if _, err := issue_service.CreateIssueComment(ctx, botUser, repo, issue, comment, nil); err != nil {
		log.Error("Escalation: failed to create escalation comment on issue #%d: %v", issue.Index, err)
	}
	// Update operation log to reflect escalation. This overwrites the status
	// set by the caller (e.g. failed → escalated); the caller's own
	// UpdateOperationLog afterwards persists the same escalated status.
	opLog.Status = ai_model.OperationStatusEscalated
	if err := ai_model.UpdateOperationLog(ctx, opLog); err != nil {
		log.Error("Escalation: failed to update operation log: %v", err)
	}
}

33
services/ai/init.go Normal file
View File

@@ -0,0 +1,33 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package ai
import (
"context"
"errors"
"code.gitcaddy.com/server/v3/modules/graceful"
"code.gitcaddy.com/server/v3/modules/queue"
"code.gitcaddy.com/server/v3/modules/setting"
notify_service "code.gitcaddy.com/server/v3/services/notify"
)
// aiOperationQueue is the process-wide queue for asynchronous AI operations.
// It is nil until Init runs, and stays nil when AI is disabled — in that
// state EnqueueOperation returns an error.
var aiOperationQueue *queue.WorkerPoolQueue[*AIOperationRequest]

// Init initializes the AI service integration: queue and notifier.
// When AI is disabled in settings this is a no-op (the notifier is never
// registered, so event handling costs nothing).
// NOTE(review): the ctx parameter is currently unused — the queue is bound
// to the graceful-shutdown context instead, so it lives for the whole
// process rather than the init context.
func Init(ctx context.Context) error {
	if !setting.AI.Enabled {
		return nil
	}
	aiOperationQueue = queue.CreateUniqueQueue(graceful.GetManager().ShutdownContext(), "ai_operations", handleAIOperation)
	if aiOperationQueue == nil {
		return errors.New("unable to create ai_operations queue")
	}
	go graceful.GetManager().RunWithCancel(aiOperationQueue)
	notify_service.RegisterNotifier(NewNotifier())
	return nil
}

272
services/ai/notifier.go Normal file
View File

@@ -0,0 +1,272 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package ai
import (
"context"
"slices"
issues_model "code.gitcaddy.com/server/v3/models/issues"
repo_model "code.gitcaddy.com/server/v3/models/repo"
"code.gitcaddy.com/server/v3/models/unit"
user_model "code.gitcaddy.com/server/v3/models/user"
"code.gitcaddy.com/server/v3/modules/log"
"code.gitcaddy.com/server/v3/modules/setting"
notify_service "code.gitcaddy.com/server/v3/services/notify"
)
// aiNotifier reacts to repository events by enqueueing AI operations.
// It embeds NullNotifier so only the explicitly overridden events below
// are handled; everything else is a no-op.
type aiNotifier struct {
	notify_service.NullNotifier
}

// Compile-time check that aiNotifier satisfies the Notifier interface.
var _ notify_service.Notifier = &aiNotifier{}

// NewNotifier creates a new AI notifier
func NewNotifier() notify_service.Notifier {
	return &aiNotifier{}
}
// isAIUser reports whether the doer is the AI bot user. It is the loop
// guard that stops the notifier from reacting to the bot's own activity.
// Delegates to (*User).IsAI (added in this same change) instead of
// duplicating the ID comparison; IsAI handles a nil receiver itself.
func isAIUser(doer *user_model.User) bool {
	return doer.IsAI()
}
// getAIConfig loads the per-repository AI configuration. It returns nil
// when AI is globally disabled, or when the repository does not have the
// AI unit enabled (any unit-lookup error is treated that way).
func getAIConfig(ctx context.Context, repo *repo_model.Repository) *repo_model.AIConfig {
	if !setting.AI.Enabled {
		return nil
	}
	cfgUnit, unitErr := repo.GetUnit(ctx, unit.TypeAI)
	if unitErr != nil {
		// AI unit not enabled for this repo.
		return nil
	}
	return cfgUnit.AIConfig()
}
// NewIssue handles new issue events — triggers auto-respond and auto-triage.
// Both operations are independent: if both features are on, two separate
// queue items are enqueued for the same issue. Enqueue failures are only
// logged; the event is never retried from here.
func (n *aiNotifier) NewIssue(ctx context.Context, issue *issues_model.Issue, _ []*user_model.User) {
	if err := issue.LoadPoster(ctx); err != nil {
		log.Error("AI notifier: issue.LoadPoster: %v", err)
		return
	}
	// Loop prevention: never react to issues opened by the bot itself.
	if isAIUser(issue.Poster) {
		return
	}
	if err := issue.LoadRepo(ctx); err != nil {
		log.Error("AI notifier: issue.LoadRepo: %v", err)
		return
	}
	// nil config means AI is disabled globally or the repo lacks the AI unit.
	aiCfg := getAIConfig(ctx, issue.Repo)
	if aiCfg == nil {
		return
	}
	// Each feature requires both the repo-level toggle and the instance-level setting.
	if aiCfg.AutoRespondToIssues && setting.AI.AllowAutoRespond {
		if err := EnqueueOperation(&AIOperationRequest{
			RepoID:        issue.RepoID,
			Operation:     "issue-response",
			Tier:          1,
			TriggerEvent:  "issue.created",
			TriggerUserID: issue.PosterID,
			TargetID:      issue.ID,
			TargetType:    "issue",
		}); err != nil {
			log.Error("AI notifier: failed to enqueue issue-response for issue #%d: %v", issue.Index, err)
		}
	}
	if aiCfg.AutoTriageIssues && setting.AI.EnableIssueTriage {
		if err := EnqueueOperation(&AIOperationRequest{
			RepoID:        issue.RepoID,
			Operation:     "issue-triage",
			Tier:          1,
			TriggerEvent:  "issue.created",
			TriggerUserID: issue.PosterID,
			TargetID:      issue.ID,
			TargetType:    "issue",
		}); err != nil {
			log.Error("AI notifier: failed to enqueue issue-triage for issue #%d: %v", issue.Index, err)
		}
	}
}
// CreateIssueComment handles new comments — triggers an AI response when the
// bot is @-mentioned in the comment body. Only auto-respond-enabled repos
// participate, and the bot's own comments are ignored (loop prevention).
func (n *aiNotifier) CreateIssueComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository,
	issue *issues_model.Issue, comment *issues_model.Comment, _ []*user_model.User,
) {
	if isAIUser(doer) {
		return
	}
	aiCfg := getAIConfig(ctx, repo)
	if aiCfg == nil {
		return
	}
	// Only respond when auto-respond is enabled for repo AND instance.
	// NOTE(review): despite the original comment mentioning "explicitly ask
	// a question", only an @-mention triggers a response — there is no
	// question detection here.
	if !aiCfg.AutoRespondToIssues || !setting.AI.AllowAutoRespond {
		return
	}
	// Check if the comment mentions the AI bot
	if !isBotMentioned(comment.Content) {
		return
	}
	if err := EnqueueOperation(&AIOperationRequest{
		RepoID:        repo.ID,
		Operation:     "issue-response",
		Tier:          1,
		TriggerEvent:  "issue_comment.created",
		TriggerUserID: doer.ID,
		TargetID:      issue.ID,
		TargetType:    "issue",
	}); err != nil {
		log.Error("AI notifier: failed to enqueue issue-response for comment on issue #%d: %v", issue.Index, err)
	}
}
// NewPullRequest handles new PR events — triggers auto-review.
// PRs opened by the bot itself are skipped (loop prevention). Enqueue
// failures are logged and dropped.
func (n *aiNotifier) NewPullRequest(ctx context.Context, pr *issues_model.PullRequest, _ []*user_model.User) {
	if err := pr.LoadIssue(ctx); err != nil {
		log.Error("AI notifier: pr.LoadIssue: %v", err)
		return
	}
	if err := pr.Issue.LoadPoster(ctx); err != nil {
		log.Error("AI notifier: pr.Issue.LoadPoster: %v", err)
		return
	}
	if isAIUser(pr.Issue.Poster) {
		return
	}
	if err := pr.Issue.LoadRepo(ctx); err != nil {
		log.Error("AI notifier: pr.Issue.LoadRepo: %v", err)
		return
	}
	aiCfg := getAIConfig(ctx, pr.Issue.Repo)
	if aiCfg == nil {
		return
	}
	// Review requires both the repo toggle and the instance-level setting.
	if aiCfg.AutoReviewPRs && setting.AI.AllowAutoReview {
		if err := EnqueueOperation(&AIOperationRequest{
			RepoID:        pr.Issue.RepoID,
			Operation:     "code-review",
			Tier:          1,
			TriggerEvent:  "pull_request.opened",
			TriggerUserID: pr.Issue.PosterID,
			TargetID:      pr.Issue.ID,
			TargetType:    "pull",
		}); err != nil {
			log.Error("AI notifier: failed to enqueue code-review for PR #%d: %v", pr.Issue.Index, err)
		}
	}
}
// PullRequestSynchronized handles PR push events — triggers a re-review of
// the updated pull request. Pushes performed by the bot are ignored.
// Note: unlike NewPullRequest, the trigger user here is the pusher (doer),
// not the PR author.
func (n *aiNotifier) PullRequestSynchronized(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	if isAIUser(doer) {
		return
	}
	if err := pr.LoadIssue(ctx); err != nil {
		log.Error("AI notifier: pr.LoadIssue: %v", err)
		return
	}
	if err := pr.Issue.LoadRepo(ctx); err != nil {
		log.Error("AI notifier: pr.Issue.LoadRepo: %v", err)
		return
	}
	aiCfg := getAIConfig(ctx, pr.Issue.Repo)
	if aiCfg == nil {
		return
	}
	if aiCfg.AutoReviewPRs && setting.AI.AllowAutoReview {
		if err := EnqueueOperation(&AIOperationRequest{
			RepoID:        pr.Issue.RepoID,
			Operation:     "code-review",
			Tier:          1,
			TriggerEvent:  "pull_request.synchronized",
			TriggerUserID: doer.ID,
			TargetID:      pr.Issue.ID,
			TargetType:    "pull",
		}); err != nil {
			log.Error("AI notifier: failed to enqueue code-review for PR #%d sync: %v", pr.Issue.Index, err)
		}
	}
}
// IssueChangeLabels handles label changes — triggers a Tier 2 agent fix when
// one of the configured trigger labels is added. Removed labels are ignored,
// and at most one agent-fix is enqueued per label-change event even if
// several trigger labels were added at once.
func (n *aiNotifier) IssueChangeLabels(ctx context.Context, doer *user_model.User, issue *issues_model.Issue,
	addedLabels, removedLabels []*issues_model.Label,
) {
	if isAIUser(doer) {
		return
	}
	if err := issue.LoadRepo(ctx); err != nil {
		log.Error("AI notifier: issue.LoadRepo: %v", err)
		return
	}
	aiCfg := getAIConfig(ctx, issue.Repo)
	if aiCfg == nil {
		return
	}
	// Agent mode needs both the repo toggle and the instance-level allowance.
	if !aiCfg.AgentModeEnabled || !setting.AI.AllowAgentMode {
		return
	}
	// Check if any added label matches the agent trigger labels.
	// Comparison is by exact (case-sensitive) label name.
	for _, label := range addedLabels {
		if slices.Contains(aiCfg.AgentTriggerLabels, label.Name) {
			if err := EnqueueOperation(&AIOperationRequest{
				RepoID:        issue.RepoID,
				Operation:     "agent-fix",
				Tier:          2,
				TriggerEvent:  "issue.label_added",
				TriggerUserID: doer.ID,
				TargetID:      issue.ID,
				TargetType:    "issue",
			}); err != nil {
				log.Error("AI notifier: failed to enqueue agent-fix for issue #%d: %v", issue.Index, err)
			}
			return // Only trigger once per label change event
		}
	}
}
// isBotMentioned reports whether the configured AI bot account is
// @-mentioned anywhere in content. It returns false immediately when
// either the content or the configured bot username is empty.
func isBotMentioned(content string) bool {
	botName := setting.AI.BotUserName
	if content == "" || botName == "" {
		return false
	}
	return containsMention(content, botName)
}
// containsMention reports whether "@"+username occurs in text as a
// standalone mention: bounded on BOTH sides by characters that cannot be
// part of a username-like token. The original implementation only checked
// the character after the mention, so e-mail addresses such as
// "user@botname.com" falsely matched; it also rejected common punctuation
// like ':' or ')' immediately after the mention.
//
// NOTE(review): matching remains case-sensitive here, while IsAIUserName
// compares case-insensitively — confirm whether mentions should fold case.
func containsMention(text, username string) bool {
	target := "@" + username
	for i := 0; i+len(target) <= len(text); i++ {
		if text[i:i+len(target)] != target {
			continue
		}
		// Reject matches embedded in a longer token, e.g. the local part
		// of an e-mail address ("x@botname.com") or "@botnameXYZ".
		if i > 0 && isMentionWordByte(text[i-1]) {
			continue
		}
		if end := i + len(target); end < len(text) && isMentionWordByte(text[end]) {
			continue
		}
		return true
	}
	return false
}

// isMentionWordByte reports whether b can be part of a username-like token
// (ASCII letters, digits, '-', '_'); such bytes do not delimit a mention.
func isMentionWordByte(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') ||
		(b >= '0' && b <= '9') || b == '-' || b == '_'
}

246
services/ai/queue.go Normal file
View File

@@ -0,0 +1,246 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package ai
import (
"context"
"fmt"
"time"
ai_model "code.gitcaddy.com/server/v3/models/ai"
issues_model "code.gitcaddy.com/server/v3/models/issues"
repo_model "code.gitcaddy.com/server/v3/models/repo"
"code.gitcaddy.com/server/v3/models/unit"
user_model "code.gitcaddy.com/server/v3/models/user"
"code.gitcaddy.com/server/v3/modules/ai"
"code.gitcaddy.com/server/v3/modules/log"
"code.gitcaddy.com/server/v3/modules/setting"
issue_service "code.gitcaddy.com/server/v3/services/issue"
)
// AIOperationRequest represents a queued AI operation. Instances are
// serialized into the persistent "ai_operations" unique queue, so fields
// carry JSON tags.
// NOTE(review): "workflow-inspect" is listed below but processOperation has
// no case for it and would report "unknown operation" — confirm whether it
// is implemented elsewhere or still pending.
type AIOperationRequest struct {
	RepoID        int64  `json:"repo_id"`
	Operation     string `json:"operation"`       // "code-review", "issue-response", "issue-triage", "workflow-inspect", "agent-fix"
	Tier          int    `json:"tier"`            // 1 or 2
	TriggerEvent  string `json:"trigger_event"`   // e.g. "issue.created"
	TriggerUserID int64  `json:"trigger_user_id"` // who triggered the event
	TargetID      int64  `json:"target_id"`       // issue/PR ID
	TargetType    string `json:"target_type"`     // "issue", "pull"
}
// EnqueueOperation pushes req onto the asynchronous AI processing queue.
// It fails when the queue has not been initialized — i.e. AI is disabled
// or Init has not run yet.
func EnqueueOperation(req *AIOperationRequest) error {
	if aiOperationQueue != nil {
		return aiOperationQueue.Push(req)
	}
	return fmt.Errorf("AI operation queue not initialized")
}
// handleAIOperation is the queue worker that processes AI operations.
// Returning nil tells the queue every item was handled; failed operations
// are logged but NOT re-queued.
// NOTE(review): the commit message advertises "retry logic", but nothing
// here returns items for retry — confirm intended behavior.
func handleAIOperation(items ...*AIOperationRequest) []*AIOperationRequest {
	for _, req := range items {
		// context.Background(): operations run detached from any request
		// context and are not cancelled on graceful shutdown.
		if err := processOperation(context.Background(), req); err != nil {
			log.Error("AI operation failed [repo:%d op:%s target:%d]: %v", req.RepoID, req.Operation, req.TargetID, err)
		}
	}
	return nil
}
// processOperation executes one queued AI operation end to end:
// load repo → rate-limit check → create operation log → resolve provider
// from the repo→org→system cascade → dispatch by operation name →
// record duration/status (escalating on failure when configured).
// It returns the dispatch error, which the worker only logs.
func processOperation(ctx context.Context, req *AIOperationRequest) error {
	// Load repo
	repo, err := repo_model.GetRepositoryByID(ctx, req.RepoID)
	if err != nil {
		return fmt.Errorf("failed to load repo %d: %w", req.RepoID, err)
	}
	if err := repo.LoadOwner(ctx); err != nil {
		return fmt.Errorf("failed to load repo owner: %w", err)
	}
	// Rate limit check
	count, err := ai_model.CountRecentOperations(ctx, req.RepoID)
	if err != nil {
		return fmt.Errorf("failed to count recent operations: %w", err)
	}
	maxOps := setting.AI.MaxOperationsPerHour
	// An org-level MaxOpsPerHour > 0 overrides the system default.
	if repo.Owner.IsOrganization() {
		if orgSettings, err := ai_model.GetOrgAISettings(ctx, repo.OwnerID); err == nil && orgSettings != nil && orgSettings.MaxOpsPerHour > 0 {
			maxOps = orgSettings.MaxOpsPerHour
		}
	}
	// Over the limit: the request is dropped silently (warn only) — no
	// operation log entry is written and nil is returned so the queue
	// treats the item as handled.
	if count >= int64(maxOps) {
		log.Warn("AI rate limit exceeded for repo %d (%d/%d ops/hour)", req.RepoID, count, maxOps)
		return nil
	}
	// Create operation log entry
	opLog := &ai_model.OperationLog{
		RepoID:        req.RepoID,
		Operation:     req.Operation,
		Tier:          req.Tier,
		TriggerEvent:  req.TriggerEvent,
		TriggerUserID: req.TriggerUserID,
		TargetID:      req.TargetID,
		TargetType:    req.TargetType,
		Status:        ai_model.OperationStatusPending,
	}
	// Resolve provider config from cascade
	var orgID int64
	if repo.Owner.IsOrganization() {
		orgID = repo.OwnerID
	}
	// A missing AI unit degrades to an empty config (all overrides empty),
	// so Resolve* fall through to org/system values.
	aiUnit, err := repo.GetUnit(ctx, unit.TypeAI)
	var aiCfg *repo_model.AIConfig
	if err == nil {
		aiCfg = aiUnit.AIConfig()
	} else {
		aiCfg = &repo_model.AIConfig{}
	}
	opLog.Provider = ai_model.ResolveProvider(ctx, orgID, aiCfg.PreferredProvider)
	opLog.Model = ai_model.ResolveModel(ctx, orgID, aiCfg.PreferredModel)
	if err := ai_model.InsertOperationLog(ctx, opLog); err != nil {
		return fmt.Errorf("failed to insert operation log: %w", err)
	}
	start := time.Now()
	// Dispatch based on operation type
	var opErr error
	switch req.Operation {
	case "issue-response":
		opErr = handleIssueResponse(ctx, repo, aiCfg, opLog)
	case "issue-triage":
		opErr = handleIssueTriage(ctx, repo, aiCfg, opLog)
	case "code-review":
		opErr = handleCodeReview(ctx, repo, aiCfg, opLog)
	case "agent-fix":
		opErr = handleAgentFix(ctx, repo, aiCfg, opLog)
	default:
		opErr = fmt.Errorf("unknown operation: %s", req.Operation)
	}
	opLog.DurationMs = time.Since(start).Milliseconds()
	if opErr != nil {
		opLog.Status = ai_model.OperationStatusFailed
		opLog.ErrorMessage = opErr.Error()
		log.Error("AI operation %s failed for repo %d: %v", req.Operation, req.RepoID, opErr)
		// Escalate on failure if configured (escalateToStaff may overwrite
		// Status with OperationStatusEscalated before the update below).
		if aiCfg.EscalateToStaff {
			escalateToStaff(ctx, repo, aiCfg, opLog)
		}
	} else {
		opLog.Status = ai_model.OperationStatusSuccess
	}
	if err := ai_model.UpdateOperationLog(ctx, opLog); err != nil {
		log.Error("Failed to update operation log: %v", err)
	}
	return opErr
}
// handleIssueResponse generates an AI reply for the target issue and posts
// it as a comment from the AI bot user. Token usage and the resulting
// comment ID are recorded on the operation log; the caller persists opLog.
func handleIssueResponse(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) error {
	issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
	if err != nil {
		return fmt.Errorf("failed to load issue %d: %w", opLog.TargetID, err)
	}
	// Attach the already-loaded repo to avoid a redundant lookup downstream.
	issue.Repo = repo
	client := ai.GetClient()
	resp, err := client.GenerateIssueResponse(ctx, &ai.GenerateIssueResponseRequest{
		RepoID:             repo.ID,
		IssueID:            issue.ID,
		Title:              issue.Title,
		Body:               issue.Content,
		CustomInstructions: aiCfg.IssueInstructions,
	})
	if err != nil {
		return fmt.Errorf("AI GenerateIssueResponse failed: %w", err)
	}
	opLog.InputTokens = resp.InputTokens
	opLog.OutputTokens = resp.OutputTokens
	// Post the response as a comment from the AI bot user.
	// NOTE(review): resp.Confidence is not checked here — low-confidence
	// answers are posted unconditionally; confirm whether a threshold
	// (and escalation) was intended.
	botUser := user_model.NewAIUser()
	comment, err := issue_service.CreateIssueComment(ctx, botUser, repo, issue, resp.Response, nil)
	if err != nil {
		return fmt.Errorf("failed to create comment: %w", err)
	}
	opLog.ResultCommentID = comment.ID
	return nil
}
// handleIssueTriage asks the AI to triage the target issue and applies the
// suggested labels (only those that already exist in the repository; missing
// labels are skipped with a warning, label-add failures are logged).
// NOTE(review): aiCfg is currently unused — TriageIssue presumably resolves
// its own configuration; confirm.
func handleIssueTriage(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) error {
	issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
	if err != nil {
		return fmt.Errorf("failed to load issue %d: %w", opLog.TargetID, err)
	}
	issue.Repo = repo
	triageResp, err := TriageIssue(ctx, issue)
	if err != nil {
		return fmt.Errorf("AI TriageIssue failed: %w", err)
	}
	// Apply suggested labels as the AI bot user.
	botUser := user_model.NewAIUser()
	for _, labelName := range triageResp.SuggestedLabels {
		label, err := issues_model.GetLabelInRepoByName(ctx, repo.ID, labelName)
		if err != nil {
			log.Warn("AI suggested label %q not found in repo %d", labelName, repo.ID)
			continue
		}
		if err := issue_service.AddLabel(ctx, issue, botUser, label); err != nil {
			log.Error("Failed to add label %q to issue %d: %v", labelName, issue.Index, err)
		}
	}
	return nil
}
// handleCodeReview runs an AI review of the pull request behind the target
// issue and posts the review summary as a bot comment, recording the
// comment ID on the operation log.
// NOTE(review): aiCfg (including ReviewInstructions) is unused here, unlike
// handleIssueResponse which forwards IssueInstructions — confirm whether
// ReviewPullRequest resolves instructions itself.
func handleCodeReview(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) error {
	issue, err := issues_model.GetIssueByID(ctx, opLog.TargetID)
	if err != nil {
		return fmt.Errorf("failed to load issue %d: %w", opLog.TargetID, err)
	}
	if err := issue.LoadPullRequest(ctx); err != nil {
		return fmt.Errorf("failed to load pull request: %w", err)
	}
	issue.Repo = repo
	reviewResp, err := ReviewPullRequest(ctx, issue.PullRequest)
	if err != nil {
		return fmt.Errorf("AI ReviewPullRequest failed: %w", err)
	}
	// Post the review summary as a comment
	botUser := user_model.NewAIUser()
	comment, err := issue_service.CreateIssueComment(ctx, botUser, repo, issue, reviewResp.Summary, nil)
	if err != nil {
		return fmt.Errorf("failed to create review comment: %w", err)
	}
	opLog.ResultCommentID = comment.ID
	return nil
}
// handleAgentFix delegates a Tier 2 operation to an Actions workflow (the
// Claude Code headless agent) instead of handling it inline, recording the
// resulting action run ID on the operation log for tracking.
func handleAgentFix(ctx context.Context, repo *repo_model.Repository, aiCfg *repo_model.AIConfig, opLog *ai_model.OperationLog) error {
	runID, dispatchErr := triggerAgentWorkflow(ctx, repo, aiCfg, opLog)
	if dispatchErr != nil {
		return fmt.Errorf("failed to trigger agent workflow: %w", dispatchErr)
	}
	opLog.ActionRunID = runID
	return nil
}