All checks were successful
Build and Release / Create Release (push) Has been skipped
Build and Release / Unit Tests (push) Successful in 6m49s
Build and Release / Integration Tests (PostgreSQL) (push) Successful in 7m6s
Build and Release / Lint (push) Successful in 7m15s
Build and Release / Build Binaries (amd64, windows, windows-latest) (push) Has been skipped
Build and Release / Build Binaries (amd64, darwin, macos) (push) Has been skipped
Build and Release / Build Binaries (amd64, linux, linux-latest) (push) Has been skipped
Build and Release / Build Binaries (arm64, darwin, macos) (push) Has been skipped
Build and Release / Build Binary (linux/arm64) (push) Has been skipped
Implement critical production readiness features for AI integration: per-request provider config, admin dashboard, workflow inspection, and plugin framework foundation. Per-Request Provider Config: - Add ProviderConfig struct to all AI request types - Update queue to resolve provider/model/API key from cascade (repo > org > system) - Pass resolved config to AI sidecar on every request - Fixes multi-tenant issue where all orgs shared sidecar's hardcoded config Admin AI Dashboard: - Add /admin/ai page with sidecar health status - Display global operation stats (total, 24h, success/fail/escalated counts) - Show operations by tier, top 5 repos, token usage - Recent operations table with repo, operation, status, duration - Add GetGlobalOperationStats model method Workflow Inspection: - Add InspectWorkflow client method and types - Implement workflow-inspect queue handler - Add notifier trigger on workflow file push - Analyzes YAML for syntax errors, security issues, best practices - Returns structured issues with line numbers and suggested fixes Plugin Framework (Phase 5 Foundation): - Add external plugin config loading from app.ini - Define ExternalPlugin interface and manager - Add plugin.proto contract (Initialize, Shutdown, HealthCheck, OnEvent, HandleHTTP) - Implement health monitoring with auto-restart for managed plugins - Add event routing to subscribed plugins - HTTP proxy support for plugin-served routes This completes Tasks 1-4 from the production readiness plan and establishes the foundation for managed plugin lifecycle.
208 lines
6.5 KiB
Go
208 lines
6.5 KiB
Go
// Copyright 2026 MarketAlly. All rights reserved.
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
package ai
|
|
|
|
import (
|
|
"context"
|
|
|
|
"code.gitcaddy.com/server/v3/models/db"
|
|
"code.gitcaddy.com/server/v3/modules/timeutil"
|
|
|
|
"xorm.io/builder"
|
|
)
|
|
|
|
// init registers the OperationLog model with the db module's model
// registry so its table is managed alongside the other models.
func init() {
	db.RegisterModel(new(OperationLog))
}
|
|
|
|
// OperationLog records every AI operation for auditing.
// One row per AI request; Status is updated via UpdateOperationLog as the
// operation completes (see the OperationStatus* constants below).
type OperationLog struct {
	ID     int64 `xorm:"pk autoincr"`
	RepoID int64 `xorm:"INDEX NOT NULL"` // repository the operation ran against

	Operation    string `xorm:"VARCHAR(50) NOT NULL"`  // operation kind: "code-review", "issue-response", etc.
	Tier         int    `xorm:"NOT NULL"`              // execution tier: 1 or 2
	TriggerEvent string `xorm:"VARCHAR(100) NOT NULL"` // event that kicked the operation off

	TriggerUserID int64 `xorm:"INDEX"` // user whose action triggered it, if any

	// Target of the operation (what the AI acted on).
	TargetID   int64  `xorm:"INDEX"`       // issue/PR ID
	TargetType string `xorm:"VARCHAR(20)"` // "issue", "pull", "commit"

	// Provider/model used and the token usage reported for the request.
	Provider     string `xorm:"VARCHAR(20)"`
	Model        string `xorm:"VARCHAR(100)"`
	InputTokens  int    `xorm:"DEFAULT 0"`
	OutputTokens int    `xorm:"DEFAULT 0"`

	Status string `xorm:"VARCHAR(20) NOT NULL"` // "success", "failed", "escalated", "pending" (OperationStatus* constants)

	ResultCommentID int64 `xorm:"DEFAULT 0"` // comment holding the result, if one was posted
	ActionRunID     int64 `xorm:"DEFAULT 0"` // for Tier 2

	ErrorMessage string `xorm:"TEXT"`      // error details; presumably set when Status is "failed" — confirm at call sites
	DurationMs   int64  `xorm:"DEFAULT 0"` // wall-clock duration of the operation in milliseconds

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` // set automatically by xorm on insert (CREATED tag)
}
|
|
|
|
// TableName returns the table name for OperationLog. It implements
// xorm's TableName convention, overriding the default derived name.
func (OperationLog) TableName() string {
	return "ai_operation_log"
}
|
|
|
|
// OperationStatus constants — the allowed values of OperationLog.Status.
const (
	OperationStatusPending   = "pending"   // recorded but no result yet
	OperationStatusSuccess   = "success"   // completed with a result
	OperationStatusFailed    = "failed"    // completed with an error
	OperationStatusEscalated = "escalated" // escalated (presumably Tier 1 -> Tier 2) — confirm at call sites
)
|
|
|
|
// InsertOperationLog creates a new operation log entry
|
|
func InsertOperationLog(ctx context.Context, log *OperationLog) error {
|
|
return db.Insert(ctx, log)
|
|
}
|
|
|
|
// UpdateOperationLog updates an existing operation log entry
|
|
func UpdateOperationLog(ctx context.Context, log *OperationLog) error {
|
|
_, err := db.GetEngine(ctx).ID(log.ID).AllCols().Update(log)
|
|
return err
|
|
}
|
|
|
|
// GetOperationLog returns a single operation log entry by ID
|
|
func GetOperationLog(ctx context.Context, id int64) (*OperationLog, error) {
|
|
log := &OperationLog{}
|
|
has, err := db.GetEngine(ctx).ID(id).Get(log)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if !has {
|
|
return nil, nil
|
|
}
|
|
return log, nil
|
|
}
|
|
|
|
// FindOperationLogsOptions represents options for finding operation logs.
// Zero-valued filter fields are ignored (no condition is added for them);
// see ToConds for the exact rules.
type FindOperationLogsOptions struct {
	db.ListOptions        // pagination
	RepoID         int64  // filter by repository when > 0
	Operation      string // filter by operation kind when non-empty
	Status         string // filter by status when non-empty
	Tier           int    // filter by tier when > 0
}
|
|
|
|
func (opts FindOperationLogsOptions) ToConds() builder.Cond {
|
|
cond := builder.NewCond()
|
|
if opts.RepoID > 0 {
|
|
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
|
|
}
|
|
if opts.Operation != "" {
|
|
cond = cond.And(builder.Eq{"operation": opts.Operation})
|
|
}
|
|
if opts.Status != "" {
|
|
cond = cond.And(builder.Eq{"status": opts.Status})
|
|
}
|
|
if opts.Tier > 0 {
|
|
cond = cond.And(builder.Eq{"tier": opts.Tier})
|
|
}
|
|
return cond
|
|
}
|
|
|
|
// ToOrders returns the ORDER BY clause for listing operation logs:
// newest entries first.
func (opts FindOperationLogsOptions) ToOrders() string {
	return "created_unix DESC"
}
|
|
|
|
// CountRecentOperations counts operations in the last hour for rate limiting
|
|
func CountRecentOperations(ctx context.Context, repoID int64) (int64, error) {
|
|
oneHourAgo := timeutil.TimeStampNow() - 3600
|
|
return db.GetEngine(ctx).Where("repo_id = ? AND created_unix > ?", repoID, oneHourAgo).Count(new(OperationLog))
|
|
}
|
|
|
|
// GlobalOperationStats holds aggregate AI operation statistics for the
// admin dashboard. It is populated by GetGlobalOperationStats.
type GlobalOperationStats struct {
	TotalOperations   int64         `json:"total_operations"`    // all-time operation count
	Operations24h     int64         `json:"operations_24h"`      // operations in the last 24 hours
	SuccessCount      int64         `json:"success_count"`       // all-time count with status "success"
	FailedCount       int64         `json:"failed_count"`        // all-time count with status "failed"
	EscalatedCount    int64         `json:"escalated_count"`     // all-time count with status "escalated"
	PendingCount      int64         `json:"pending_count"`       // all-time count with status "pending"
	CountByTier       map[int]int64 `json:"count_by_tier"`       // tier -> operation count
	TopRepos          []RepoOpCount `json:"top_repos"`           // top 5 repos by operation count
	TotalInputTokens  int64         `json:"total_input_tokens"`  // SUM(input_tokens) over all entries
	TotalOutputTokens int64         `json:"total_output_tokens"` // SUM(output_tokens) over all entries
}
|
|
|
|
// RepoOpCount holds a repo's operation count for the top-repos list.
// Field tags match both the aggregate query columns and the JSON output.
type RepoOpCount struct {
	RepoID int64 `json:"repo_id"` // repository ID
	Count  int64 `json:"count"`   // number of operations recorded for that repo
}
|
|
|
|
// GetGlobalOperationStats returns aggregate statistics across all repos for the admin dashboard
|
|
func GetGlobalOperationStats(ctx context.Context) (*GlobalOperationStats, error) {
|
|
e := db.GetEngine(ctx)
|
|
stats := &GlobalOperationStats{
|
|
CountByTier: make(map[int]int64),
|
|
}
|
|
|
|
// Total operations
|
|
total, err := e.Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
stats.TotalOperations = total
|
|
|
|
// Operations in last 24 hours
|
|
oneDayAgo := timeutil.TimeStampNow() - 86400
|
|
stats.Operations24h, err = e.Where("created_unix > ?", oneDayAgo).Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
// Counts by status
|
|
stats.SuccessCount, err = e.Where("status = ?", OperationStatusSuccess).Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
stats.FailedCount, err = e.Where("status = ?", OperationStatusFailed).Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
stats.EscalatedCount, err = e.Where("status = ?", OperationStatusEscalated).Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
stats.PendingCount, err = e.Where("status = ?", OperationStatusPending).Count(new(OperationLog))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
// Counts by tier
|
|
type tierCount struct {
|
|
Tier int `xorm:"tier"`
|
|
Count int64 `xorm:"count"`
|
|
}
|
|
var tierCounts []tierCount
|
|
if err := e.Table("ai_operation_log").Select("tier, count(*) as count").GroupBy("tier").Find(&tierCounts); err != nil {
|
|
return nil, err
|
|
}
|
|
for _, tc := range tierCounts {
|
|
stats.CountByTier[tc.Tier] = tc.Count
|
|
}
|
|
|
|
// Top 5 repos by operation count
|
|
var topRepos []RepoOpCount
|
|
if err := e.Table("ai_operation_log").Select("repo_id, count(*) as count").
|
|
GroupBy("repo_id").OrderBy("count DESC").Limit(5).Find(&topRepos); err != nil {
|
|
return nil, err
|
|
}
|
|
stats.TopRepos = topRepos
|
|
|
|
// Total tokens
|
|
type tokenSum struct {
|
|
InputTokens int64 `xorm:"input_tokens"`
|
|
OutputTokens int64 `xorm:"output_tokens"`
|
|
}
|
|
var ts tokenSum
|
|
if _, err := e.Table("ai_operation_log").Select("COALESCE(SUM(input_tokens),0) as input_tokens, COALESCE(SUM(output_tokens),0) as output_tokens").Get(&ts); err != nil {
|
|
return nil, err
|
|
}
|
|
stats.TotalInputTokens = ts.InputTokens
|
|
stats.TotalOutputTokens = ts.OutputTokens
|
|
|
|
return stats, nil
|
|
}
|