Replace manual HTTP/JSON RPC implementation with generated gRPC/Connect client, providing type-safe plugin communication. Code Generation: - Generate plugin.pb.go and pluginv1connect/plugin.connect.go from plugin.proto - Add generate-plugin-proto Makefile target - Delete hand-written types.go (replaced by generated code) ExternalPluginManager Refactoring: - Replace httpClient with pluginv1connect.PluginServiceClient - Use h2c (cleartext HTTP/2) transport for gRPC without TLS - Replace all manual callRPC/callRPCWithContext calls with typed Connect methods - Remove JSON serialization/deserialization code - Simplify error handling with native gRPC status codes Benefits: - Type safety: compile-time verification of request/response types - Protocol compatibility: standard gRPC wire format - Reduced code: ~100 lines of manual RPC code removed - Better errors: structured gRPC status codes instead of string parsing - Matches existing Actions runner pattern (Connect RPC over HTTP/2) This completes the plugin framework migration to production-grade RPC transport.
137 lines
3.2 KiB
Go
// Copyright 2026 MarketAlly. All rights reserved.
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
package plugins
|
|
|
|
import (
|
|
"context"
|
|
"maps"
|
|
"time"
|
|
|
|
"connectrpc.com/connect"
|
|
|
|
"code.gitcaddy.com/server/v3/modules/graceful"
|
|
"code.gitcaddy.com/server/v3/modules/log"
|
|
pluginv1 "code.gitcaddy.com/server/v3/modules/plugins/pluginv1"
|
|
)
|
|
|
|
const (
|
|
maxConsecutiveFailures = 3
|
|
)
|
|
|
|
// StartHealthMonitoring begins periodic health checks for all external plugins.
|
|
// It runs as a background goroutine managed by the graceful manager.
|
|
func (m *ExternalPluginManager) StartHealthMonitoring() {
|
|
interval := m.config.HealthCheckInterval
|
|
if interval <= 0 {
|
|
interval = 30 * time.Second
|
|
}
|
|
|
|
graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
|
|
ticker := time.NewTicker(interval)
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
select {
|
|
case <-ctx.Done():
|
|
return
|
|
case <-m.ctx.Done():
|
|
return
|
|
case <-ticker.C:
|
|
m.checkAllPlugins(ctx)
|
|
}
|
|
}
|
|
})
|
|
}
|
|
|
|
func (m *ExternalPluginManager) checkAllPlugins(ctx context.Context) {
|
|
m.mu.RLock()
|
|
plugins := make(map[string]*ManagedPlugin, len(m.plugins))
|
|
maps.Copy(plugins, m.plugins)
|
|
m.mu.RUnlock()
|
|
|
|
for name, mp := range plugins {
|
|
if err := m.checkPlugin(ctx, name, mp); err != nil {
|
|
log.Warn("Health check failed for plugin %s: %v", name, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
func (m *ExternalPluginManager) checkPlugin(ctx context.Context, name string, mp *ManagedPlugin) error {
|
|
healthCtx, cancel := context.WithTimeout(ctx, mp.config.HealthTimeout)
|
|
defer cancel()
|
|
|
|
resp, err := mp.client.HealthCheck(healthCtx, connect.NewRequest(&pluginv1.HealthCheckRequest{}))
|
|
|
|
mp.mu.Lock()
|
|
defer mp.mu.Unlock()
|
|
|
|
if err != nil {
|
|
mp.failCount++
|
|
if mp.failCount >= maxConsecutiveFailures {
|
|
if mp.status != PluginStatusOffline {
|
|
log.Error("Plugin %s is now offline after %d consecutive health check failures", name, mp.failCount)
|
|
mp.status = PluginStatusOffline
|
|
}
|
|
|
|
// Auto-restart managed plugins
|
|
if mp.config.IsManaged() && mp.process != nil {
|
|
log.Info("Attempting to restart managed plugin %s", name)
|
|
go m.restartManagedPlugin(name, mp)
|
|
}
|
|
}
|
|
return err
|
|
}
|
|
|
|
// Health check succeeded
|
|
if mp.status != PluginStatusOnline {
|
|
log.Info("Plugin %s is back online", name)
|
|
}
|
|
mp.failCount = 0
|
|
mp.status = PluginStatusOnline
|
|
mp.lastSeen = time.Now()
|
|
|
|
if !resp.Msg.Healthy {
|
|
log.Warn("Plugin %s reports unhealthy: %s", name, resp.Msg.Status)
|
|
mp.status = PluginStatusError
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (m *ExternalPluginManager) restartManagedPlugin(name string, mp *ManagedPlugin) {
|
|
// Kill the old process first
|
|
if mp.process != nil {
|
|
_ = mp.process.Kill()
|
|
mp.process = nil
|
|
}
|
|
|
|
mp.mu.Lock()
|
|
mp.status = PluginStatusStarting
|
|
mp.mu.Unlock()
|
|
|
|
if err := m.startManagedPlugin(mp); err != nil {
|
|
log.Error("Failed to restart managed plugin %s: %v", name, err)
|
|
mp.mu.Lock()
|
|
mp.status = PluginStatusError
|
|
mp.mu.Unlock()
|
|
return
|
|
}
|
|
|
|
if err := m.initializePlugin(mp); err != nil {
|
|
log.Error("Failed to re-initialize managed plugin %s: %v", name, err)
|
|
mp.mu.Lock()
|
|
mp.status = PluginStatusError
|
|
mp.mu.Unlock()
|
|
return
|
|
}
|
|
|
|
mp.mu.Lock()
|
|
mp.status = PluginStatusOnline
|
|
mp.lastSeen = time.Now()
|
|
mp.failCount = 0
|
|
mp.mu.Unlock()
|
|
|
|
log.Info("Managed plugin %s restarted successfully", name)
|
|
}
|