2
0

Compare commits

..

8 Commits

Author SHA1 Message Date
GitCaddy
0db86bc6a4 chore: Fix linter issues and update copyrights
Some checks failed
CI / build-and-test (push) Failing after 55s
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
- Format Go files with gofmt
- Update copyrights to include MarketAlly
- Add MarketAlly copyright to files we created

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 09:41:16 +00:00
GitCaddy
f5b22c4149 feat: Add build cache cleanup and CLI cleanup command
Some checks failed
CI / build-and-test (push) Failing after 30s
- Add cleanup for common build tool caches (Go, npm, NuGet, Gradle, Maven, pip, Cargo)
- Build caches cleaned for files older than 7 days
- Add gitcaddy-runner cleanup CLI command for manual cleanup trigger
- Fixes disk space issues from accumulated CI build artifacts

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 09:26:21 +00:00
GitCaddy
0ba2e0c3d5 feat: Add CPU load monitoring and cleanup support
Some checks failed
CI / build-and-test (push) Failing after 55s
- Add CPUInfo struct with load average and percentage
- Add detectCPULoad() for Linux, macOS, and Windows
- Add cleanup package for disk space management
- Handle RequestCleanup signal from server
- Report CPU load in capabilities to server

🤖 Generated with Claude Code
2026-01-14 08:48:54 +00:00
GitCaddy
8a54ec62d4 fix: Use linux-latest instead of ubuntu-latest
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
2026-01-14 07:39:18 +00:00
GitCaddy
587ac42be4 feat: Rebrand to gitcaddy-runner with upload helper
Some checks failed
Release / build (amd64, linux) (push) Successful in 1m12s
Release / build (amd64, darwin) (push) Successful in 1m16s
Release / build (arm64, darwin) (push) Successful in 1m0s
Release / build (amd64, windows) (push) Successful in 1m13s
Release / build (arm64, linux) (push) Successful in 45s
Release / release (push) Successful in 50s
CI / build-and-test (push) Has been cancelled
- Rename binary from act_runner to gitcaddy-runner
- Update all user-facing strings (Gitea → GitCaddy)
- Add gitcaddy-upload helper with automatic retry for large files
- Add upload helper package (internal/pkg/artifact)
- Update Docker image name to marketally/gitcaddy-runner

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 07:26:46 +00:00
GitCaddy
56dcda0d5e fix: remove binaries from git tracking
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 1m22s
Release / build (arm64, darwin) (push) Successful in 2m9s
Release / build (amd64, linux) (push) Successful in 2m19s
Release / build (amd64, windows) (push) Successful in 2m22s
Release / build (arm64, linux) (push) Successful in 1m9s
Release / release (push) Successful in 21s
2026-01-12 01:36:19 +00:00
GitCaddy
e44f0c403b fix: remove accidentally committed binaries and add to gitignore
Some checks failed
CI / build-and-test (push) Has been cancelled
2026-01-12 01:35:38 +00:00
GitCaddy
fb1498bf7a fix: add -a flag to force rebuild and prevent cached binaries
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 1m14s
Release / build (amd64, windows) (push) Successful in 1m38s
Release / build (amd64, linux) (push) Successful in 2m52s
Release / build (arm64, darwin) (push) Successful in 2m50s
Release / build (arm64, linux) (push) Successful in 1m48s
Release / release (push) Successful in 47s
2026-01-12 01:28:20 +00:00
20 changed files with 705 additions and 24 deletions

View File

@@ -44,7 +44,7 @@ jobs:
fi
echo "Building version: ${VERSION}"
CGO_ENABLED=0 GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} \
go build -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=${VERSION}" \
go build -a -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=${VERSION}" \
-o act_runner-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}
- name: Upload artifact

View File

@@ -8,7 +8,7 @@ on:
jobs:
build-and-test:
runs-on: ubuntu-latest
runs-on: linux-latest
steps:
- uses: actions/checkout@v4

1
.gitignore vendored
View File

@@ -12,3 +12,4 @@ coverage.txt
__debug_bin
# gorelease binary folder
dist
act_runner-*

View File

@@ -1,5 +1,5 @@
DIST := dist
EXECUTABLE := act_runner
EXECUTABLE := gitcaddy-runner
GOFMT ?= gofumpt -l
DIST_DIRS := $(DIST)/binaries $(DIST)/release
GO ?= go
@@ -15,7 +15,7 @@ WINDOWS_ARCHS ?= windows/amd64
GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
DOCKER_IMAGE ?= gitea/act_runner
DOCKER_IMAGE ?= marketally/gitcaddy-runner
DOCKER_TAG ?= nightly
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless

View File

Binary file not shown.

View File

Binary file not shown.

View File

Binary file not shown.

View File

Binary file not shown.

38
cmd/upload-helper/main.go Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package main
import (
"flag"
"fmt"
"os"
"gitea.com/gitea/act_runner/internal/pkg/artifact"
)
// main is the entry point for the gitcaddy-upload helper. It parses the
// command-line flags, prints usage and exits non-zero when a required flag is
// missing, and otherwise delegates the transfer to artifact.UploadHelper.
func main() {
	var (
		url     = flag.String("url", "", "Upload URL")
		token   = flag.String("token", "", "Auth token")
		file    = flag.String("file", "", "File to upload")
		retries = flag.Int("retries", 5, "Maximum retry attempts")
	)
	flag.Parse()

	// All three of url/token/file are mandatory.
	if *url == "" || *token == "" || *file == "" {
		fmt.Fprintf(os.Stderr, "GitCaddy Upload Helper - Reliable file uploads with retry\n\n")
		fmt.Fprintf(os.Stderr, "Usage: gitcaddy-upload -url URL -token TOKEN -file FILE\n\n")
		fmt.Fprintf(os.Stderr, "Options:\n")
		flag.PrintDefaults()
		os.Exit(1)
	}

	uploader := artifact.NewUploadHelper()
	uploader.MaxRetries = *retries
	err := uploader.UploadWithRetry(*url, *token, *file)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Upload failed: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Upload succeeded!")
}

2
go.mod
View File

@@ -111,4 +111,4 @@ replace github.com/go-git/go-git/v5 => github.com/go-git/go-git/v5 v5.16.2
replace github.com/distribution/reference v0.6.0 => github.com/distribution/reference v0.5.0
// Use GitCaddy fork with capability support
replace code.gitea.io/actions-proto-go => git.marketally.com/gitcaddy/actions-proto-go v0.5.7
replace code.gitea.io/actions-proto-go => git.marketally.com/gitcaddy/actions-proto-go v0.5.8

2
go.sum
View File

@@ -8,6 +8,8 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
git.marketally.com/gitcaddy/actions-proto-go v0.5.7 h1:RUbafr3Vkw2l4WfSwa+oF+Ihakbm05W0FlAmXuQrDJc=
git.marketally.com/gitcaddy/actions-proto-go v0.5.7/go.mod h1:RPu21UoRD3zSAujoZR6LJwuVNa2uFRBveadslczCRfQ=
git.marketally.com/gitcaddy/actions-proto-go v0.5.8 h1:MBipeHvY6A0jcobvziUtzgatZTrV4fs/HE1rPQxREN4=
git.marketally.com/gitcaddy/actions-proto-go v0.5.8/go.mod h1:RPu21UoRD3zSAujoZR6LJwuVNa2uFRBveadslczCRfQ=
gitea.com/gitea/act v0.261.7-0.20251202193638-5417d3ac6742 h1:ulcquQluJbmNASkh6ina70LvcHEa9eWYfQ+DeAZ0VEE=
gitea.com/gitea/act v0.261.7-0.20251202193638-5417d3ac6742/go.mod h1:Pg5C9kQY1CEA3QjthjhlrqOC/QOT5NyWNjOjRHw23Ok=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=

View File

@@ -1,4 +1,4 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// Copyright 2022 The Gitea Authors and MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
@@ -10,14 +10,15 @@ import (
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/pkg/cleanup"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
func Execute(ctx context.Context) {
// ./act_runner
// ./gitcaddy-runner
rootCmd := &cobra.Command{
Use: "act_runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Use: "gitcaddy-runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
Version: ver.Version(),
@@ -26,7 +27,7 @@ func Execute(ctx context.Context) {
configFile := ""
rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "Config file path")
// ./act_runner register
// ./gitcaddy-runner register
var regArgs registerArgs
registerCmd := &cobra.Command{
Use: "register",
@@ -35,14 +36,14 @@ func Execute(ctx context.Context) {
RunE: runRegister(ctx, &regArgs, &configFile), // must use a pointer to regArgs
}
registerCmd.Flags().BoolVar(&regArgs.NoInteractive, "no-interactive", false, "Disable interactive mode")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Gitea instance address")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "GitCaddy instance address")
registerCmd.Flags().StringVar(&regArgs.Token, "token", "", "Runner token")
registerCmd.Flags().StringVar(&regArgs.RunnerName, "name", "", "Runner name")
registerCmd.Flags().StringVar(&regArgs.Labels, "labels", "", "Runner tags, comma separated")
registerCmd.Flags().BoolVar(&regArgs.Ephemeral, "ephemeral", false, "Configure the runner to be ephemeral and only ever be able to pick a single job (stricter than --once)")
rootCmd.AddCommand(registerCmd)
// ./act_runner daemon
// ./gitcaddy-runner daemon
var daemArgs daemonArgs
daemonCmd := &cobra.Command{
Use: "daemon",
@@ -53,10 +54,10 @@ func Execute(ctx context.Context) {
daemonCmd.Flags().BoolVar(&daemArgs.Once, "once", false, "Run one job then exit")
rootCmd.AddCommand(daemonCmd)
// ./act_runner exec
// ./gitcaddy-runner exec
rootCmd.AddCommand(loadExecCmd(ctx))
// ./act_runner config
// ./gitcaddy-runner config
rootCmd.AddCommand(&cobra.Command{
Use: "generate-config",
Short: "Generate an example config file",
@@ -66,7 +67,7 @@ func Execute(ctx context.Context) {
},
})
// ./act_runner cache-server
// ./gitcaddy-runner cache-server
var cacheArgs cacheServerArgs
cacheCmd := &cobra.Command{
Use: "cache-server",
@@ -79,6 +80,31 @@ func Execute(ctx context.Context) {
cacheCmd.Flags().Uint16VarP(&cacheArgs.Port, "port", "p", 0, "Port of the cache server")
rootCmd.AddCommand(cacheCmd)
// ./gitcaddy-runner cleanup
cleanupCmd := &cobra.Command{
Use: "cleanup",
Short: "Manually trigger cleanup to free disk space",
Args: cobra.MaximumNArgs(0),
RunE: func(_ *cobra.Command, _ []string) error {
cfg, err := config.LoadDefault(configFile)
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
result, err := cleanup.RunCleanup(ctx, cfg)
if err != nil {
return fmt.Errorf("cleanup failed: %w", err)
}
fmt.Printf("Cleanup completed: freed %d bytes, deleted %d files in %s\n", result.BytesFreed, result.FilesDeleted, result.Duration)
if len(result.Errors) > 0 {
fmt.Printf("Warnings: %d errors occurred\n", len(result.Errors))
for _, e := range result.Errors {
fmt.Printf(" - %s\n", e)
}
}
return nil
},
}
rootCmd.AddCommand(cleanupCmd)
// hide completion command
rootCmd.CompletionOptions.HiddenDefaultCmd = true

View File

@@ -175,7 +175,7 @@ func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) fu
// declare the labels of the runner before fetching tasks
resp, err := runner.Declare(ctx, ls.Names(), capabilitiesJson)
if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
log.Errorf("Your Gitea version is too old to support runner declare, please upgrade to v1.21 or later")
log.Errorf("Your GitCaddy version is too old to support runner declare, please upgrade to v1.21 or later")
return err
} else if err != nil {
log.WithError(err).Error("fail to invoke Declare")

View File

@@ -505,7 +505,7 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")
execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "docker.gitea.com/runner-images:ubuntu-latest", "Docker image to use. Use \"-self-hosted\" to run directly on the host.")
execCmd.PersistentFlags().StringVarP(&execArg.network, "network", "", "", "Specify the network to which the container will connect")
execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "Gitea instance to use.")
execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "GitCaddy instance to use.")
return execCmd
}

View File

@@ -272,7 +272,7 @@ func printStageHelp(stage registerStage) {
case StageOverwriteLocalConfig:
log.Infoln("Runner is already registered, overwrite local config? [y/N]")
case StageInputInstance:
log.Infoln("Enter the Gitea instance URL (for example, https://gitea.com/):")
log.Infoln("Enter the GitCaddy instance URL (for example, https://gitea.com/):")
case StageInputToken:
log.Infoln("Enter the runner token:")
case StageInputRunnerName:
@@ -341,7 +341,7 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
}
if err != nil {
log.WithError(err).
Errorln("Cannot ping the Gitea instance server")
Errorln("Cannot ping the GitCaddy instance server")
// TODO: if ping failed, retry or exit
time.Sleep(time.Second)
} else {

View File

@@ -1,4 +1,4 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2023 The Gitea Authors and MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package poll
@@ -16,6 +16,7 @@ import (
"golang.org/x/time/rate"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/cleanup"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/envcheck"
@@ -205,6 +206,20 @@ func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
}()
}
// Check if server requested a cleanup
if resp.Msg.RequestCleanup {
log.Info("Server requested cleanup, running now...")
go func() {
result, err := cleanup.RunCleanup(ctx, p.cfg)
if err != nil {
log.Errorf("Cleanup failed: %v", err)
} else if result != nil {
log.Infof("Cleanup completed: freed %d bytes, deleted %d files in %s",
result.BytesFreed, result.FilesDeleted, result.Duration)
}
}()
}
if resp.Msg.TasksVersion > v {
p.tasksVersion.CompareAndSwap(v, resp.Msg.TasksVersion)
}

View File

@@ -0,0 +1,145 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package artifact
import (
"bytes"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"time"
log "github.com/sirupsen/logrus"
)
// UploadHelper handles reliable file uploads with retry logic.
type UploadHelper struct {
	MaxRetries     int           // maximum number of upload attempts before giving up
	RetryDelay     time.Duration // base delay between attempts; UploadWithRetry scales it linearly by attempt number
	ChunkSize      int64         // intended chunk size in bytes — NOTE(review): not consulted by doUpload yet, confirm
	ConnectTimeout time.Duration // intended connect timeout — NOTE(review): not consulted by the visible code, confirm
	MaxTimeout     time.Duration // hard cap on one whole upload attempt (used as http.Client.Timeout)
}
// NewUploadHelper constructs an UploadHelper preconfigured with defaults
// suitable for large CI artifacts: 5 attempts, a 10-second base retry delay,
// 10MB chunks, a 2-minute connect timeout and a 1-hour overall cap.
func NewUploadHelper() *UploadHelper {
	h := &UploadHelper{}
	h.MaxRetries = 5
	h.RetryDelay = 10 * time.Second
	h.ChunkSize = 10 << 20 // 10MB
	h.ConnectTimeout = 2 * time.Minute
	h.MaxTimeout = time.Hour
	return h
}
// UploadWithRetry uploads a file with automatic retry on failure.
// It makes up to u.MaxRetries attempts; before each retry it sleeps
// u.RetryDelay multiplied by the attempt number (linear backoff) and issues a
// cheap HEAD request to warm up DNS/TCP before the real upload. Returns nil
// on the first successful attempt, or the last error (wrapped) after all
// attempts fail.
func (u *UploadHelper) UploadWithRetry(url, token, filepath string) error {
	// One client is reused across every attempt so the transport can keep
	// connections alive between retries. MaxTimeout bounds each full attempt.
	client := &http.Client{
		Timeout: u.MaxTimeout,
		Transport: &http.Transport{
			MaxIdleConns:        10,
			MaxIdleConnsPerHost: 5,
			IdleConnTimeout:     90 * time.Second,
			DisableKeepAlives:   false, // Keep connections alive
			ForceAttemptHTTP2:   false, // Use HTTP/1.1 for large uploads
		},
	}
	var lastErr error
	for attempt := 0; attempt < u.MaxRetries; attempt++ {
		if attempt > 0 {
			// Linear backoff: attempt N waits N * RetryDelay before running.
			delay := u.RetryDelay * time.Duration(attempt)
			log.Infof("Upload attempt %d/%d, waiting %v before retry...", attempt+1, u.MaxRetries, delay)
			time.Sleep(delay)
		}
		// Pre-resolve DNS / warm connection
		// NOTE(review): a prewarm failure consumes one of the MaxRetries
		// attempts without ever starting an upload — confirm that is intended.
		if err := u.prewarmConnection(url); err != nil {
			lastErr = fmt.Errorf("connection prewarm failed: %w", err)
			log.Warnf("Prewarm failed: %v", err)
			continue
		}
		// Attempt upload
		if err := u.doUpload(client, url, token, filepath); err != nil {
			lastErr = err
			log.Warnf("Upload attempt %d failed: %v", attempt+1, err)
			continue
		}
		log.Infof("Upload succeeded on attempt %d", attempt+1)
		return nil // Success
	}
	return fmt.Errorf("upload failed after %d attempts: %w", u.MaxRetries, lastErr)
}
// prewarmConnection issues a cheap HEAD request so DNS resolution and the
// TCP/TLS handshake are performed before the (potentially long) upload
// starts. Any response status counts as success; only transport-level
// failures are returned.
//
// NOTE(review): this uses its own short-timeout client, so the warmed
// connection lives in a different pool than the upload client's — the warmup
// helps DNS caching but not connection reuse; confirm whether sharing the
// upload client's transport was intended.
func (u *UploadHelper) prewarmConnection(url string) error {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return err
	}
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	// Drain the body before closing so the transport can reuse the
	// connection (a closed-but-undrained body forces a new connection).
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	return nil
}
// doUpload performs the actual file upload as a single multipart/form-data
// POST, placing the file under the "attachment" form field and authorizing
// with a "token <token>" Authorization header. Any non-2xx response is
// returned as an error that includes the response body text.
//
// NOTE(review): the entire file plus multipart framing is buffered in memory
// before sending; for very large artifacts an io.Pipe-based streaming body
// would avoid that — confirm expected artifact sizes.
func (u *UploadHelper) doUpload(client *http.Client, url, token, filepath string) error {
	file, err := os.Open(filepath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()
	stat, err := file.Stat()
	if err != nil {
		return fmt.Errorf("failed to stat file: %w", err)
	}
	log.Infof("Uploading %s (%d bytes) to %s", filepath, stat.Size(), url)
	// Create multipart form
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	// stat.Name() is the base name, so only the filename (not the local
	// path) is exposed to the server.
	part, err := writer.CreateFormFile("attachment", stat.Name())
	if err != nil {
		return fmt.Errorf("failed to create form file: %w", err)
	}
	if _, err := io.Copy(part, file); err != nil {
		return fmt.Errorf("failed to copy file to form: %w", err)
	}
	// Close the writer before building the request so the multipart
	// terminator is included in the buffered body.
	writer.Close()
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", token))
	req.Header.Set("Content-Type", writer.FormDataContentType())
	req.Header.Set("Connection", "keep-alive")
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("upload request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Include the server's response body to make failures diagnosable.
		respBody, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(respBody))
	}
	log.Infof("Upload completed successfully, status: %d", resp.StatusCode)
	return nil
}

View File

@@ -0,0 +1,355 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package cleanup
import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"gitea.com/gitea/act_runner/internal/pkg/config"

	log "github.com/sirupsen/logrus"
)
// CleanupResult contains the results of a cleanup operation.
type CleanupResult struct {
	BytesFreed   int64         // total bytes reclaimed across all cleanup steps
	FilesDeleted int           // files deleted; each removed directory counts once
	Errors       []error       // non-fatal per-step errors; cleanup continues past them
	Duration     time.Duration // wall-clock time the whole cleanup took
}
// RunCleanup performs cleanup operations to free disk space. It sweeps, in
// order: the runner cache (>24h), per-job work directories (>48h), artifact
// staging files (>72h), runner-related temp files (>24h), and build tool
// caches (>7d). Each step's failure is recorded in result.Errors and the
// remaining steps still run; the function's error return is always nil in
// the current implementation.
//
// NOTE(review): ctx is accepted for future cancellation support but is not
// consulted by the individual steps yet.
func RunCleanup(ctx context.Context, cfg *config.Config) (*CleanupResult, error) {
	start := time.Now()
	result := &CleanupResult{}
	log.Info("Starting runner cleanup...")
	// 1. Clean old cache directories.
	// Guard on the configured directory rather than the joined path:
	// filepath.Join never returns "" here, so the previous check
	// (cacheDir != "") was always true, and an unset cfg.Cache.Dir would
	// have swept a relative "_cache" directory in the process's working dir.
	if cfg.Cache.Dir != "" {
		cacheDir := filepath.Join(cfg.Cache.Dir, "_cache")
		if bytes, files, err := cleanOldDir(cacheDir, 24*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("cache cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned cache: freed %d bytes, deleted %d files", bytes, files)
		}
	}
	// 2. Clean old work directories
	workDir := cfg.Container.WorkdirParent
	if workDir != "" {
		if bytes, files, err := cleanOldWorkDirs(workDir, 48*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("workdir cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned work dirs: freed %d bytes, deleted %d files", bytes, files)
		}
	}
	// 3. Clean old artifact staging directories. Skip when no cache dir is
	// configured, to avoid globbing relative patterns against the cwd.
	if artifactDir := cfg.Cache.Dir; artifactDir != "" {
		if bytes, files, err := cleanOldArtifacts(artifactDir, 72*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("artifact cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned artifacts: freed %d bytes, deleted %d files", bytes, files)
		}
	}
	// 4. Clean system temp files (older than 24h)
	if bytes, files, err := cleanTempDir(24 * time.Hour); err != nil {
		result.Errors = append(result.Errors, fmt.Errorf("temp cleanup: %w", err))
	} else {
		result.BytesFreed += bytes
		result.FilesDeleted += files
		log.Infof("Cleaned temp: freed %d bytes, deleted %d files", bytes, files)
	}
	// 5. Clean build tool caches (older than 7 days)
	// These can grow very large from Go, npm, nuget, gradle, maven builds
	if bytes, files, err := cleanBuildCaches(7 * 24 * time.Hour); err != nil {
		result.Errors = append(result.Errors, fmt.Errorf("build cache cleanup: %w", err))
	} else {
		result.BytesFreed += bytes
		result.FilesDeleted += files
		log.Infof("Cleaned build caches: freed %d bytes, deleted %d files", bytes, files)
	}
	result.Duration = time.Since(start)
	log.Infof("Cleanup completed: freed %s in %s", formatBytes(result.BytesFreed), result.Duration)
	return result, nil
}
// cleanOldDir removes files older than maxAge from a directory
func cleanOldDir(dir string, maxAge time.Duration) (int64, int, error) {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return 0, 0, nil
}
var bytesFreed int64
var filesDeleted int
cutoff := time.Now().Add(-maxAge)
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // Skip errors
}
if info.IsDir() {
return nil
}
if info.ModTime().Before(cutoff) {
size := info.Size()
if err := os.Remove(path); err == nil {
bytesFreed += size
filesDeleted++
}
}
return nil
})
return bytesFreed, filesDeleted, err
}
// cleanOldWorkDirs deletes per-job work directories under baseDir whose
// modification time is older than maxAge. Each removed directory counts as a
// single entry in the deleted tally; the byte count is the directory's
// recursive size. A missing baseDir is not an error.
func cleanOldWorkDirs(baseDir string, maxAge time.Duration) (int64, int, error) {
	if _, err := os.Stat(baseDir); os.IsNotExist(err) {
		return 0, 0, nil
	}
	entries, err := os.ReadDir(baseDir)
	if err != nil {
		return 0, 0, err
	}

	var (
		freed   int64
		removed int
		cutoff  = time.Now().Add(-maxAge)
	)
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		info, infoErr := e.Info()
		if infoErr != nil {
			continue
		}
		if !info.ModTime().Before(cutoff) {
			continue
		}
		dir := filepath.Join(baseDir, e.Name())
		// Measure before removal; RemoveAll failures leave the tally alone.
		size := dirSize(dir)
		if os.RemoveAll(dir) == nil {
			freed += size
			removed++
			log.Debugf("Removed old work dir: %s", dir)
		}
	}
	return freed, removed, nil
}
// cleanOldArtifacts deletes artifact staging files and directories directly
// under baseDir that match the known staging name patterns ("artifact-*",
// "upload-*", "download-*") and are older than maxAge. A missing baseDir is
// not an error.
func cleanOldArtifacts(baseDir string, maxAge time.Duration) (int64, int, error) {
	if _, err := os.Stat(baseDir); os.IsNotExist(err) {
		return 0, 0, nil
	}

	var (
		freed   int64
		removed int
		cutoff  = time.Now().Add(-maxAge)
	)
	// Staging names created during artifact transfers.
	for _, pattern := range []string{"artifact-*", "upload-*", "download-*"} {
		matches, _ := filepath.Glob(filepath.Join(baseDir, pattern))
		for _, candidate := range matches {
			info, statErr := os.Stat(candidate)
			if statErr != nil {
				continue
			}
			if !info.ModTime().Before(cutoff) {
				continue
			}
			var size int64
			var rmErr error
			if info.IsDir() {
				size = dirSize(candidate)
				rmErr = os.RemoveAll(candidate)
			} else {
				size = info.Size()
				rmErr = os.Remove(candidate)
			}
			if rmErr == nil {
				freed += size
				removed++
			}
		}
	}
	return freed, removed, nil
}
// cleanTempDir removes runner-related entries older than maxAge from the
// system temp directory. Only names starting with a known runner prefix
// ("act-", "runner-", "gitea-", "workflow-") are touched, so unrelated temp
// files from other processes are left alone. Removed directories count once
// each in the deleted tally, with their recursive size in the byte count.
func cleanTempDir(maxAge time.Duration) (int64, int, error) {
	tmpDir := os.TempDir()
	var bytesFreed int64
	var filesDeleted int
	cutoff := time.Now().Add(-maxAge)
	entries, err := os.ReadDir(tmpDir)
	if err != nil {
		return 0, 0, err
	}
	// Only clean files/dirs that look like runner/act artifacts.
	runnerPrefixes := []string{"act-", "runner-", "gitea-", "workflow-"}
	for _, entry := range entries {
		name := entry.Name()
		isRunner := false
		for _, p := range runnerPrefixes {
			// Idiomatic prefix test replaces the manual
			// len/slice comparison of the previous version.
			if strings.HasPrefix(name, p) {
				isRunner = true
				break
			}
		}
		if !isRunner {
			continue
		}
		path := filepath.Join(tmpDir, name)
		info, err := entry.Info()
		if err != nil {
			continue
		}
		if info.ModTime().Before(cutoff) {
			var size int64
			if info.IsDir() {
				size = dirSize(path)
				err = os.RemoveAll(path)
			} else {
				size = info.Size()
				err = os.Remove(path)
			}
			if err == nil {
				bytesFreed += size
				filesDeleted++
			}
		}
	}
	return bytesFreed, filesDeleted, nil
}
// dirSize calculates the total size of a directory
func dirSize(path string) int64 {
var size int64
filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if !info.IsDir() {
size += info.Size()
}
return nil
})
return size
}
// cleanBuildCaches removes old build tool caches that accumulate from CI jobs.
// These are cleaned more aggressively (files older than 7 days) since they can grow very large.
//
// Covered caches: Go build, golangci-lint, npm, pnpm, yarn, NuGet, Gradle,
// Maven, pip, Cargo registry, and Rustup temp — all resolved under $HOME.
// Returns the total bytes freed and files deleted across all cache dirs; the
// error return is always nil in the current implementation.
func cleanBuildCaches(maxAge time.Duration) (int64, int, error) {
	home := os.Getenv("HOME")
	if home == "" {
		home = "/root" // fallback for runners typically running as root
	}
	var totalBytesFreed int64
	var totalFilesDeleted int
	// Build cache directories to clean
	// Format: {path, description}
	cacheDirs := []struct {
		path string
		desc string
	}{
		{filepath.Join(home, ".cache", "go-build"), "Go build cache"},
		{filepath.Join(home, ".cache", "golangci-lint"), "golangci-lint cache"},
		{filepath.Join(home, ".npm", "_cacache"), "npm cache"},
		{filepath.Join(home, ".cache", "pnpm"), "pnpm cache"},
		{filepath.Join(home, ".cache", "yarn"), "yarn cache"},
		{filepath.Join(home, ".nuget", "packages"), "NuGet cache"},
		{filepath.Join(home, ".gradle", "caches"), "Gradle cache"},
		{filepath.Join(home, ".m2", "repository"), "Maven cache"},
		{filepath.Join(home, ".cache", "pip"), "pip cache"},
		{filepath.Join(home, ".cargo", "registry", "cache"), "Cargo cache"},
		{filepath.Join(home, ".rustup", "tmp"), "Rustup temp"},
	}
	cutoff := time.Now().Add(-maxAge)
	for _, cache := range cacheDirs {
		// Skip caches that don't exist on this runner.
		if _, err := os.Stat(cache.path); os.IsNotExist(err) {
			continue
		}
		var bytesFreed int64
		var filesDeleted int
		// Delete individual files older than the cutoff. Per-file errors are
		// intentionally ignored so one locked file doesn't stop the sweep.
		err := filepath.Walk(cache.path, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return nil // Skip errors
			}
			if info.IsDir() {
				return nil
			}
			if info.ModTime().Before(cutoff) {
				size := info.Size()
				if err := os.Remove(path); err == nil {
					bytesFreed += size
					filesDeleted++
				}
			}
			return nil
		})
		if err == nil && (bytesFreed > 0 || filesDeleted > 0) {
			log.Infof("Cleaned %s: freed %s, deleted %d files", cache.desc, formatBytes(bytesFreed), filesDeleted)
			totalBytesFreed += bytesFreed
			totalFilesDeleted += filesDeleted
		}
		// Also remove empty directories
		// NOTE(review): Walk visits parents before children, so only dirs
		// already empty when visited are removed here; directories emptied by
		// this same pass survive until a later cleanup — confirm acceptable.
		filepath.Walk(cache.path, func(path string, info os.FileInfo, err error) error {
			if err != nil || !info.IsDir() || path == cache.path {
				return nil
			}
			entries, _ := os.ReadDir(path)
			if len(entries) == 0 {
				os.Remove(path)
			}
			return nil
		})
	}
	return totalBytesFreed, totalFilesDeleted, nil
}
// formatBytes renders a byte count as a human-readable string using binary
// (1024-based) units: values below 1024 print as "N B", larger values as one
// decimal place plus a K/M/G/T/P/E suffix, e.g. 1536 -> "1.5 KB".
func formatBytes(bytes int64) string {
	if bytes < 1024 {
		return fmt.Sprintf("%d B", bytes)
	}
	const units = "KMGTPE"
	value := float64(bytes)
	idx := 0
	// Reduce until the final division below lands in [1, 1024).
	for value >= 1024*1024 && idx < len(units)-1 {
		value /= 1024
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/1024, units[idx])
}

View File

@@ -84,7 +84,7 @@ func (bm *BandwidthManager) GetLastResult() *BandwidthInfo {
return bm.lastResult
}
// TestBandwidth tests network bandwidth to the Gitea server
// TestBandwidth tests network bandwidth to the GitCaddy server
func TestBandwidth(ctx context.Context, serverURL string) *BandwidthInfo {
if serverURL == "" {
return nil

View File

@@ -1,4 +1,4 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package envcheck
@@ -19,13 +19,22 @@ import (
// DiskInfo holds disk space information
type DiskInfo struct {
Path string `json:"path,omitempty"` // Path being checked (working directory)
Path string `json:"path,omitempty"` // Path being checked (working directory)
Total uint64 `json:"total_bytes"`
Free uint64 `json:"free_bytes"`
Used uint64 `json:"used_bytes"`
UsedPercent float64 `json:"used_percent"`
}
// CPUInfo holds CPU load information
type CPUInfo struct {
NumCPU int `json:"num_cpu"` // Number of logical CPUs
LoadAvg1m float64 `json:"load_avg_1m"` // 1-minute load average
LoadAvg5m float64 `json:"load_avg_5m"` // 5-minute load average
LoadAvg15m float64 `json:"load_avg_15m"` // 15-minute load average
LoadPercent float64 `json:"load_percent"` // (load_avg_1m / num_cpu) * 100
}
// DistroInfo holds Linux distribution information
type DistroInfo struct {
ID string `json:"id,omitempty"` // e.g., "ubuntu", "debian", "fedora"
@@ -37,7 +46,7 @@ type DistroInfo struct {
type XcodeInfo struct {
Version string `json:"version,omitempty"`
Build string `json:"build,omitempty"`
SDKs []string `json:"sdks,omitempty"` // e.g., ["iOS 17.0", "macOS 14.0"]
SDKs []string `json:"sdks,omitempty"` // e.g., ["iOS 17.0", "macOS 14.0"]
Simulators []string `json:"simulators,omitempty"` // Available iOS simulators
}
@@ -52,11 +61,12 @@ type RunnerCapabilities struct {
ContainerRuntime string `json:"container_runtime,omitempty"`
Shell []string `json:"shell,omitempty"`
Tools map[string][]string `json:"tools,omitempty"`
BuildTools []string `json:"build_tools,omitempty"` // Available build/installer tools
BuildTools []string `json:"build_tools,omitempty"` // Available build/installer tools
PackageManagers []string `json:"package_managers,omitempty"`
Features *CapabilityFeatures `json:"features,omitempty"`
Limitations []string `json:"limitations,omitempty"`
Disk *DiskInfo `json:"disk,omitempty"`
CPU *CPUInfo `json:"cpu,omitempty"`
Bandwidth *BandwidthInfo `json:"bandwidth,omitempty"`
SuggestedLabels []string `json:"suggested_labels,omitempty"`
}
@@ -120,6 +130,9 @@ func DetectCapabilities(ctx context.Context, dockerHost string, workingDir strin
// Detect disk space on the working directory's filesystem
cap.Disk = detectDiskSpace(workingDir)
// Detect CPU load
cap.CPU = detectCPULoad()
// Generate suggested labels based on detected capabilities
cap.SuggestedLabels = generateSuggestedLabels(cap)
@@ -887,3 +900,89 @@ func contains(slice []string, item string) bool {
}
return false
}
// detectCPULoad detects the current CPU load.
// On Linux it reads /proc/loadavg; on macOS it shells out to
// `sysctl -n vm.loadavg`; on Windows (which has no load-average concept) it
// queries `wmic cpu get loadpercentage` and synthesizes an equivalent
// 1-minute load. On any failure a partially-filled CPUInfo (at minimum
// NumCPU) is returned rather than an error.
func detectCPULoad() *CPUInfo {
	numCPU := runtime.NumCPU()
	info := &CPUInfo{
		NumCPU: numCPU,
	}
	switch runtime.GOOS {
	case "linux":
		// Read from /proc/loadavg; its first three fields are the
		// 1/5/15-minute load averages.
		data, err := os.ReadFile("/proc/loadavg")
		if err != nil {
			return info
		}
		parts := strings.Fields(string(data))
		if len(parts) >= 3 {
			if load, err := parseFloat(parts[0]); err == nil {
				info.LoadAvg1m = load
			}
			if load, err := parseFloat(parts[1]); err == nil {
				info.LoadAvg5m = load
			}
			if load, err := parseFloat(parts[2]); err == nil {
				info.LoadAvg15m = load
			}
		}
	case "darwin":
		// Use sysctl on macOS; the 5s timeout keeps a wedged external
		// command from hanging capability detection.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		cmd := exec.CommandContext(ctx, "sysctl", "-n", "vm.loadavg")
		output, err := cmd.Output()
		if err == nil {
			// Output format: "{ 1.23 4.56 7.89 }"
			line := strings.Trim(string(output), "{ }\n")
			parts := strings.Fields(line)
			if len(parts) >= 3 {
				if load, err := parseFloat(parts[0]); err == nil {
					info.LoadAvg1m = load
				}
				if load, err := parseFloat(parts[1]); err == nil {
					info.LoadAvg5m = load
				}
				if load, err := parseFloat(parts[2]); err == nil {
					info.LoadAvg15m = load
				}
			}
		}
	case "windows":
		// Windows doesn't have load average, use CPU usage via wmic
		// NOTE(review): wmic is deprecated/absent on recent Windows builds —
		// confirm availability on target runners.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		cmd := exec.CommandContext(ctx, "wmic", "cpu", "get", "loadpercentage")
		output, err := cmd.Output()
		if err == nil {
			lines := strings.Split(string(output), "\n")
			for _, line := range lines {
				line = strings.TrimSpace(line)
				// Skip the column header and blank lines; the first numeric
				// line is the instantaneous CPU percentage.
				if line != "" && line != "LoadPercentage" {
					if load, err := parseFloat(line); err == nil {
						// Convert percentage to "load" equivalent
						info.LoadPercent = load
						info.LoadAvg1m = load * float64(numCPU) / 100.0
						return info
					}
				}
			}
		}
	}
	// Calculate load percent (load_avg_1m / num_cpu * 100)
	if info.LoadAvg1m > 0 && numCPU > 0 {
		info.LoadPercent = (info.LoadAvg1m / float64(numCPU)) * 100.0
	}
	return info
}
// parseFloat converts a (possibly whitespace-padded) decimal string to a
// float64. Parsing is delegated to the JSON number grammar, matching the
// syntax the rest of this file expects for loadavg fields.
func parseFloat(s string) (float64, error) {
	trimmed := strings.TrimSpace(s)
	var value float64
	err := json.Unmarshal([]byte(trimmed), &value)
	return value, err
}