2
0

Compare commits

...

26 Commits

Author SHA1 Message Date
GitCaddy
0db86bc6a4 chore: Fix linter issues and update copyrights
Some checks failed
CI / build-and-test (push) Failing after 55s
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
- Format Go files with gofmt
- Update copyrights to include MarketAlly
- Add MarketAlly copyright to files we created

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 09:41:16 +00:00
GitCaddy
f5b22c4149 feat: Add build cache cleanup and CLI cleanup command
Some checks failed
CI / build-and-test (push) Failing after 30s
- Add cleanup for common build tool caches (Go, npm, NuGet, Gradle, Maven, pip, Cargo)
- Build caches cleaned for files older than 7 days
- Add gitcaddy-runner cleanup CLI command for manual cleanup trigger
- Fixes disk space issues from accumulated CI build artifacts

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 09:26:21 +00:00
GitCaddy
0ba2e0c3d5 feat: Add CPU load monitoring and cleanup support
Some checks failed
CI / build-and-test (push) Failing after 55s
- Add CPUInfo struct with load average and percentage
- Add detectCPULoad() for Linux, macOS, and Windows
- Add cleanup package for disk space management
- Handle RequestCleanup signal from server
- Report CPU load in capabilities to server

🤖 Generated with Claude Code
2026-01-14 08:48:54 +00:00
GitCaddy
8a54ec62d4 fix: Use linux-latest instead of ubuntu-latest
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
2026-01-14 07:39:18 +00:00
GitCaddy
587ac42be4 feat: Rebrand to gitcaddy-runner with upload helper
Some checks failed
Release / build (amd64, linux) (push) Successful in 1m12s
Release / build (amd64, darwin) (push) Successful in 1m16s
Release / build (arm64, darwin) (push) Successful in 1m0s
Release / build (amd64, windows) (push) Successful in 1m13s
Release / build (arm64, linux) (push) Successful in 45s
Release / release (push) Successful in 50s
CI / build-and-test (push) Has been cancelled
- Rename binary from act_runner to gitcaddy-runner
- Update all user-facing strings (Gitea → GitCaddy)
- Add gitcaddy-upload helper with automatic retry for large files
- Add upload helper package (internal/pkg/artifact)
- Update Docker image name to marketally/gitcaddy-runner

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-14 07:26:46 +00:00
GitCaddy
56dcda0d5e fix: remove binaries from git tracking
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 1m22s
Release / build (arm64, darwin) (push) Successful in 2m9s
Release / build (amd64, linux) (push) Successful in 2m19s
Release / build (amd64, windows) (push) Successful in 2m22s
Release / build (arm64, linux) (push) Successful in 1m9s
Release / release (push) Successful in 21s
2026-01-12 01:36:19 +00:00
GitCaddy
e44f0c403b fix: remove accidentally committed binaries and add to gitignore
Some checks failed
CI / build-and-test (push) Has been cancelled
2026-01-12 01:35:38 +00:00
GitCaddy
fb1498bf7a fix: add -a flag to force rebuild and prevent cached binaries
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 1m14s
Release / build (amd64, windows) (push) Successful in 1m38s
Release / build (amd64, linux) (push) Successful in 2m52s
Release / build (arm64, darwin) (push) Successful in 2m50s
Release / build (arm64, linux) (push) Successful in 1m48s
Release / release (push) Successful in 47s
2026-01-12 01:28:20 +00:00
GitCaddy
fa69213d15 fix: use GitHub Actions expression syntax for VERSION
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, linux) (push) Successful in 45s
Release / build (amd64, windows) (push) Successful in 54s
Release / build (amd64, darwin) (push) Successful in 1m8s
Release / build (arm64, darwin) (push) Successful in 1m6s
Release / build (arm64, linux) (push) Successful in 42s
Release / release (push) Successful in 25s
2026-01-12 01:22:54 +00:00
GitCaddy
f92e50f35b fix: use GITHUB_REF instead of GITHUB_REF_NAME for version extraction
Some checks failed
Release / build (amd64, darwin) (push) Successful in 47s
Release / build (amd64, linux) (push) Successful in 55s
Release / build (arm64, darwin) (push) Successful in 1m7s
Release / build (amd64, windows) (push) Successful in 1m10s
Release / build (arm64, linux) (push) Successful in 51s
Release / release (push) Successful in 26s
CI / build-and-test (push) Has been cancelled
2026-01-12 01:14:20 +00:00
GitCaddy
a792b47b41 fix: isolate golangci-lint cache per job to prevent parallel conflicts
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 56s
Release / build (amd64, linux) (push) Successful in 1m0s
Release / build (amd64, windows) (push) Successful in 1m9s
Release / build (arm64, linux) (push) Successful in 38s
Release / build (arm64, darwin) (push) Successful in 55s
Release / release (push) Successful in 27s
Add GOLANGCI_LINT_CACHE and XDG_CACHE_HOME environment variables
pointing to job-specific cache directory to prevent parallel job
conflicts when running golangci-lint.

🤖 Generated with Claude Code

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-12 00:47:11 +00:00
GitCaddy
68ec7efde0 fix: isolate golangci-lint cache per job to prevent parallel conflicts
Add GOLANGCI_LINT_CACHE and XDG_CACHE_HOME environment variables
pointing to job-specific cache directory to prevent parallel job
conflicts when running golangci-lint.

🤖 Generated with Claude Code

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-12 00:45:26 +00:00
GitCaddy
f314ffb036 feat: implement job-isolated cache directories
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Successful in 17s
Release / build (amd64, linux) (push) Successful in 21s
Release / build (amd64, windows) (push) Successful in 17s
Release / build (arm64, darwin) (push) Successful in 16s
Release / build (arm64, linux) (push) Successful in 43s
Release / release (push) Successful in 29s
- Each job now gets its own cache directory: ~/.cache/act/jobs/{taskId}/
- Cache is cleaned up automatically after job completion
- Periodic cleanup removes stale job caches older than 2 hours
- Eliminates race conditions in npm/pnpm cache operations
- No more ENOTEMPTY errors from concurrent tool installs
- Fix workflow: use linux-latest and setup-go@v4
2026-01-11 22:25:24 +00:00
GitCaddy
b303a83a77 feat(capabilities): add visionOS SDK, PowerShell versions, working directory disk space
Some checks failed
CI / build-and-test (push) Has been cancelled
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
- Add visionOS/xrOS SDK detection for Vision Pro development
- Add PowerShell version detection (pwsh and powershell) with actual versions
- Detect disk space on working directory filesystem (not just root)
  - Useful for runners using external/USB drives for builds
- Add watchOS and tvOS suggested labels
- Refactor disk detection to accept path parameter

🤖 Generated with Claude Code (https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-11 20:32:23 +00:00
GitCaddy
66d0b1e608 feat(capabilities): enhanced tool and platform detection
Some checks failed
CI / build-and-test (push) Has been cancelled
macOS:
- Xcode version and build detection
- iOS/watchOS/tvOS SDK detection
- iOS Simulator detection
- Swift, CocoaPods, Carthage, fastlane detection
- Code signing tools (codesign, notarytool)
- Package builders (pkgbuild, create-dmg)

Windows:
- Visual Studio detection via vswhere
- MSBuild detection
- Inno Setup (ISCC) detection
- NSIS (makensis) detection
- WiX Toolset detection
- Windows SDK signtool detection
- Package managers (Chocolatey, Scoop, winget)

Linux:
- GCC/Clang compiler detection
- Build tools (autoconf, automake, meson)
- Package builders (dpkg-deb, rpmbuild, fpm)
- AppImage tools detection

Cross-platform:
- Ruby, PHP, Swift, Kotlin, Flutter, Dart
- CMake, Make, Ninja, Gradle, Maven
- npm, yarn, pnpm, cargo, pip
- Git version detection

Suggested labels now include:
- xcode, ios, ios-simulator for macOS with Xcode
- inno-setup, nsis, msbuild, vs2022 for Windows
- Tool-based labels (dotnet, java, node)

🤖 Generated with Claude Code
2026-01-11 20:20:02 +00:00
GitCaddy
48a589eb79 fix: add cross-platform disk detection for Windows/macOS builds
Some checks failed
CI / build-and-test (push) Failing after 2s
Release / build (amd64, darwin) (push) Successful in 6s
Release / build (amd64, linux) (push) Successful in 5s
Release / build (amd64, windows) (push) Successful in 6s
Release / build (arm64, darwin) (push) Successful in 5s
Release / build (arm64, linux) (push) Successful in 5s
Release / release (push) Successful in 11s
- Split detectDiskSpace() into platform-specific files with build tags
- disk_unix.go: Uses unix.Statfs for Linux and macOS
- disk_windows.go: Uses windows.GetDiskFreeSpaceEx for Windows
- Fixes Windows cross-compilation build errors

🤖 Generated with Claude Code
2026-01-11 19:29:27 +00:00
GitCaddy
fef300dd5b docs: add HOWTOSTART.md guide for setting up runners
All checks were successful
CI / build-and-test (push) Successful in 17s
Comprehensive guide covering:
- Prerequisites and quick start
- Registration process
- Labels configuration
- Running as a systemd service
- Docker support
- Capabilities detection
- Troubleshooting tips

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-11 18:18:39 +00:00
GitCaddy
49a0b6f167 feat(capabilities): Add Linux distro detection and suggested labels
All checks were successful
CI / build-and-test (push) Successful in 14s
- Add DistroInfo struct to detect Linux distribution from /etc/os-release
- Add detectLinuxDistro() function to parse distro ID, version, pretty name
- Add generateSuggestedLabels() to create industry-standard labels
- Suggested labels include: linux/windows/macos, distro name, with -latest suffix

🤖 Generated with Claude Code
2026-01-11 17:25:45 +00:00
GitCaddy
e5fdaadbd2 feat: handle bandwidth test requests from server
All checks were successful
CI / build-and-test (push) Successful in 8s
- Update to actions-proto-go v0.5.7 with RequestBandwidthTest field
- Add SetBandwidthManager method to Poller
- Check FetchTaskResponse for bandwidth test request
- Include bandwidth in capabilities sent to server

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-11 15:29:21 +00:00
GitCaddy
ab382dc256 feat: add bandwidth testing to runner capabilities
All checks were successful
CI / build-and-test (push) Successful in 8s
- Add BandwidthManager for periodic bandwidth tests (hourly)
- Test download speed and latency against registered Gitea server
- Include bandwidth in runner capabilities JSON
- Add FormatBandwidth helper for display

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-11 07:38:49 +00:00
GitCaddy
2c66de4df2 fix: run make fmt for code formatting
Some checks failed
CI / build-and-test (push) Successful in 8s
Release / build (amd64, darwin) (push) Successful in 14s
Release / build (amd64, linux) (push) Successful in 13s
Release / build (amd64, windows) (push) Failing after 17s
Release / build (arm64, darwin) (push) Successful in 21s
Release / build (arm64, linux) (push) Successful in 5s
Release / release (push) Has been skipped
2026-01-11 07:11:49 +00:00
GitCaddy
9de33d4252 Send runner capabilities with FetchTask poll
Some checks failed
CI / build-and-test (push) Failing after 8s
- Add envcheck import and capability detection to poller.go
- Send capabilities JSON with every FetchTask request
- Use GitCaddy actions-proto-go v0.5.6 with CapabilitiesJson field
2026-01-11 07:03:54 +00:00
GitCaddy
ff8375c6e1 Add disk space reporting to runner capabilities
Some checks failed
CI / build-and-test (push) Failing after 4s
- Add DiskInfo struct with total, free, used bytes and usage percentage
- Detect disk space using unix.Statfs on startup
- Add periodic capabilities updates every 5 minutes
- Add disk space warnings at 85% (warning) and 95% (critical) thresholds
- Send updated capabilities to Gitea server periodically

This helps monitor runners that are running low on disk space.

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-11 05:16:49 +00:00
3e22214bbd ci: Disable Go cache to prevent hanging on Gitea runners
Some checks failed
Release / build (amd64, darwin) (push) Successful in 15s
Release / build (arm64, darwin) (push) Successful in 5s
CI / build-and-test (push) Failing after 3s
Release / build (arm64, linux) (push) Successful in 15s
Release / release (push) Successful in 11s
Release / build (amd64, linux) (push) Successful in 9s
Release / build (amd64, windows) (push) Successful in 5s
The actions/setup-go@v5 cache feature tries to upload to GitHub's
cache infrastructure, which doesn't exist on self-hosted Gitea
runners. This causes jobs to hang indefinitely during the post step.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-10 05:30:00 -05:00
b4e90db372 Update actions-proto-go to v0.5.2
Some checks failed
CI / build-and-test (push) Waiting to run
Release / build (arm64, darwin) (push) Has been cancelled
Release / build (arm64, linux) (push) Has been cancelled
Release / release (push) Has been cancelled
Release / build (amd64, darwin) (push) Has been cancelled
Release / build (amd64, windows) (push) Has been cancelled
Release / build (amd64, linux) (push) Has been cancelled
Use corrected module path version.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-10 05:15:40 -05:00
c6b9e0c2d1 Add professional README for GitCaddy fork
Some checks are pending
CI / build-and-test (push) Waiting to run
Document capability detection, installation, configuration,
and GitCaddy integration with architecture diagram.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-10 05:03:21 -05:00
22 changed files with 2116 additions and 146 deletions

View File

@@ -7,7 +7,7 @@ on:
jobs:
build:
runs-on: ubuntu-latest
runs-on: linux-latest
strategy:
matrix:
include:
@@ -26,22 +26,26 @@ jobs:
with:
fetch-depth: 0
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: Build
env:
GOPRIVATE: git.marketally.com
VERSION: ${{ github.ref_name }}
run: |
VERSION=${GITHUB_REF_NAME#v}
# Strip the v prefix from tag
VERSION="${VERSION#v}"
EXT=""
if [ "${{ matrix.goos }}" = "windows" ]; then
EXT=".exe"
fi
echo "Building version: ${VERSION}"
CGO_ENABLED=0 GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} \
go build -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=${VERSION}" \
go build -a -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=${VERSION}" \
-o act_runner-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}
env:
GOPRIVATE: git.marketally.com
- name: Upload artifact
uses: actions/upload-artifact@v3
@@ -51,7 +55,7 @@ jobs:
release:
needs: build
runs-on: ubuntu-latest
runs-on: linux-latest
steps:
- uses: actions/checkout@v4

View File

@@ -8,13 +8,14 @@ on:
jobs:
build-and-test:
runs-on: ubuntu-latest
runs-on: linux-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
- name: Vet
run: make vet

1
.gitignore vendored
View File

@@ -12,3 +12,4 @@ coverage.txt
__debug_bin
# gorelease binary folder
dist
act_runner-*

121
HOWTOSTART.md Normal file
View File

@@ -0,0 +1,121 @@
# How to Start a GitCaddy Runner
This guide explains how to set up and start a GitCaddy Actions runner (act_runner) to execute your CI/CD workflows.
## Prerequisites
- A Linux, macOS, or Windows machine
- Network access to your GitCaddy/Gitea instance
- (Optional) Docker installed for container-based workflows
## Quick Start
### 1. Download the Runner
Download the latest release from the [releases page](https://git.marketally.com/gitcaddy/act_runner/releases) or build from source:
```bash
git clone https://git.marketally.com/gitcaddy/act_runner.git
cd act_runner
make build
```
### 2. Register the Runner
Get a registration token from your GitCaddy instance:
- **Global runners**: Admin Area → Actions → Runners → Create Runner
- **Organization runners**: Organization Settings → Actions → Runners
- **Repository runners**: Repository Settings → Actions → Runners
Then register:
```bash
./act_runner register --no-interactive \
--instance https://your-gitea-instance.com \
--token YOUR_REGISTRATION_TOKEN \
--name my-runner \
--labels linux,ubuntu-latest
```
### 3. Start the Runner
```bash
./act_runner daemon
```
## Configuration Options
### Runner Labels
Labels determine which jobs the runner can execute. Configure labels during registration or edit them in the admin UI.
Common labels:
- `linux`, `linux-latest` - Linux runners
- `windows`, `windows-latest` - Windows runners
- `macos`, `macos-latest` - macOS runners
- `ubuntu`, `ubuntu-latest` - Ubuntu-specific
- `self-hosted` - Self-hosted runners
### Running as a Service
#### Linux (systemd)
```bash
sudo cat > /etc/systemd/system/act_runner.service << 'SERVICE'
[Unit]
Description=GitCaddy Actions Runner
After=network.target
[Service]
Type=simple
User=runner
WorkingDirectory=/opt/act_runner
ExecStart=/opt/act_runner/act_runner daemon
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
SERVICE
sudo systemctl enable act_runner
sudo systemctl start act_runner
```
### Docker Support
For workflows that use container actions, ensure Docker is installed and the runner user has access:
```bash
sudo usermod -aG docker $USER
```
## Capabilities Detection
The runner automatically detects and reports:
- Operating system and architecture
- Available shells (bash, sh, powershell)
- Installed tools (node, python, go, etc.)
- Docker availability
- Disk space and network bandwidth
These capabilities help admins understand what each runner can handle.
## Troubleshooting
### Runner not connecting
1. Check network connectivity to your GitCaddy instance
2. Verify the registration token is valid
3. Check firewall rules allow outbound HTTPS
### Jobs not running
1. Verify runner labels match the job's `runs-on` requirement
2. Check runner is online in the admin panel
3. Review runner logs: `journalctl -u act_runner -f`
## More Information
- [act_runner Repository](https://git.marketally.com/gitcaddy/act_runner)
- [GitCaddy Documentation](https://git.marketally.com/gitcaddy/gitea)

View File

@@ -1,5 +1,5 @@
DIST := dist
EXECUTABLE := act_runner
EXECUTABLE := gitcaddy-runner
GOFMT ?= gofumpt -l
DIST_DIRS := $(DIST)/binaries $(DIST)/release
GO ?= go
@@ -15,7 +15,7 @@ WINDOWS_ARCHS ?= windows/amd64
GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")
DOCKER_IMAGE ?= gitea/act_runner
DOCKER_IMAGE ?= marketally/gitcaddy-runner
DOCKER_TAG ?= nightly
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless

221
README.md
View File

@@ -1,108 +1,195 @@
# act runner
# GitCaddy Act Runner
Act runner is a runner for Gitea based on [Gitea fork](https://gitea.com/gitea/act) of [act](https://github.com/nektos/act).
A Gitea Actions runner with enhanced capability detection and reporting for AI-friendly workflow generation.
> **This is a GitCaddy fork** of [gitea.com/gitea/act_runner](https://gitea.com/gitea/act_runner) with runner capability discovery features.
## Overview
Act Runner executes Gitea Actions workflows using [act](https://github.com/nektos/act). This fork adds automatic capability detection, enabling Gitea to expose runner capabilities via API for AI tools to query before generating workflows.
## Key Features
- **Capability Detection**: Automatically detects OS, architecture, Docker support, available shells, and installed tools
- **Capability Reporting**: Reports capabilities to Gitea server during runner declaration
- **Full Compatibility**: Drop-in replacement for standard act_runner
- **Multi-Platform**: Supports Linux, macOS, and Windows
## Installation
### Prerequisites
### Download Pre-built Binary
Docker Engine Community version is required for docker mode. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).
### Download pre-built binary
Visit [here](https://dl.gitea.com/act_runner/) and download the right version for your platform.
### Build from source
Download from [Releases](https://git.marketally.com/gitcaddy/act_runner/releases):
```bash
# Linux (amd64)
curl -L -o act_runner https://git.marketally.com/gitcaddy/act_runner/releases/download/v0.3.1-gitcaddy/act_runner-linux-amd64
chmod +x act_runner
# macOS (Apple Silicon)
curl -L -o act_runner https://git.marketally.com/gitcaddy/act_runner/releases/download/v0.3.1-gitcaddy/act_runner-darwin-arm64
chmod +x act_runner
```
### Build from Source
```bash
git clone https://git.marketally.com/gitcaddy/act_runner.git
cd act_runner
make build
```
### Build a docker image
## Quick Start
```bash
make docker
```
### 1. Enable Actions in Gitea
## Quickstart
Add to your Gitea `app.ini`:
Actions are disabled by default, so you need to add the following to the configuration file of your Gitea instance to enable it:
```ini
[actions]
ENABLED=true
ENABLED = true
```
### Register
### 2. Register the Runner
```bash
./act_runner register
./act_runner register \
--instance https://your-gitea-instance.com \
--token YOUR_RUNNER_TOKEN \
--name my-runner \
--labels ubuntu-latest,docker
```
And you will be asked to input:
1. Gitea instance URL, like `http://192.168.8.8:3000/`. You should use your Gitea instance's ROOT_URL as the instance argument,
and you should not use `localhost` or `127.0.0.1` as the instance IP;
2. Runner token, you can get it from `http://192.168.8.8:3000/admin/actions/runners`;
3. Runner name, you can just leave it blank;
4. Runner labels, you can just leave it blank.
The process looks like:
```text
INFO Registering runner, arch=amd64, os=darwin, version=0.1.5.
WARN Runner in user-mode.
INFO Enter the Gitea instance URL (for example, https://gitea.com/):
http://192.168.8.8:3000/
INFO Enter the runner token:
fe884e8027dc292970d4e0303fe82b14xxxxxxxx
INFO Enter the runner name (if set empty, use hostname: Test.local):
INFO Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest):
INFO Registering runner, name=Test.local, instance=http://192.168.8.8:3000/, labels=[ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04 ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04].
DEBU Successfully pinged the Gitea instance server
INFO Runner registered successfully.
```
You can also register with command line arguments.
```bash
./act_runner register --instance http://192.168.8.8:3000 --token <my_runner_token> --no-interactive
```
If the registration succeeds, the runner will start immediately. Next time, you can run the runner directly.
### Run
### 3. Start the Runner
```bash
./act_runner daemon
```
### Run with docker
On startup, the runner will:
1. Detect system capabilities (OS, arch, Docker, shells, tools)
2. Report capabilities to Gitea via the Declare API
3. Begin polling for jobs
```bash
docker run -e GITEA_INSTANCE_URL=https://your_gitea.com -e GITEA_RUNNER_REGISTRATION_TOKEN=<your_token> -v /var/run/docker.sock:/var/run/docker.sock --name my_runner gitea/act_runner:nightly
## Capability Detection
The runner automatically detects:
| Category | Examples |
|----------|----------|
| **OS/Arch** | linux/amd64, darwin/arm64, windows/amd64 |
| **Container Runtime** | Docker, Podman |
| **Shells** | bash, sh, zsh, powershell, cmd |
| **Tools** | Node.js, Go, Python, Java, .NET, Rust |
| **Features** | Cache support, Docker Compose |
### Example Capabilities JSON
```json
{
"os": "linux",
"arch": "amd64",
"docker": true,
"docker_compose": true,
"container_runtime": "docker",
"shell": ["bash", "sh"],
"tools": {
"node": ["18.19.0"],
"go": ["1.21.5"],
"python": ["3.11.6"]
},
"features": {
"cache": true,
"docker_services": true
},
"limitations": []
}
```
### Configuration
## Configuration
You can also configure the runner with a configuration file.
The configuration file is a YAML file, you can generate a sample configuration file with `./act_runner generate-config`.
Create a config file or use command-line flags:
```bash
./act_runner generate-config > config.yaml
./act_runner -c config.yaml daemon
```
You can specify the configuration file path with `-c`/`--config` argument.
Example configuration:
```yaml
log:
level: info
runner:
file: .runner
capacity: 1
timeout: 3h
labels:
- ubuntu-latest:docker://node:18-bullseye
- ubuntu-22.04:docker://ubuntu:22.04
container:
docker_host: ""
force_pull: false
privileged: false
cache:
enabled: true
dir: ~/.cache/actcache
```
## Docker Deployment
```bash
./act_runner -c config.yaml register # register with config file
./act_runner -c config.yaml daemon # run with config file
docker run -d \
--name act_runner \
-e GITEA_INSTANCE_URL=https://your-gitea.com \
-e GITEA_RUNNER_REGISTRATION_TOKEN=<token> \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ./data:/data \
gitcaddy/act_runner:latest
```
You can read the latest version of the configuration file online at [config.example.yaml](internal/pkg/config/config.example.yaml).
## GitCaddy Integration
### Example Deployments
This runner is designed to work with the [GitCaddy Gitea fork](https://git.marketally.com/gitcaddy/gitea), which provides:
Check out the [examples](examples) directory for sample deployment types.
- **Runner Capabilities API** (`/api/v2/repos/{owner}/{repo}/actions/runners/capabilities`)
- **Workflow Validation API** for pre-flight checks
- **Action Compatibility Database** for GitHub Actions mapping
### How It Works
```
act_runner Gitea AI Tool
| | |
| Declare + Capabilities | |
|---------------------------->| |
| | |
| | GET /api/v2/.../caps |
| |<------------------------|
| | |
| | Runner capabilities |
| |------------------------>|
| | |
| | Generates workflow |
| | with correct config |
```
## Related Projects
| Project | Description |
|---------|-------------|
| [gitcaddy/gitea](https://git.marketally.com/gitcaddy/gitea) | Gitea with AI-friendly enhancements |
| [gitcaddy/actions-proto-go](https://git.marketally.com/gitcaddy/actions-proto-go) | Protocol definitions with capability support |
## Upstream
This project is a fork of [gitea.com/gitea/act_runner](https://gitea.com/gitea/act_runner). We contribute enhancements back to upstream where appropriate.
## License
MIT License - see [LICENSE](LICENSE) for details.

BIN
act_runner_test Executable file
View File

Binary file not shown.

38
cmd/upload-helper/main.go Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package main
import (
"flag"
"fmt"
"os"
"gitea.com/gitea/act_runner/internal/pkg/artifact"
)
func main() {
url := flag.String("url", "", "Upload URL")
token := flag.String("token", "", "Auth token")
file := flag.String("file", "", "File to upload")
retries := flag.Int("retries", 5, "Maximum retry attempts")
flag.Parse()
if *url == "" || *token == "" || *file == "" {
fmt.Fprintf(os.Stderr, "GitCaddy Upload Helper - Reliable file uploads with retry\n\n")
fmt.Fprintf(os.Stderr, "Usage: gitcaddy-upload -url URL -token TOKEN -file FILE\n\n")
fmt.Fprintf(os.Stderr, "Options:\n")
flag.PrintDefaults()
os.Exit(1)
}
helper := artifact.NewUploadHelper()
helper.MaxRetries = *retries
if err := helper.UploadWithRetry(*url, *token, *file); err != nil {
fmt.Fprintf(os.Stderr, "Upload failed: %v\n", err)
os.Exit(1)
}
fmt.Println("Upload succeeded!")
}

7
go.mod
View File

@@ -5,7 +5,7 @@ go 1.24.0
toolchain go1.24.11
require (
code.gitea.io/actions-proto-go v0.5.0
code.gitea.io/actions-proto-go v0.5.2
code.gitea.io/gitea-vet v0.2.3
connectrpc.com/connect v1.16.2
github.com/avast/retry-go/v4 v4.6.0
@@ -23,6 +23,8 @@ require (
gotest.tools/v3 v3.5.1
)
require golang.org/x/sys v0.37.0
require (
cyphar.com/go-pathrs v0.2.1 // indirect
dario.cat/mergo v1.0.0 // indirect
@@ -96,7 +98,6 @@ require (
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/net v0.45.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/tools v0.23.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -110,4 +111,4 @@ replace github.com/go-git/go-git/v5 => github.com/go-git/go-git/v5 v5.16.2
replace github.com/distribution/reference v0.6.0 => github.com/distribution/reference v0.5.0
// Use GitCaddy fork with capability support
replace code.gitea.io/actions-proto-go => git.marketally.com/gitcaddy/actions-proto-go v0.5.0
replace code.gitea.io/actions-proto-go => git.marketally.com/gitcaddy/actions-proto-go v0.5.8

6
go.sum
View File

@@ -6,8 +6,10 @@ cyphar.com/go-pathrs v0.2.1 h1:9nx1vOgwVvX1mNBWDu93+vaceedpbsDqo+XuBGL40b8=
cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
git.marketally.com/gitcaddy/actions-proto-go v0.5.0 h1:D2loMnqTXiaJL6TMfOOUJz4/3Vpv0AnMDSJVuiqMNrM=
git.marketally.com/gitcaddy/actions-proto-go v0.5.0/go.mod h1:li5RzZsj1sV8a0SXzXWsGNwv0dYw7Wj829AgloZqF5o=
git.marketally.com/gitcaddy/actions-proto-go v0.5.7 h1:RUbafr3Vkw2l4WfSwa+oF+Ihakbm05W0FlAmXuQrDJc=
git.marketally.com/gitcaddy/actions-proto-go v0.5.7/go.mod h1:RPu21UoRD3zSAujoZR6LJwuVNa2uFRBveadslczCRfQ=
git.marketally.com/gitcaddy/actions-proto-go v0.5.8 h1:MBipeHvY6A0jcobvziUtzgatZTrV4fs/HE1rPQxREN4=
git.marketally.com/gitcaddy/actions-proto-go v0.5.8/go.mod h1:RPu21UoRD3zSAujoZR6LJwuVNa2uFRBveadslczCRfQ=
gitea.com/gitea/act v0.261.7-0.20251202193638-5417d3ac6742 h1:ulcquQluJbmNASkh6ina70LvcHEa9eWYfQ+DeAZ0VEE=
gitea.com/gitea/act v0.261.7-0.20251202193638-5417d3ac6742/go.mod h1:Pg5C9kQY1CEA3QjthjhlrqOC/QOT5NyWNjOjRHw23Ok=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=

View File

@@ -1,4 +1,4 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// Copyright 2022 The Gitea Authors and MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
@@ -10,14 +10,15 @@ import (
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/pkg/cleanup"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
func Execute(ctx context.Context) {
// ./act_runner
// ./gitcaddy-runner
rootCmd := &cobra.Command{
Use: "act_runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Use: "gitcaddy-runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
Version: ver.Version(),
@@ -26,7 +27,7 @@ func Execute(ctx context.Context) {
configFile := ""
rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "Config file path")
// ./act_runner register
// ./gitcaddy-runner register
var regArgs registerArgs
registerCmd := &cobra.Command{
Use: "register",
@@ -35,14 +36,14 @@ func Execute(ctx context.Context) {
RunE: runRegister(ctx, &regArgs, &configFile), // must use a pointer to regArgs
}
registerCmd.Flags().BoolVar(&regArgs.NoInteractive, "no-interactive", false, "Disable interactive mode")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Gitea instance address")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "GitCaddy instance address")
registerCmd.Flags().StringVar(&regArgs.Token, "token", "", "Runner token")
registerCmd.Flags().StringVar(&regArgs.RunnerName, "name", "", "Runner name")
registerCmd.Flags().StringVar(&regArgs.Labels, "labels", "", "Runner tags, comma separated")
registerCmd.Flags().BoolVar(&regArgs.Ephemeral, "ephemeral", false, "Configure the runner to be ephemeral and only ever be able to pick a single job (stricter than --once)")
rootCmd.AddCommand(registerCmd)
// ./act_runner daemon
// ./gitcaddy-runner daemon
var daemArgs daemonArgs
daemonCmd := &cobra.Command{
Use: "daemon",
@@ -53,10 +54,10 @@ func Execute(ctx context.Context) {
daemonCmd.Flags().BoolVar(&daemArgs.Once, "once", false, "Run one job then exit")
rootCmd.AddCommand(daemonCmd)
// ./act_runner exec
// ./gitcaddy-runner exec
rootCmd.AddCommand(loadExecCmd(ctx))
// ./act_runner config
// ./gitcaddy-runner config
rootCmd.AddCommand(&cobra.Command{
Use: "generate-config",
Short: "Generate an example config file",
@@ -66,7 +67,7 @@ func Execute(ctx context.Context) {
},
})
// ./act_runner cache-server
// ./gitcaddy-runner cache-server
var cacheArgs cacheServerArgs
cacheCmd := &cobra.Command{
Use: "cache-server",
@@ -79,6 +80,31 @@ func Execute(ctx context.Context) {
cacheCmd.Flags().Uint16VarP(&cacheArgs.Port, "port", "p", 0, "Port of the cache server")
rootCmd.AddCommand(cacheCmd)
// ./gitcaddy-runner cleanup
cleanupCmd := &cobra.Command{
Use: "cleanup",
Short: "Manually trigger cleanup to free disk space",
Args: cobra.MaximumNArgs(0),
RunE: func(_ *cobra.Command, _ []string) error {
cfg, err := config.LoadDefault(configFile)
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
result, err := cleanup.RunCleanup(ctx, cfg)
if err != nil {
return fmt.Errorf("cleanup failed: %w", err)
}
fmt.Printf("Cleanup completed: freed %d bytes, deleted %d files in %s\n", result.BytesFreed, result.FilesDeleted, result.Duration)
if len(result.Errors) > 0 {
fmt.Printf("Warnings: %d errors occurred\n", len(result.Errors))
for _, e := range result.Errors {
fmt.Printf(" - %s\n", e)
}
}
return nil
},
}
rootCmd.AddCommand(cleanupCmd)
// hide completion command
rootCmd.CompletionOptions.HiddenDefaultCmd = true

View File

@@ -30,6 +30,20 @@ import (
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
const (
	// DiskSpaceWarningThreshold is the percentage at which to warn about low disk space
	DiskSpaceWarningThreshold = 85.0
	// DiskSpaceCriticalThreshold is the percentage at which to log critical warnings
	DiskSpaceCriticalThreshold = 95.0
	// CapabilitiesUpdateInterval is how often to update capabilities (including disk space)
	CapabilitiesUpdateInterval = 5 * time.Minute
	// BandwidthTestInterval is how often to run bandwidth tests (hourly)
	BandwidthTestInterval = 1 * time.Hour
)

// Global bandwidth manager - accessible for triggering manual tests.
// NOTE(review): package-level mutable state; assigned in runDaemon and read
// by periodicCapabilitiesUpdate — confirm there is never a concurrent writer.
var bandwidthManager *envcheck.BandwidthManager
func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
cfg, err := config.LoadDefault(*configFile)
@@ -143,14 +157,25 @@ func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) fu
dockerHost = dh
}
}
capabilities := envcheck.DetectCapabilities(ctx, dockerHost)
// Initialize bandwidth manager with the Gitea server address
bandwidthManager = envcheck.NewBandwidthManager(reg.Address, BandwidthTestInterval)
bandwidthManager.Start(ctx)
log.Infof("bandwidth manager started, testing against: %s", reg.Address)
capabilities := envcheck.DetectCapabilities(ctx, dockerHost, cfg.Container.WorkdirParent)
// Include initial bandwidth result if available
capabilities.Bandwidth = bandwidthManager.GetLastResult()
capabilitiesJson := capabilities.ToJSON()
log.Infof("detected capabilities: %s", capabilitiesJson)
// Check disk space and warn if low
checkDiskSpaceWarnings(capabilities)
// declare the labels of the runner before fetching tasks
resp, err := runner.Declare(ctx, ls.Names(), capabilitiesJson)
if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
log.Errorf("Your Gitea version is too old to support runner declare, please upgrade to v1.21 or later")
log.Errorf("Your GitCaddy version is too old to support runner declare, please upgrade to v1.21 or later")
return err
} else if err != nil {
log.WithError(err).Error("fail to invoke Declare")
@@ -160,7 +185,24 @@ func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) fu
resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels)
}
// Start periodic capabilities update goroutine
go periodicCapabilitiesUpdate(ctx, runner, ls.Names(), dockerHost, cfg.Container.WorkdirParent)
// Start periodic stale job cache cleanup (every hour, remove caches older than 2 hours)
go func() {
ticker := time.NewTicker(1 * time.Hour)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
runner.CleanStaleJobCaches(2 * time.Hour)
}
}
}()
poller := poll.New(cfg, cli, runner)
poller.SetBandwidthManager(bandwidthManager)
if daemArgs.Once || reg.Ephemeral {
done := make(chan struct{})
@@ -194,6 +236,67 @@ func runDaemon(ctx context.Context, daemArgs *daemonArgs, configFile *string) fu
}
}
// checkDiskSpaceWarnings logs warnings if disk space is low.
// Disk info is optional; when it is absent nothing is logged.
func checkDiskSpaceWarnings(capabilities *envcheck.RunnerCapabilities) {
	disk := capabilities.Disk
	if disk == nil {
		return
	}
	freeGB := float64(disk.Free) / (1024 * 1024 * 1024)
	switch {
	case disk.UsedPercent >= DiskSpaceCriticalThreshold:
		log.Errorf("CRITICAL: Disk space critically low! %.1f%% used, only %.2f GB free. Runner may fail to execute jobs!", disk.UsedPercent, freeGB)
	case disk.UsedPercent >= DiskSpaceWarningThreshold:
		log.Warnf("WARNING: Disk space running low. %.1f%% used, %.2f GB free. Consider cleaning up disk space.", disk.UsedPercent, freeGB)
	}
}
// periodicCapabilitiesUpdate periodically re-detects runner capabilities
// (disk space changes over time, bandwidth is refreshed by the manager) and
// re-declares them to the server every CapabilitiesUpdateInterval. It runs
// until ctx is cancelled, at which point it also stops the bandwidth manager.
func periodicCapabilitiesUpdate(ctx context.Context, runner *run.Runner, labelNames []string, dockerHost string, workingDir string) {
	ticker := time.NewTicker(CapabilitiesUpdateInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			log.Debug("stopping periodic capabilities update")
			if bandwidthManager != nil {
				bandwidthManager.Stop()
			}
			return
		case <-ticker.C:
			// Detect updated capabilities (disk space changes over time)
			capabilities := envcheck.DetectCapabilities(ctx, dockerHost, workingDir)
			// Include latest bandwidth result
			if bandwidthManager != nil {
				capabilities.Bandwidth = bandwidthManager.GetLastResult()
			}
			capabilitiesJson := capabilities.ToJSON()
			// Check for disk space warnings
			checkDiskSpaceWarnings(capabilities)
			// Send updated capabilities to server
			_, err := runner.Declare(ctx, labelNames, capabilitiesJson)
			if err != nil {
				log.WithError(err).Debug("failed to update capabilities")
				continue
			}
			bandwidthInfo := ""
			if capabilities.Bandwidth != nil {
				bandwidthInfo = fmt.Sprintf(", bandwidth: %.1f Mbps", capabilities.Bandwidth.DownloadMbps)
			}
			// Fix: capabilities.Disk can be nil (checkDiskSpaceWarnings treats it
			// as optional); the previous code dereferenced it unconditionally
			// here, which would panic on hosts where disk detection fails.
			if capabilities.Disk != nil {
				log.Debugf("capabilities updated: disk %.1f%% used, %.2f GB free%s",
					capabilities.Disk.UsedPercent,
					float64(capabilities.Disk.Free)/(1024*1024*1024),
					bandwidthInfo)
			} else {
				log.Debugf("capabilities updated%s", bandwidthInfo)
			}
		}
	}
}
// daemonArgs holds the command-line flags for the daemon subcommand.
type daemonArgs struct {
	Once bool // run at most one job, then exit
}

View File

@@ -505,7 +505,7 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")
execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "docker.gitea.com/runner-images:ubuntu-latest", "Docker image to use. Use \"-self-hosted\" to run directly on the host.")
execCmd.PersistentFlags().StringVarP(&execArg.network, "network", "", "", "Specify the network to which the container will connect")
execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "Gitea instance to use.")
execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "GitCaddy instance to use.")
return execCmd
}

View File

@@ -272,7 +272,7 @@ func printStageHelp(stage registerStage) {
case StageOverwriteLocalConfig:
log.Infoln("Runner is already registered, overwrite local config? [y/N]")
case StageInputInstance:
log.Infoln("Enter the Gitea instance URL (for example, https://gitea.com/):")
log.Infoln("Enter the GitCaddy instance URL (for example, https://gitea.com/):")
case StageInputToken:
log.Infoln("Enter the runner token:")
case StageInputRunnerName:
@@ -341,7 +341,7 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
}
if err != nil {
log.WithError(err).
Errorln("Cannot ping the Gitea instance server")
Errorln("Cannot ping the GitCaddy instance server")
// TODO: if ping failed, retry or exit
time.Sleep(time.Second)
} else {

View File

@@ -1,4 +1,4 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2023 The Gitea Authors and MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package poll
@@ -16,15 +16,18 @@ import (
"golang.org/x/time/rate"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/cleanup"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/envcheck"
)
type Poller struct {
client client.Client
runner *run.Runner
cfg *config.Config
tasksVersion atomic.Int64 // tasksVersion used to store the version of the last task fetched from the Gitea.
client client.Client
runner *run.Runner
cfg *config.Config
tasksVersion atomic.Int64 // tasksVersion used to store the version of the last task fetched from the Gitea.
bandwidthManager *envcheck.BandwidthManager
pollingCtx context.Context
shutdownPolling context.CancelFunc
@@ -57,6 +60,11 @@ func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
}
}
// SetBandwidthManager sets the bandwidth manager for on-demand testing.
// A nil manager is allowed; callers that read it check for nil first.
func (p *Poller) SetBandwidthManager(bm *envcheck.BandwidthManager) {
	p.bandwidthManager = bm
}
func (p *Poller) Poll() {
limiter := rate.NewLimiter(rate.Every(p.cfg.Runner.FetchInterval), 1)
wg := &sync.WaitGroup{}
@@ -157,11 +165,23 @@ func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout)
defer cancel()
// Detect capabilities including current disk space
caps := envcheck.DetectCapabilities(ctx, p.cfg.Container.DockerHost, p.cfg.Container.WorkdirParent)
// Include latest bandwidth result if available
if p.bandwidthManager != nil {
caps.Bandwidth = p.bandwidthManager.GetLastResult()
}
capsJson := caps.ToJSON()
// Load the version value that was in the cache when the request was sent.
v := p.tasksVersion.Load()
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{
TasksVersion: v,
}))
fetchReq := &runnerv1.FetchTaskRequest{
TasksVersion: v,
CapabilitiesJson: capsJson,
}
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(fetchReq))
if errors.Is(err, context.DeadlineExceeded) {
err = nil
}
@@ -174,6 +194,32 @@ func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
return nil, false
}
// Check if server requested a bandwidth test
if resp.Msg.RequestBandwidthTest && p.bandwidthManager != nil {
log.Info("Server requested bandwidth test, running now...")
go func() {
result := p.bandwidthManager.RunTest(ctx)
if result != nil {
log.Infof("Bandwidth test completed: %.1f Mbps download, %.0f ms latency",
result.DownloadMbps, result.Latency)
}
}()
}
// Check if server requested a cleanup
if resp.Msg.RequestCleanup {
log.Info("Server requested cleanup, running now...")
go func() {
result, err := cleanup.RunCleanup(ctx, p.cfg)
if err != nil {
log.Errorf("Cleanup failed: %v", err)
} else if result != nil {
log.Infof("Cleanup completed: freed %d bytes, deleted %d files in %s",
result.BytesFreed, result.FilesDeleted, result.Duration)
}
}()
}
if resp.Msg.TasksVersion > v {
p.tasksVersion.CompareAndSwap(v, resp.Msg.TasksVersion)
}
@@ -182,7 +228,7 @@ func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
return nil, false
}
// got a task, set `tasksVersion` to zero to focre query db in next request.
// got a task, set tasksVersion to zero to force query db in next request.
p.tasksVersion.CompareAndSwap(resp.Msg.TasksVersion, 0)
return resp.Msg.Task, true

View File

@@ -7,6 +7,7 @@ import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
@@ -41,6 +42,48 @@ type Runner struct {
runningTasks sync.Map
}
// getJobCacheDir returns the job-isolated cache directory for taskID,
// i.e. <Host.WorkdirParent>/jobs/<taskID>, so that concurrently running
// jobs do not share tool caches.
func (r *Runner) getJobCacheDir(taskID int64) string {
	return filepath.Join(r.cfg.Host.WorkdirParent, "jobs", fmt.Sprintf("%d", taskID))
}
// cleanupJobCache removes the job-specific cache directory after completion.
// Failures are logged but not fatal — the stale-cache sweeper can retry later.
func (r *Runner) cleanupJobCache(taskID int64) {
	dir := r.getJobCacheDir(taskID)
	err := os.RemoveAll(dir)
	if err != nil {
		log.Warnf("failed to cleanup job cache %s: %v", dir, err)
		return
	}
	log.Infof("cleaned up job cache: %s", dir)
}
// CleanStaleJobCaches removes job cache directories whose modification time
// is older than maxAge. A missing jobs directory is silently ignored.
func (r *Runner) CleanStaleJobCaches(maxAge time.Duration) {
	jobsRoot := filepath.Join(r.cfg.Host.WorkdirParent, "jobs")
	entries, err := os.ReadDir(jobsRoot)
	if err != nil {
		return // directory may not exist yet
	}
	deadline := time.Now().Add(-maxAge)
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		fi, err := e.Info()
		if err != nil {
			continue
		}
		if !fi.ModTime().Before(deadline) {
			continue
		}
		stale := filepath.Join(jobsRoot, e.Name())
		if err := os.RemoveAll(stale); err != nil {
			log.Warnf("failed to remove stale job cache %s: %v", stale, err)
		} else {
			log.Infof("evicted stale job cache: %s", stale)
		}
	}
}
func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
ls := labels.Labels{}
for _, v := range reg.Labels {
@@ -95,6 +138,7 @@ func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
}
r.runningTasks.Store(task.Id, struct{}{})
defer r.runningTasks.Delete(task.Id)
defer r.cleanupJobCache(task.Id)
ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
defer cancel()
@@ -197,19 +241,30 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
maxLifetime = time.Until(deadline)
}
// Create job-specific environment with isolated cache directories
jobCacheDir := r.getJobCacheDir(task.Id)
jobEnvs := make(map[string]string, len(r.envs)+2)
for k, v := range r.envs {
jobEnvs[k] = v
}
// Isolate golangci-lint cache to prevent parallel job conflicts
jobEnvs["GOLANGCI_LINT_CACHE"] = filepath.Join(jobCacheDir, "golangci-lint")
// Set XDG_CACHE_HOME to isolate other tools that respect it
jobEnvs["XDG_CACHE_HOME"] = jobCacheDir
runnerConfig := &runner.Config{
// On Linux, Workdir will be like "/<parent_directory>/<owner>/<repo>"
// On Windows, Workdir will be like "\<parent_directory>\<owner>\<repo>"
Workdir: filepath.FromSlash(fmt.Sprintf("/%s/%s", strings.TrimLeft(r.cfg.Container.WorkdirParent, "/"), preset.Repository)),
BindWorkdir: false,
ActionCacheDir: filepath.FromSlash(r.cfg.Host.WorkdirParent),
ActionCacheDir: filepath.FromSlash(jobCacheDir),
ReuseContainers: false,
ForcePull: r.cfg.Container.ForcePull,
ForceRebuild: r.cfg.Container.ForceRebuild,
LogOutput: true,
JSONLogger: false,
Env: r.envs,
Env: jobEnvs,
Secrets: task.Secrets,
GitHubInstance: strings.TrimSuffix(r.client.Address(), "/"),
AutoRemove: true,

View File

@@ -0,0 +1,145 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package artifact
import (
"bytes"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"time"
log "github.com/sirupsen/logrus"
)
// UploadHelper handles reliable file uploads with retry logic.
type UploadHelper struct {
	MaxRetries     int           // number of attempts before giving up
	RetryDelay     time.Duration // base delay, scaled linearly per attempt
	ChunkSize      int64         // intended chunk size for chunked uploads
	ConnectTimeout time.Duration // timeout for establishing a connection
	MaxTimeout     time.Duration // overall cap on a single upload request
}

// NewUploadHelper creates a new upload helper with sensible defaults.
func NewUploadHelper() *UploadHelper {
	helper := &UploadHelper{}
	helper.MaxRetries = 5
	helper.RetryDelay = 10 * time.Second
	helper.ChunkSize = 10 << 20 // 10MB
	helper.ConnectTimeout = 2 * time.Minute
	helper.MaxTimeout = time.Hour
	return helper
}
// UploadWithRetry uploads a file with automatic retry on failure.
// Each retry waits RetryDelay scaled linearly by the attempt number,
// and every attempt first warms the connection (DNS/TCP) before posting.
func (u *UploadHelper) UploadWithRetry(url, token, filepath string) error {
	httpClient := &http.Client{
		Timeout: u.MaxTimeout,
		Transport: &http.Transport{
			MaxIdleConns:        10,
			MaxIdleConnsPerHost: 5,
			IdleConnTimeout:     90 * time.Second,
			DisableKeepAlives:   false, // Keep connections alive
			ForceAttemptHTTP2:   false, // Use HTTP/1.1 for large uploads
		},
	}

	var lastErr error
	for try := 0; try < u.MaxRetries; try++ {
		if try != 0 {
			wait := time.Duration(try) * u.RetryDelay
			log.Infof("Upload attempt %d/%d, waiting %v before retry...", try+1, u.MaxRetries, wait)
			time.Sleep(wait)
		}
		// Pre-resolve DNS / warm connection before the real upload.
		if err := u.prewarmConnection(url); err != nil {
			lastErr = fmt.Errorf("connection prewarm failed: %w", err)
			log.Warnf("Prewarm failed: %v", err)
			continue
		}
		err := u.doUpload(httpClient, url, token, filepath)
		if err == nil {
			log.Infof("Upload succeeded on attempt %d", try+1)
			return nil
		}
		lastErr = err
		log.Warnf("Upload attempt %d failed: %v", try+1, err)
	}
	return fmt.Errorf("upload failed after %d attempts: %w", u.MaxRetries, lastErr)
}
// prewarmConnection establishes a connection to help with DNS and TCP setup
// by issuing a cheap HEAD request before the real upload.
func (u *UploadHelper) prewarmConnection(url string) error {
	probe := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return err
	}
	resp, err := probe.Do(req)
	if err != nil {
		return err
	}
	// HEAD responses carry no body, but Close releases the connection.
	resp.Body.Close()
	return nil
}
// doUpload performs the actual file upload as a multipart/form-data POST
// with the file under the "attachment" field.
func (u *UploadHelper) doUpload(client *http.Client, url, token, filepath string) error {
	f, err := os.Open(filepath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return fmt.Errorf("failed to stat file: %w", err)
	}
	log.Infof("Uploading %s (%d bytes) to %s", filepath, info.Size(), url)

	// Build the multipart form in memory. NOTE(review): this buffers the
	// whole file; streaming via io.Pipe would reduce memory for huge files.
	var form bytes.Buffer
	mw := multipart.NewWriter(&form)
	part, err := mw.CreateFormFile("attachment", info.Name())
	if err != nil {
		return fmt.Errorf("failed to create form file: %w", err)
	}
	if _, err = io.Copy(part, f); err != nil {
		return fmt.Errorf("failed to copy file to form: %w", err)
	}
	mw.Close()

	req, err := http.NewRequest("POST", url, &form)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", token))
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req.Header.Set("Connection", "keep-alive")

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("upload request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		msg, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(msg))
	}
	log.Infof("Upload completed successfully, status: %d", resp.StatusCode)
	return nil
}

View File

@@ -0,0 +1,355 @@
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package cleanup
import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"gitea.com/gitea/act_runner/internal/pkg/config"
	log "github.com/sirupsen/logrus"
)
// CleanupResult contains the results of a cleanup operation.
type CleanupResult struct {
	BytesFreed   int64         // total bytes reclaimed across all stages
	FilesDeleted int           // files (and removed directories) deleted
	Errors       []error       // non-fatal per-stage errors collected during the run
	Duration     time.Duration // wall-clock time the whole cleanup took
}
// RunCleanup performs cleanup operations to free disk space.
// It is best-effort: individual stage failures are collected into
// result.Errors instead of aborting the run, and the function always
// returns a non-nil result.
func RunCleanup(ctx context.Context, cfg *config.Config) (*CleanupResult, error) {
	start := time.Now()
	result := &CleanupResult{}

	log.Info("Starting runner cleanup...")

	// 1. Clean old cache directories.
	// Fix: test cfg.Cache.Dir itself — filepath.Join(dir, "_cache") is never
	// empty, so the previous check was dead and an unconfigured cache dir
	// caused a relative "_cache" path to be cleaned.
	if cfg.Cache.Dir != "" {
		cacheDir := filepath.Join(cfg.Cache.Dir, "_cache")
		if bytes, files, err := cleanOldDir(cacheDir, 24*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("cache cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned cache: freed %d bytes, deleted %d files", bytes, files)
		}
	}

	// 2. Clean old work directories
	workDir := cfg.Container.WorkdirParent
	if workDir != "" {
		if bytes, files, err := cleanOldWorkDirs(workDir, 48*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("workdir cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned work dirs: freed %d bytes, deleted %d files", bytes, files)
		}
	}

	// 3. Clean old artifact staging directories (guarded like the cache dir)
	if artifactDir := cfg.Cache.Dir; artifactDir != "" {
		if bytes, files, err := cleanOldArtifacts(artifactDir, 72*time.Hour); err != nil {
			result.Errors = append(result.Errors, fmt.Errorf("artifact cleanup: %w", err))
		} else {
			result.BytesFreed += bytes
			result.FilesDeleted += files
			log.Infof("Cleaned artifacts: freed %d bytes, deleted %d files", bytes, files)
		}
	}

	// 4. Clean system temp files (older than 24h)
	if bytes, files, err := cleanTempDir(24 * time.Hour); err != nil {
		result.Errors = append(result.Errors, fmt.Errorf("temp cleanup: %w", err))
	} else {
		result.BytesFreed += bytes
		result.FilesDeleted += files
		log.Infof("Cleaned temp: freed %d bytes, deleted %d files", bytes, files)
	}

	// 5. Clean build tool caches (older than 7 days)
	// These can grow very large from Go, npm, nuget, gradle, maven builds
	if bytes, files, err := cleanBuildCaches(7 * 24 * time.Hour); err != nil {
		result.Errors = append(result.Errors, fmt.Errorf("build cache cleanup: %w", err))
	} else {
		result.BytesFreed += bytes
		result.FilesDeleted += files
		log.Infof("Cleaned build caches: freed %d bytes, deleted %d files", bytes, files)
	}

	result.Duration = time.Since(start)
	log.Infof("Cleanup completed: freed %s in %s", formatBytes(result.BytesFreed), result.Duration)
	return result, nil
}
// cleanOldDir removes files older than maxAge from a directory
func cleanOldDir(dir string, maxAge time.Duration) (int64, int, error) {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return 0, 0, nil
}
var bytesFreed int64
var filesDeleted int
cutoff := time.Now().Add(-maxAge)
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // Skip errors
}
if info.IsDir() {
return nil
}
if info.ModTime().Before(cutoff) {
size := info.Size()
if err := os.Remove(path); err == nil {
bytesFreed += size
filesDeleted++
}
}
return nil
})
return bytesFreed, filesDeleted, err
}
// cleanOldWorkDirs removes work directories older than maxAge.
// Each removed directory counts as one entry in the deleted count;
// a missing base directory is not an error.
func cleanOldWorkDirs(baseDir string, maxAge time.Duration) (int64, int, error) {
	if _, err := os.Stat(baseDir); os.IsNotExist(err) {
		return 0, 0, nil
	}
	entries, err := os.ReadDir(baseDir)
	if err != nil {
		return 0, 0, err
	}
	var (
		freed   int64
		removed int
	)
	deadline := time.Now().Add(-maxAge)
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		fi, err := e.Info()
		if err != nil {
			continue
		}
		if !fi.ModTime().Before(deadline) {
			continue
		}
		dir := filepath.Join(baseDir, e.Name())
		n := dirSize(dir)
		if os.RemoveAll(dir) == nil {
			freed += n
			removed++
			log.Debugf("Removed old work dir: %s", dir)
		}
	}
	return freed, removed, nil
}
// cleanOldArtifacts removes artifact staging files older than maxAge.
// Only entries matching the known staging-name patterns are touched.
func cleanOldArtifacts(baseDir string, maxAge time.Duration) (int64, int, error) {
	if _, err := os.Stat(baseDir); os.IsNotExist(err) {
		return 0, 0, nil
	}
	var (
		freed   int64
		removed int
	)
	deadline := time.Now().Add(-maxAge)
	// Look for artifact staging dirs
	for _, pattern := range []string{"artifact-*", "upload-*", "download-*"} {
		matches, _ := filepath.Glob(filepath.Join(baseDir, pattern))
		for _, match := range matches {
			fi, err := os.Stat(match)
			if err != nil {
				continue
			}
			if !fi.ModTime().Before(deadline) {
				continue
			}
			var size int64
			var rmErr error
			if fi.IsDir() {
				size = dirSize(match)
				rmErr = os.RemoveAll(match)
			} else {
				size = fi.Size()
				rmErr = os.Remove(match)
			}
			if rmErr == nil {
				freed += size
				removed++
			}
		}
	}
	return freed, removed, nil
}
// cleanTempDir removes old runner-related files from the system temp
// directory. Only entries whose names carry known runner/act prefixes are
// touched, so unrelated temp files belonging to other processes are left
// alone.
func cleanTempDir(maxAge time.Duration) (int64, int, error) {
	tmpDir := os.TempDir()
	entries, err := os.ReadDir(tmpDir)
	if err != nil {
		return 0, 0, err
	}
	var bytesFreed int64
	var filesDeleted int
	cutoff := time.Now().Add(-maxAge)
	// Only clean files/dirs that look like runner/act artifacts
	runnerPrefixes := []string{"act-", "runner-", "gitea-", "workflow-"}
	for _, entry := range entries {
		name := entry.Name()
		matched := false
		for _, prefix := range runnerPrefixes {
			// Idiomatic prefix test instead of manual length-checked slicing.
			if strings.HasPrefix(name, prefix) {
				matched = true
				break
			}
		}
		if !matched {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			continue
		}
		if !info.ModTime().Before(cutoff) {
			continue
		}
		path := filepath.Join(tmpDir, name)
		var size int64
		if info.IsDir() {
			size = dirSize(path)
			err = os.RemoveAll(path)
		} else {
			size = info.Size()
			err = os.Remove(path)
		}
		if err == nil {
			bytesFreed += size
			filesDeleted++
		}
	}
	return bytesFreed, filesDeleted, nil
}
// dirSize calculates the total size of a directory
func dirSize(path string) int64 {
var size int64
filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if !info.IsDir() {
size += info.Size()
}
return nil
})
return size
}
// cleanBuildCaches removes old build tool caches that accumulate from CI jobs.
// These are cleaned more aggressively (files older than maxAge) since they
// can grow very large from Go, npm, NuGet, Gradle, Maven, pip and Cargo runs.
func cleanBuildCaches(maxAge time.Duration) (int64, int, error) {
	home := os.Getenv("HOME")
	if home == "" {
		home = "/root" // fallback for runners typically running as root
	}

	// Cache locations of common build tools, each with a label for logging.
	caches := []struct {
		path string
		desc string
	}{
		{filepath.Join(home, ".cache", "go-build"), "Go build cache"},
		{filepath.Join(home, ".cache", "golangci-lint"), "golangci-lint cache"},
		{filepath.Join(home, ".npm", "_cacache"), "npm cache"},
		{filepath.Join(home, ".cache", "pnpm"), "pnpm cache"},
		{filepath.Join(home, ".cache", "yarn"), "yarn cache"},
		{filepath.Join(home, ".nuget", "packages"), "NuGet cache"},
		{filepath.Join(home, ".gradle", "caches"), "Gradle cache"},
		{filepath.Join(home, ".m2", "repository"), "Maven cache"},
		{filepath.Join(home, ".cache", "pip"), "pip cache"},
		{filepath.Join(home, ".cargo", "registry", "cache"), "Cargo cache"},
		{filepath.Join(home, ".rustup", "tmp"), "Rustup temp"},
	}

	deadline := time.Now().Add(-maxAge)
	var totalFreed int64
	var totalRemoved int
	for _, c := range caches {
		if _, err := os.Stat(c.path); os.IsNotExist(err) {
			continue
		}
		var freed int64
		var removed int
		walkErr := filepath.Walk(c.path, func(p string, fi os.FileInfo, err error) error {
			if err != nil || fi.IsDir() {
				return nil // best-effort: skip unreadable entries and directories
			}
			if fi.ModTime().Before(deadline) {
				n := fi.Size()
				if os.Remove(p) == nil {
					freed += n
					removed++
				}
			}
			return nil
		})
		if walkErr == nil && (freed > 0 || removed > 0) {
			log.Infof("Cleaned %s: freed %s, deleted %d files", c.desc, formatBytes(freed), removed)
			totalFreed += freed
			totalRemoved += removed
		}
		// Prune directories that became empty (single best-effort pass).
		filepath.Walk(c.path, func(p string, fi os.FileInfo, err error) error {
			if err != nil || !fi.IsDir() || p == c.path {
				return nil
			}
			if children, _ := os.ReadDir(p); len(children) == 0 {
				os.Remove(p)
			}
			return nil
		})
	}
	return totalFreed, totalRemoved, nil
}
// formatBytes renders a byte count as a human readable string using
// 1024-based units (B, KB, MB, ...).
func formatBytes(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	value := float64(bytes)
	const suffixes = "KMGTPE"
	idx := -1
	for value >= unit && idx < len(suffixes)-1 {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value, suffixes[idx])
}

View File

@@ -0,0 +1,209 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package envcheck
import (
"context"
"fmt"
"io"
"net/http"
"sync"
"time"
)
// BandwidthInfo holds network bandwidth test results.
type BandwidthInfo struct {
	DownloadMbps float64   `json:"download_mbps"`         // measured download throughput in Mbps
	UploadMbps   float64   `json:"upload_mbps,omitempty"` // not populated by TestBandwidth in this file
	Latency      float64   `json:"latency_ms,omitempty"`  // HEAD round-trip time in milliseconds
	TestedAt     time.Time `json:"tested_at"`             // when the measurement was taken
}
// BandwidthManager handles periodic bandwidth testing against a single
// server and caches the most recent result for concurrent readers.
type BandwidthManager struct {
	serverURL    string         // base URL of the server to test against
	lastResult   *BandwidthInfo // most recent test result; guarded by mu
	mu           sync.RWMutex   // protects lastResult
	testInterval time.Duration  // how often Start's goroutine re-tests
	stopChan     chan struct{}  // closed by Stop to end periodic testing
}
// NewBandwidthManager creates a new bandwidth manager that will test
// against serverURL every testInterval once Start is called.
func NewBandwidthManager(serverURL string, testInterval time.Duration) *BandwidthManager {
	bm := &BandwidthManager{}
	bm.serverURL = serverURL
	bm.testInterval = testInterval
	bm.stopChan = make(chan struct{})
	return bm
}
// Start begins periodic bandwidth testing. An initial test runs
// synchronously; a background goroutine then repeats the test every
// testInterval until Stop is called or ctx is cancelled.
func (bm *BandwidthManager) Start(ctx context.Context) {
	bm.RunTest(ctx) // initial measurement before going periodic

	go func() {
		ticker := time.NewTicker(bm.testInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-bm.stopChan:
				return
			case <-ticker.C:
				bm.RunTest(ctx)
			}
		}
	}()
}
// Stop stops the periodic testing. It is safe to call sequentially more
// than once: the previous unconditional close(bm.stopChan) panicked on a
// second call. NOTE(review): truly concurrent Stop calls are still not
// synchronized — switch to a sync.Once field if that becomes possible.
func (bm *BandwidthManager) Stop() {
	select {
	case <-bm.stopChan:
		// Channel already closed by an earlier Stop; nothing to do.
	default:
		close(bm.stopChan)
	}
}
// RunTest runs a bandwidth test against the configured server and stores
// the result as the latest measurement.
func (bm *BandwidthManager) RunTest(ctx context.Context) *BandwidthInfo {
	measurement := TestBandwidth(ctx, bm.serverURL)
	bm.mu.Lock()
	defer bm.mu.Unlock()
	bm.lastResult = measurement
	return measurement
}
// GetLastResult returns the most recent bandwidth test result, or nil if
// no test has completed yet.
func (bm *BandwidthManager) GetLastResult() *BandwidthInfo {
	bm.mu.RLock()
	result := bm.lastResult
	bm.mu.RUnlock()
	return result
}
// TestBandwidth tests network bandwidth to the GitCaddy server.
// It returns nil when no server URL is configured.
func TestBandwidth(ctx context.Context, serverURL string) *BandwidthInfo {
	if serverURL == "" {
		return nil
	}
	result := &BandwidthInfo{TestedAt: time.Now()}
	// Latency first, then throughput.
	result.Latency = testLatency(ctx, serverURL)
	result.DownloadMbps = testDownloadSpeed(ctx, serverURL)
	return result
}
func testLatency(ctx context.Context, serverURL string) float64 {
client := &http.Client{
Timeout: 10 * time.Second,
}
reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(reqCtx, "HEAD", serverURL, nil)
if err != nil {
return 0
}
start := time.Now()
resp, err := client.Do(req)
if err != nil {
return 0
}
resp.Body.Close()
latency := time.Since(start).Seconds() * 1000 // Convert to ms
return float64(int(latency*100)) / 100 // Round to 2 decimals
}
func testDownloadSpeed(ctx context.Context, serverURL string) float64 {
// Try multiple endpoints to accumulate ~1MB of data
endpoints := []string{
"/assets/css/index.css",
"/assets/js/index.js",
"/assets/img/logo.svg",
"/assets/img/logo.png",
"/",
}
client := &http.Client{
Timeout: 30 * time.Second,
}
var totalBytes int64
var totalDuration time.Duration
targetBytes := int64(1024 * 1024) // 1MB target
maxAttempts := 10 // Limit iterations
for attempt := 0; attempt < maxAttempts && totalBytes < targetBytes; attempt++ {
for _, endpoint := range endpoints {
if totalBytes >= targetBytes {
break
}
url := serverURL + endpoint
reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
req, err := http.NewRequestWithContext(reqCtx, "GET", url, nil)
if err != nil {
cancel()
continue
}
start := time.Now()
resp, err := client.Do(req)
if err != nil {
cancel()
continue
}
n, _ := io.Copy(io.Discard, resp.Body)
resp.Body.Close()
cancel()
duration := time.Since(start)
if n > 0 {
totalBytes += n
totalDuration += duration
}
}
}
if totalBytes == 0 || totalDuration == 0 {
return 0
}
// Calculate speed in Mbps
seconds := totalDuration.Seconds()
if seconds == 0 {
return 0
}
bytesPerSecond := float64(totalBytes) / seconds
mbps := (bytesPerSecond * 8) / (1024 * 1024)
return float64(int(mbps*100)) / 100
}
// FormatBandwidth renders a bandwidth figure (in Mbps) for display, scaling
// to Gbps at 1000 and reporting "Unknown" for an unmeasured (zero) value.
func FormatBandwidth(mbps float64) string {
	switch {
	case mbps == 0:
		return "Unknown"
	case mbps >= 1000:
		return fmt.Sprintf("%.1f Gbps", mbps/1000)
	default:
		return fmt.Sprintf("%.1f Mbps", mbps)
	}
}

View File

@@ -1,12 +1,15 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// Copyright 2026 MarketAlly. All rights reserved.
// SPDX-License-Identifier: MIT
package envcheck
import (
	"bufio"
	"context"
	"encoding/json"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"
@@ -14,17 +17,58 @@ import (
"github.com/docker/docker/client"
)
// DiskInfo holds disk space information for a single filesystem or drive,
// as reported in runner capabilities.
type DiskInfo struct {
	Path        string  `json:"path,omitempty"` // Path being checked (working directory)
	Total       uint64  `json:"total_bytes"`    // Total capacity in bytes
	Free        uint64  `json:"free_bytes"`     // Bytes available (Bavail on Unix)
	Used        uint64  `json:"used_bytes"`     // Total - Free, in bytes
	UsedPercent float64 `json:"used_percent"`   // Used as a percentage of Total
}
// CPUInfo holds CPU load information reported in runner capabilities.
// On Windows, only NumCPU, LoadPercent, and a derived LoadAvg1m are
// populated (Windows has no load-average concept).
type CPUInfo struct {
	NumCPU      int     `json:"num_cpu"`      // Number of logical CPUs
	LoadAvg1m   float64 `json:"load_avg_1m"`  // 1-minute load average
	LoadAvg5m   float64 `json:"load_avg_5m"`  // 5-minute load average (0 where unavailable)
	LoadAvg15m  float64 `json:"load_avg_15m"` // 15-minute load average (0 where unavailable)
	LoadPercent float64 `json:"load_percent"` // (load_avg_1m / num_cpu) * 100
}
// DistroInfo holds Linux distribution information parsed from
// /etc/os-release (see detectLinuxDistro).
type DistroInfo struct {
	ID         string `json:"id,omitempty"`          // e.g., "ubuntu", "debian", "fedora"
	VersionID  string `json:"version_id,omitempty"`  // e.g., "24.04", "12"
	PrettyName string `json:"pretty_name,omitempty"` // e.g., "Ubuntu 24.04 LTS"
}
// XcodeInfo holds Xcode and iOS development information, gathered on macOS
// via `xcodebuild -version`, `xcodebuild -showsdks`, and `xcrun simctl`.
type XcodeInfo struct {
	Version    string   `json:"version,omitempty"`    // Xcode version, e.g. "15.0"
	Build      string   `json:"build,omitempty"`      // Xcode build identifier
	SDKs       []string `json:"sdks,omitempty"`       // e.g., ["iOS 17.0", "macOS 14.0"]
	Simulators []string `json:"simulators,omitempty"` // Available iOS simulators
}
// RunnerCapabilities represents the capabilities of a runner for AI consumption.
// It is populated by DetectCapabilities and serialized via ToJSON.
type RunnerCapabilities struct {
	OS               string              `json:"os"`                          // runtime.GOOS
	Arch             string              `json:"arch"`                        // runtime.GOARCH
	Distro           *DistroInfo         `json:"distro,omitempty"`            // Linux distribution (Linux only)
	Xcode            *XcodeInfo          `json:"xcode,omitempty"`             // Xcode/iOS tooling (macOS only)
	Docker           bool                `json:"docker"`                      // Docker daemon reachable
	DockerCompose    bool                `json:"docker_compose"`              // docker compose available
	ContainerRuntime string              `json:"container_runtime,omitempty"` // Detected container runtime name
	Shell            []string            `json:"shell,omitempty"`             // Available shells
	Tools            map[string][]string `json:"tools,omitempty"`             // Tool name -> detected versions
	BuildTools       []string            `json:"build_tools,omitempty"`       // Available build/installer tools
	PackageManagers  []string            `json:"package_managers,omitempty"`  // OS package managers on PATH
	Features         *CapabilityFeatures `json:"features,omitempty"`          // Feature support flags
	Limitations      []string            `json:"limitations,omitempty"`       // Known limitations of this runner
	Disk             *DiskInfo           `json:"disk,omitempty"`              // Working directory's filesystem
	CPU              *CPUInfo            `json:"cpu,omitempty"`               // Current CPU load snapshot
	Bandwidth        *BandwidthInfo      `json:"bandwidth,omitempty"`         // Most recent bandwidth test
	SuggestedLabels  []string            `json:"suggested_labels,omitempty"`  // Labels derived from detected capabilities
}
// CapabilityFeatures represents feature support flags
@@ -36,12 +80,15 @@ type CapabilityFeatures struct {
}
// DetectCapabilities detects the runner's capabilities
func DetectCapabilities(ctx context.Context, dockerHost string) *RunnerCapabilities {
// workingDir is the directory where builds will run (for disk space detection)
func DetectCapabilities(ctx context.Context, dockerHost string, workingDir string) *RunnerCapabilities {
cap := &RunnerCapabilities{
OS: runtime.GOOS,
Arch: runtime.GOARCH,
Tools: make(map[string][]string),
Shell: detectShells(),
OS: runtime.GOOS,
Arch: runtime.GOARCH,
Tools: make(map[string][]string),
BuildTools: []string{},
PackageManagers: []string{},
Shell: detectShells(),
Features: &CapabilityFeatures{
ArtifactsV4: false, // Gitea doesn't support v4 artifacts
Cache: true,
@@ -54,6 +101,16 @@ func DetectCapabilities(ctx context.Context, dockerHost string) *RunnerCapabilit
},
}
// Detect Linux distribution
if runtime.GOOS == "linux" {
cap.Distro = detectLinuxDistro()
}
// Detect macOS Xcode/iOS
if runtime.GOOS == "darwin" {
cap.Xcode = detectXcode(ctx)
}
// Detect Docker
cap.Docker, cap.ContainerRuntime = detectDocker(ctx, dockerHost)
if cap.Docker {
@@ -64,9 +121,218 @@ func DetectCapabilities(ctx context.Context, dockerHost string) *RunnerCapabilit
// Detect common tools
detectTools(ctx, cap)
// Detect build tools
detectBuildTools(ctx, cap)
// Detect package managers
detectPackageManagers(ctx, cap)
// Detect disk space on the working directory's filesystem
cap.Disk = detectDiskSpace(workingDir)
// Detect CPU load
cap.CPU = detectCPULoad()
// Generate suggested labels based on detected capabilities
cap.SuggestedLabels = generateSuggestedLabels(cap)
return cap
}
// detectXcode detects Xcode and iOS development capabilities on macOS.
// It shells out to xcodebuild for the version and SDK list, and to
// `xcrun simctl` for available iOS simulators. Returns nil when xcodebuild
// is unavailable or reports no version.
func detectXcode(ctx context.Context) *XcodeInfo {
	timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// Check for xcodebuild.
	cmd := exec.CommandContext(timeoutCtx, "xcodebuild", "-version")
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	xcode := &XcodeInfo{}
	for _, line := range strings.Split(string(output), "\n") {
		if strings.HasPrefix(line, "Xcode ") {
			xcode.Version = strings.TrimPrefix(line, "Xcode ")
		} else if strings.HasPrefix(line, "Build version ") {
			xcode.Build = strings.TrimPrefix(line, "Build version ")
		}
	}

	// Get available SDKs.
	cmd = exec.CommandContext(timeoutCtx, "xcodebuild", "-showsdks")
	output, err = cmd.Output()
	if err == nil {
		for _, line := range strings.Split(string(output), "\n") {
			line = strings.TrimSpace(line)
			// Skip header lines such as "iOS SDKs:".
			if strings.Contains(line, "SDK") || strings.HasPrefix(line, "-sdk") {
				continue
			}
			if strings.Contains(line, "iOS") || strings.Contains(line, "macOS") ||
				strings.Contains(line, "watchOS") || strings.Contains(line, "tvOS") ||
				strings.Contains(line, "visionOS") || strings.Contains(line, "xrOS") {
				// SDK rows look like "iOS 17.0    -sdk iphoneos17.0"; keep the
				// human-readable name before the "-sdk" flag.
				if idx := strings.Index(line, "-sdk"); idx != -1 {
					if sdkName := strings.TrimSpace(line[:idx]); sdkName != "" {
						xcode.SDKs = append(xcode.SDKs, sdkName)
					}
				}
			}
		}
	}

	// Get available simulators.
	cmd = exec.CommandContext(timeoutCtx, "xcrun", "simctl", "list", "devices", "available", "-j")
	output, err = cmd.Output()
	if err == nil {
		var simData struct {
			Devices map[string][]struct {
				Name  string `json:"name"`
				State string `json:"state"`
			} `json:"devices"`
		}
		if json.Unmarshal(output, &simData) == nil {
			seen := make(map[string]bool)
			// Loop variable renamed from "runtime" — the original shadowed the
			// imported runtime package.
			for runtimeName, devices := range simData.Devices {
				if !strings.Contains(runtimeName, "iOS") {
					continue
				}
				for _, dev := range devices {
					if !seen[dev.Name] {
						seen[dev.Name] = true
						xcode.Simulators = append(xcode.Simulators, dev.Name)
					}
				}
			}
		}
	}

	if xcode.Version == "" {
		return nil
	}
	return xcode
}
// detectLinuxDistro parses /etc/os-release into a DistroInfo. Returns nil if
// the file is missing or no ID field is present.
func detectLinuxDistro() *DistroInfo {
	file, err := os.Open("/etc/os-release")
	if err != nil {
		return nil
	}
	defer file.Close()

	info := &DistroInfo{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Lines look like KEY=value or KEY="quoted value".
		kv := strings.SplitN(scanner.Text(), "=", 2)
		if len(kv) != 2 {
			continue
		}
		value := strings.Trim(kv[1], "\"")
		switch kv[0] {
		case "ID":
			info.ID = value
		case "VERSION_ID":
			info.VersionID = value
		case "PRETTY_NAME":
			info.PrettyName = value
		}
	}

	if info.ID == "" {
		return nil
	}
	return info
}
// generateSuggestedLabels creates industry-standard runner labels from the
// detected capabilities: OS and distro labels, Xcode/iOS platform labels,
// language-tool labels, and labels for Windows build/installer tools.
// Labels are de-duplicated while preserving first-seen order.
func generateSuggestedLabels(cap *RunnerCapabilities) []string {
	labels := []string{}
	seen := make(map[string]bool)
	add := func(label string) {
		if label == "" || seen[label] {
			return
		}
		seen[label] = true
		labels = append(labels, label)
	}

	// OS labels: a base name plus a "-latest" alias.
	osBase := map[string]string{
		"linux":   "linux",
		"windows": "windows",
		"darwin":  "macos",
	}
	if base, ok := osBase[cap.OS]; ok {
		add(base)
		add(base + "-latest")
	}

	// Distro labels (Linux only).
	if cap.Distro != nil && cap.Distro.ID != "" {
		id := strings.ToLower(cap.Distro.ID)
		add(id)
		add(id + "-latest")
	}

	// Xcode/iOS labels (macOS only).
	if cap.Xcode != nil {
		add("xcode")
		for _, sdk := range cap.Xcode.SDKs {
			name := strings.ToLower(sdk)
			if strings.Contains(name, "ios") {
				add("ios")
			}
			if strings.Contains(name, "visionos") || strings.Contains(name, "xros") {
				add("visionos")
			}
			if strings.Contains(name, "watchos") {
				add("watchos")
			}
			if strings.Contains(name, "tvos") {
				add("tvos")
			}
		}
		if len(cap.Xcode.Simulators) > 0 {
			add("ios-simulator")
		}
	}

	// Language/tool labels.
	for _, tool := range []string{"dotnet", "java", "node"} {
		if _, ok := cap.Tools[tool]; ok {
			add(tool)
		}
	}

	// Windows build/installer tool labels.
	buildToolLabel := map[string]string{
		"msbuild":       "msbuild",
		"visual-studio": "vs2022", // or detect actual version
		"inno-setup":    "inno-setup",
		"nsis":          "nsis",
	}
	for _, tool := range cap.BuildTools {
		if label, ok := buildToolLabel[tool]; ok {
			add(label)
		}
	}

	return labels
}
// ToJSON converts capabilities to JSON string for transmission
func (c *RunnerCapabilities) ToJSON() string {
data, err := json.Marshal(c)
@@ -154,12 +420,19 @@ func detectDockerCompose(ctx context.Context) bool {
func detectTools(ctx context.Context, cap *RunnerCapabilities) {
toolDetectors := map[string]func(context.Context) []string{
"node": detectNodeVersions,
"go": detectGoVersions,
"python": detectPythonVersions,
"java": detectJavaVersions,
"dotnet": detectDotnetVersions,
"rust": detectRustVersions,
"node": detectNodeVersions,
"go": detectGoVersions,
"python": detectPythonVersions,
"java": detectJavaVersions,
"dotnet": detectDotnetVersions,
"rust": detectRustVersions,
"ruby": detectRubyVersions,
"php": detectPHPVersions,
"swift": detectSwiftVersions,
"kotlin": detectKotlinVersions,
"flutter": detectFlutterVersions,
"dart": detectDartVersions,
"powershell": detectPowerShellVersions,
}
for tool, detector := range toolDetectors {
@@ -167,6 +440,242 @@ func detectTools(ctx context.Context, cap *RunnerCapabilities) {
cap.Tools[tool] = versions
}
}
// Detect additional tools that just need presence check
simpleTools := map[string]string{
"git": "git",
"cmake": "cmake",
"make": "make",
"ninja": "ninja",
"gradle": "gradle",
"maven": "mvn",
"npm": "npm",
"yarn": "yarn",
"pnpm": "pnpm",
"cargo": "cargo",
"pip": "pip3",
}
for name, cmd := range simpleTools {
if v := detectSimpleToolVersion(ctx, cmd); v != "" {
cap.Tools[name] = []string{v}
}
}
}
// detectBuildTools dispatches to the OS-specific build tool detector and
// records results in cap.BuildTools.
func detectBuildTools(ctx context.Context, cap *RunnerCapabilities) {
	detectors := map[string]func(context.Context, *RunnerCapabilities){
		"windows": detectWindowsBuildTools,
		"darwin":  detectMacOSBuildTools,
		"linux":   detectLinuxBuildTools,
	}
	if detect, ok := detectors[runtime.GOOS]; ok {
		detect(ctx, cap)
	}
}
// detectWindowsBuildTools probes well-known install locations (and PATH) for
// Windows build and installer tooling, appending one label per tool family
// found to cap.BuildTools.
func detectWindowsBuildTools(ctx context.Context, cap *RunnerCapabilities) {
	// Visual Studio: confirm an installation via vswhere.
	vswherePaths := []string{
		`C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe`,
		`C:\Program Files\Microsoft Visual Studio\Installer\vswhere.exe`,
	}
	for _, vswhere := range vswherePaths {
		if _, err := os.Stat(vswhere); err != nil {
			continue
		}
		cmd := exec.CommandContext(ctx, vswhere, "-latest", "-property", "displayName")
		if output, err := cmd.Output(); err == nil && len(output) > 0 {
			cap.BuildTools = append(cap.BuildTools, "visual-studio")
			break
		}
	}

	// MSBuild ships with Visual Studio 2022 (64-bit) and 2019 (32-bit).
	if anyFileExists(
		`C:\Program Files\Microsoft Visual Studio\2022\Enterprise\MSBuild\Current\Bin\MSBuild.exe`,
		`C:\Program Files\Microsoft Visual Studio\2022\Professional\MSBuild\Current\Bin\MSBuild.exe`,
		`C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Current\Bin\MSBuild.exe`,
		`C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\MSBuild\Current\Bin\MSBuild.exe`,
		`C:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\MSBuild\Current\Bin\MSBuild.exe`,
		`C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe`,
	) {
		cap.BuildTools = append(cap.BuildTools, "msbuild")
	}

	// Inno Setup: known install paths, then PATH.
	if anyFileExists(
		`C:\Program Files (x86)\Inno Setup 6\ISCC.exe`,
		`C:\Program Files\Inno Setup 6\ISCC.exe`,
		`C:\Program Files (x86)\Inno Setup 5\ISCC.exe`,
		`C:\Program Files\Inno Setup 5\ISCC.exe`,
	) || commandOnPath("iscc") {
		cap.BuildTools = append(cap.BuildTools, "inno-setup")
	}

	// NSIS: known install paths, then PATH.
	if anyFileExists(
		`C:\Program Files (x86)\NSIS\makensis.exe`,
		`C:\Program Files\NSIS\makensis.exe`,
	) || commandOnPath("makensis") {
		cap.BuildTools = append(cap.BuildTools, "nsis")
	}

	// WiX Toolset v3.
	if anyFileExists(
		`C:\Program Files (x86)\WiX Toolset v3.11\bin\candle.exe`,
		`C:\Program Files (x86)\WiX Toolset v3.14\bin\candle.exe`,
	) {
		cap.BuildTools = append(cap.BuildTools, "wix")
	}

	// signtool from any installed Windows 10 SDK version.
	signtoolPaths, _ := filepath.Glob(`C:\Program Files (x86)\Windows Kits\10\bin\*\x64\signtool.exe`)
	if len(signtoolPaths) > 0 {
		cap.BuildTools = append(cap.BuildTools, "signtool")
	}
}

// anyFileExists reports whether at least one of the given paths exists.
func anyFileExists(paths ...string) bool {
	for _, path := range paths {
		if _, err := os.Stat(path); err == nil {
			return true
		}
	}
	return false
}

// commandOnPath reports whether the named executable can be found on PATH.
func commandOnPath(cmd string) bool {
	_, err := exec.LookPath(cmd)
	return err == nil
}
// detectMacOSBuildTools checks PATH for common macOS build, packaging, and
// signing tools, appending a label to cap.BuildTools for each one found.
func detectMacOSBuildTools(ctx context.Context, cap *RunnerCapabilities) {
	// Label -> executable to look up, in reporting order.
	checks := []struct{ label, cmd string }{
		{"xcpretty", "xcpretty"},
		{"fastlane", "fastlane"},
		{"cocoapods", "pod"},
		{"carthage", "carthage"},
		{"swiftlint", "swiftlint"},
		{"create-dmg", "create-dmg"},
		{"packages", "packagesbuild"},
		{"pkgbuild", "pkgbuild"},     // built-in
		{"codesign", "codesign"},     // built-in
		{"notarytool", "notarytool"}, // built-in with Xcode
	}
	for _, c := range checks {
		if _, err := exec.LookPath(c.cmd); err == nil {
			cap.BuildTools = append(cap.BuildTools, c.label)
		}
	}
}
// detectLinuxBuildTools checks PATH for common Linux compilers, build
// systems, and packaging tools, recording each one found in cap.BuildTools.
func detectLinuxBuildTools(ctx context.Context, cap *RunnerCapabilities) {
	for _, tool := range []string{
		"gcc", "g++", "clang", "clang++",
		"autoconf", "automake", "libtool",
		"pkg-config", "meson",
		"dpkg-deb", "rpmbuild", "fpm",
		"appimage-builder", "linuxdeploy",
	} {
		if _, err := exec.LookPath(tool); err == nil {
			cap.BuildTools = append(cap.BuildTools, tool)
		}
	}
}
// detectPackageManagers records which OS-appropriate package managers are on
// PATH in cap.PackageManagers.
func detectPackageManagers(ctx context.Context, cap *RunnerCapabilities) {
	type pm struct{ name, cmd string }
	var candidates []pm
	switch runtime.GOOS {
	case "windows":
		candidates = []pm{
			{"chocolatey", "choco"},
			{"scoop", "scoop"},
			{"winget", "winget"},
		}
	case "darwin":
		candidates = []pm{
			{"homebrew", "brew"},
			{"macports", "port"},
		}
	case "linux":
		candidates = []pm{
			{"apt", "apt"},
			{"yum", "yum"},
			{"dnf", "dnf"},
			{"pacman", "pacman"},
			{"zypper", "zypper"},
			{"apk", "apk"},
			{"snap", "snap"},
			{"flatpak", "flatpak"},
		}
	}
	for _, c := range candidates {
		if _, err := exec.LookPath(c.cmd); err == nil {
			cap.PackageManagers = append(cap.PackageManagers, c.name)
		}
	}
}
func detectNodeVersions(ctx context.Context) []string {
@@ -187,16 +696,8 @@ func detectPythonVersions(ctx context.Context) []string {
// Also try python
if v := detectToolVersion(ctx, "python", "--version", "Python "); len(v) > 0 {
// Avoid duplicates
for _, ver := range v {
found := false
for _, existing := range versions {
if existing == ver {
found = true
break
}
}
if !found {
if !contains(versions, ver) {
versions = append(versions, ver)
}
}
@@ -212,20 +713,17 @@ func detectJavaVersions(ctx context.Context) []string {
return nil
}
// Java version output goes to stderr and looks like: openjdk version "17.0.1" or java version "1.8.0_301"
lines := strings.Split(string(output), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
// Extract version from quotes
start := strings.Index(line, "\"")
end := strings.LastIndex(line, "\"")
if start != -1 && end > start {
version := line[start+1 : end]
// Simplify version (e.g., "17.0.1" -> "17")
parts := strings.Split(version, ".")
if len(parts) > 0 {
if parts[0] == "1" && len(parts) > 1 {
return []string{parts[1]} // Java 8 style: 1.8 -> 8
return []string{parts[1]}
}
return []string{parts[0]}
}
@@ -250,21 +748,11 @@ func detectDotnetVersions(ctx context.Context) []string {
if line == "" {
continue
}
// Format: "8.0.100 [/path/to/sdk]"
parts := strings.Split(line, " ")
if len(parts) > 0 {
version := parts[0]
// Simplify to major version
major := strings.Split(version, ".")[0]
// Avoid duplicates
found := false
for _, v := range versions {
if v == major {
found = true
break
}
}
if !found {
if !contains(versions, major) {
versions = append(versions, major)
}
}
@@ -277,6 +765,102 @@ func detectRustVersions(ctx context.Context) []string {
return detectToolVersion(ctx, "rustc", "--version", "rustc ")
}
// detectRubyVersions returns the installed Ruby version, if any.
func detectRubyVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "ruby", "--version", "ruby ")
}

// detectPHPVersions returns the installed PHP version, if any.
func detectPHPVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "php", "--version", "PHP ")
}

// detectSwiftVersions returns the installed Swift version, if any.
func detectSwiftVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "swift", "--version", "Swift version ")
}

// detectKotlinVersions returns the installed Kotlin version, if any.
// Note: kotlin uses a single-dash "-version" flag, unlike the other tools.
func detectKotlinVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "kotlin", "-version", "Kotlin version ")
}

// detectFlutterVersions returns the installed Flutter version, if any.
func detectFlutterVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "flutter", "--version", "Flutter ")
}

// detectDartVersions returns the installed Dart SDK version, if any.
func detectDartVersions(ctx context.Context) []string {
	return detectToolVersion(ctx, "dart", "--version", "Dart SDK version: ")
}
// detectPowerShellVersions reports installed PowerShell flavors as
// "pwsh:<ver>" (PowerShell Core / 7+) and, on Windows only,
// "powershell:<ver>" (Windows PowerShell 5.x).
func detectPowerShellVersions(ctx context.Context) []string {
	candidates := []string{"pwsh"}
	if runtime.GOOS == "windows" {
		candidates = append(candidates, "powershell")
	}
	versions := []string{}
	for _, shell := range candidates {
		if v := detectPwshVersion(ctx, shell); v != "" {
			versions = append(versions, shell+":"+v)
		}
	}
	return versions
}
func detectPwshVersion(ctx context.Context, cmd string) string {
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
// Use -Command to get version
var c *exec.Cmd
if cmd == "pwsh" {
c = exec.CommandContext(timeoutCtx, cmd, "-Command", "$PSVersionTable.PSVersion.ToString()")
} else {
c = exec.CommandContext(timeoutCtx, cmd, "-Command", "$PSVersionTable.PSVersion.ToString()")
}
output, err := c.Output()
if err != nil {
return ""
}
version := strings.TrimSpace(string(output))
// Return major.minor
parts := strings.Split(version, ".")
if len(parts) >= 2 {
return parts[0] + "." + parts[1]
}
return version
}
func detectSimpleToolVersion(ctx context.Context, cmd string) string {
if _, err := exec.LookPath(cmd); err != nil {
return ""
}
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
c := exec.CommandContext(timeoutCtx, cmd, "--version")
output, err := c.Output()
if err != nil {
// Try without --version for tools that don't support it
return "installed"
}
line := strings.TrimSpace(strings.Split(string(output), "\n")[0])
// Extract version number if possible
parts := strings.Fields(line)
for _, part := range parts {
// Look for something that looks like a version
if len(part) > 0 && (part[0] >= '0' && part[0] <= '9' || part[0] == 'v') {
return strings.TrimPrefix(part, "v")
}
}
return "installed"
}
func detectToolVersion(ctx context.Context, cmd string, args string, prefix string) []string {
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
@@ -294,13 +878,10 @@ func detectToolVersion(ctx context.Context, cmd string, args string, prefix stri
}
}
// Get just the version number
parts := strings.Fields(line)
if len(parts) > 0 {
version := parts[0]
// Clean up version string
version = strings.TrimPrefix(version, "v")
// Return major.minor or just major
vparts := strings.Split(version, ".")
if len(vparts) >= 2 {
return []string{vparts[0] + "." + vparts[1]}
@@ -310,3 +891,98 @@ func detectToolVersion(ctx context.Context, cmd string, args string, prefix stri
return nil
}
// contains reports whether item occurs in slice.
func contains(slice []string, item string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// detectCPULoad detects the current CPU load.
// Linux: reads /proc/loadavg. macOS: queries `sysctl -n vm.loadavg`.
// Windows: queries wmic for an instantaneous CPU percentage (Windows has no
// load-average concept) and derives an equivalent 1-minute load from it.
// On any failure the load fields are simply left at zero.
func detectCPULoad() *CPUInfo {
	numCPU := runtime.NumCPU()
	info := &CPUInfo{
		NumCPU: numCPU,
	}
	switch runtime.GOOS {
	case "linux":
		// Read from /proc/loadavg; the first three fields are the
		// 1/5/15-minute load averages.
		data, err := os.ReadFile("/proc/loadavg")
		if err != nil {
			return info
		}
		parts := strings.Fields(string(data))
		if len(parts) >= 3 {
			if load, err := parseFloat(parts[0]); err == nil {
				info.LoadAvg1m = load
			}
			if load, err := parseFloat(parts[1]); err == nil {
				info.LoadAvg5m = load
			}
			if load, err := parseFloat(parts[2]); err == nil {
				info.LoadAvg15m = load
			}
		}
	case "darwin":
		// Use sysctl on macOS
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		cmd := exec.CommandContext(ctx, "sysctl", "-n", "vm.loadavg")
		output, err := cmd.Output()
		if err == nil {
			// Output format: "{ 1.23 4.56 7.89 }"
			line := strings.Trim(string(output), "{ }\n")
			parts := strings.Fields(line)
			if len(parts) >= 3 {
				if load, err := parseFloat(parts[0]); err == nil {
					info.LoadAvg1m = load
				}
				if load, err := parseFloat(parts[1]); err == nil {
					info.LoadAvg5m = load
				}
				if load, err := parseFloat(parts[2]); err == nil {
					info.LoadAvg15m = load
				}
			}
		}
	case "windows":
		// Windows doesn't have load average, use CPU usage via wmic.
		// NOTE(review): wmic is deprecated and may be absent on recent
		// Windows builds — consider a PowerShell/CIM fallback; verify.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		cmd := exec.CommandContext(ctx, "wmic", "cpu", "get", "loadpercentage")
		output, err := cmd.Output()
		if err == nil {
			lines := strings.Split(string(output), "\n")
			for _, line := range lines {
				line = strings.TrimSpace(line)
				// Skip the "LoadPercentage" column header and blank lines;
				// the first numeric row is the current load percentage.
				if line != "" && line != "LoadPercentage" {
					if load, err := parseFloat(line); err == nil {
						// Convert percentage to a "load" equivalent and
						// return early — the percentage is authoritative.
						info.LoadPercent = load
						info.LoadAvg1m = load * float64(numCPU) / 100.0
						return info
					}
				}
			}
		}
	}
	// Calculate load percent (load_avg_1m / num_cpu * 100)
	if info.LoadAvg1m > 0 && numCPU > 0 {
		info.LoadPercent = (info.LoadAvg1m / float64(numCPU)) * 100.0
	}
	return info
}
// parseFloat parses a decimal string (e.g. a load-average field) into a
// float64, tolerating surrounding whitespace. The original round-tripped the
// value through json.Unmarshal; strconv.ParseFloat is the direct,
// purpose-built stdlib API for this.
func parseFloat(s string) (float64, error) {
	return strconv.ParseFloat(strings.TrimSpace(s), 64)
}

View File

@@ -0,0 +1,43 @@
//go:build unix
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package envcheck
import (
"golang.org/x/sys/unix"
)
// detectDiskSpace detects disk space on the specified path's filesystem
// (Unix version). If path is empty, defaults to "/"; if path cannot be
// statted, falls back to "/". Returns nil when the filesystem cannot be
// queried or reports zero total blocks — the latter would otherwise produce
// a NaN UsedPercent, which makes json.Marshal fail on the containing struct.
func detectDiskSpace(path string) *DiskInfo {
	if path == "" {
		path = "/"
	}

	var stat unix.Statfs_t
	if err := unix.Statfs(path, &stat); err != nil {
		// Fallback to root if the path doesn't exist.
		if err := unix.Statfs("/", &stat); err != nil {
			return nil
		}
		path = "/"
	}

	total := stat.Blocks * uint64(stat.Bsize)
	free := stat.Bavail * uint64(stat.Bsize) // Bavail: space available to unprivileged users
	if total == 0 {
		// Pseudo-filesystems can report zero blocks; avoid dividing by zero.
		return nil
	}
	used := total - free

	return &DiskInfo{
		Path:        path,
		Total:       total,
		Free:        free,
		Used:        used,
		UsedPercent: float64(used) / float64(total) * 100,
	}
}

View File

@@ -0,0 +1,57 @@
//go:build windows
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package envcheck
import (
"path/filepath"
"golang.org/x/sys/windows"
)
// detectDiskSpace detects disk space on the specified path's drive (Windows
// version). If path is empty or cannot be resolved, defaults to "C:\".
// Returns nil when the query fails or reports zero total bytes — the latter
// would otherwise produce a NaN UsedPercent, which makes json.Marshal fail
// on the containing struct.
func detectDiskSpace(path string) *DiskInfo {
	if path == "" {
		path = "C:\\"
	}

	// Resolve to absolute path.
	absPath, err := filepath.Abs(path)
	if err != nil {
		absPath = "C:\\"
	}

	// Extract drive letter (e.g., "D:\" from "D:\builds\runner").
	drivePath := filepath.VolumeName(absPath) + "\\"
	if drivePath == "\\" {
		drivePath = "C:\\"
	}

	var freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64
	pathPtr := windows.StringToUTF16Ptr(drivePath)
	err = windows.GetDiskFreeSpaceEx(pathPtr, &freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes)
	if err != nil {
		// Fallback to C: drive.
		pathPtr = windows.StringToUTF16Ptr("C:\\")
		err = windows.GetDiskFreeSpaceEx(pathPtr, &freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes)
		if err != nil {
			return nil
		}
		drivePath = "C:\\"
	}

	if totalNumberOfBytes == 0 {
		// Defensive: avoid a division by zero below.
		return nil
	}
	used := totalNumberOfBytes - totalNumberOfFreeBytes

	return &DiskInfo{
		Path:        drivePath,
		Total:       totalNumberOfBytes,
		Free:        totalNumberOfFreeBytes,
		Used:        used,
		UsedPercent: float64(used) / float64(totalNumberOfBytes) * 100,
	}
}