Replace wg.Add(1)/go func()/defer wg.Done() pattern with the simpler wg.Go() method as required by the modernize linter. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
684 lines
17 KiB
Go
684 lines
17 KiB
Go
// Copyright 2026 The Gitea Authors. All rights reserved.
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
package cmd
|
|
|
|
import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/json"

	"github.com/spf13/cobra"
)
|
|
|
|
// Bounds for chunked uploads. The user-supplied --chunk-size is clamped to
// maxChunkSize in runReleaseAssetUpload.
const (
	defaultChunkSize = 10 * 1024 * 1024  // 10MB
	maxChunkSize     = 100 * 1024 * 1024 // 100MB
)
|
|
|
|
// uploadCmd is the parent "upload" command; the concrete actions
// (release-asset, resume, list) are registered as subcommands in init().
var uploadCmd = &cobra.Command{
	Use:   "upload",
	Short: "Upload files to Gitea",
	Long:  `Upload files to Gitea with chunked upload support for large files.`,
}
|
|
|
|
// releaseAssetCmd uploads a single release asset through the chunked-upload
// API; see runReleaseAssetUpload for the actual workflow.
var releaseAssetCmd = &cobra.Command{
	Use:   "release-asset",
	Short: "Upload a release asset",
	Long: `Upload a release asset using chunked upload.

This command supports large files with progress tracking and resume capability.
Interrupted uploads can be resumed using the session ID.`,
	Example: ` # Basic upload
 gitea-cli upload release-asset --repo owner/repo --release v1.0.0 --file ./app.tar.gz

 # With options
 gitea-cli upload release-asset \
 --repo owner/repo \
 --release v1.0.0 \
 --file ./app.tar.gz \
 --chunk-size 50MB \
 --parallel 4 \
 --verify-checksum`,
	RunE: runReleaseAssetUpload,
}
|
|
|
|
// resumeCmd continues a previously interrupted chunked upload; see
// runResumeUpload for the workflow.
var resumeCmd = &cobra.Command{
	Use:     "resume",
	Short:   "Resume an interrupted upload",
	Long:    `Resume a previously interrupted chunked upload using its session ID.`,
	Example: ` gitea-cli upload resume --session sess_abc123 --file ./app.tar.gz`,
	RunE:    runResumeUpload,
}
|
|
|
|
// listCmd shows pending (incomplete) upload sessions for a repository; see
// runListUploads.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List pending uploads",
	Long:  `List all pending upload sessions for a repository.`,
	RunE:  runListUploads,
}
|
|
|
|
// init registers all flags on the upload subcommands and attaches the
// subcommands to the parent uploadCmd.
func init() {
	// release-asset flags
	releaseAssetCmd.Flags().StringP("repo", "r", "", "Repository (owner/repo)")
	releaseAssetCmd.Flags().String("release", "", "Release tag or ID")
	releaseAssetCmd.Flags().StringP("file", "f", "", "File to upload")
	releaseAssetCmd.Flags().String("name", "", "Asset name (defaults to filename)")
	releaseAssetCmd.Flags().String("chunk-size", "10MB", "Chunk size (e.g., 10MB, 50MB)")
	releaseAssetCmd.Flags().IntP("parallel", "p", 4, "Number of parallel uploads")
	releaseAssetCmd.Flags().Bool("verify-checksum", true, "Verify checksum after upload")
	releaseAssetCmd.Flags().Bool("progress", true, "Show progress bar")
	// Errors deliberately discarded: the named flags were registered
	// immediately above, so the lookups cannot fail here.
	_ = releaseAssetCmd.MarkFlagRequired("repo")
	_ = releaseAssetCmd.MarkFlagRequired("release")
	_ = releaseAssetCmd.MarkFlagRequired("file")

	// resume flags
	resumeCmd.Flags().String("session", "", "Upload session ID")
	resumeCmd.Flags().StringP("file", "f", "", "File to upload")
	_ = resumeCmd.MarkFlagRequired("session")
	_ = resumeCmd.MarkFlagRequired("file")

	// list flags
	listCmd.Flags().StringP("repo", "r", "", "Repository (owner/repo)")
	_ = listCmd.MarkFlagRequired("repo")

	uploadCmd.AddCommand(releaseAssetCmd)
	uploadCmd.AddCommand(resumeCmd)
	uploadCmd.AddCommand(listCmd)
}
|
|
|
|
// UploadSession represents a chunked upload session
type UploadSession struct {
	ID             string    `json:"id"`
	FileName       string    `json:"file_name"`
	FileSize       int64     `json:"file_size"`       // total size in bytes
	ChunkSize      int64     `json:"chunk_size"`      // size of every chunk except possibly the last
	TotalChunks    int64     `json:"total_chunks"`    // number of chunks the file is split into
	ChunksReceived int64     `json:"chunks_received"` // chunks the server has already stored; upload resumes from here
	Status         string    `json:"status"`          // e.g. "complete" or "expired" (checked in runResumeUpload)
	ExpiresAt      time.Time `json:"expires_at"`      // after this the session can no longer be resumed
	Checksum       string    `json:"checksum,omitempty"` // optional SHA-256 hex digest for server-side verification
}
|
|
|
|
// ProgressTracker tracks upload progress. Byte counts are updated atomically,
// so Add may be called concurrently from multiple upload workers.
type ProgressTracker struct {
	totalBytes   int64
	bytesWritten int64
	startTime    time.Time
}

// Add records n additional uploaded bytes; safe for concurrent use.
func (p *ProgressTracker) Add(n int64) {
	atomic.AddInt64(&p.bytesWritten, n)
}

// Progress returns a snapshot of the upload state: bytes done so far, the
// total, percent complete, average bytes/sec since startTime, and the
// estimated time remaining at that rate.
func (p *ProgressTracker) Progress() (current, total int64, percent, speed float64, eta time.Duration) {
	total = p.totalBytes
	current = atomic.LoadInt64(&p.bytesWritten)
	if total > 0 {
		percent = 100 * float64(current) / float64(total)
	}
	if elapsed := time.Since(p.startTime).Seconds(); elapsed > 0 {
		speed = float64(current) / elapsed
		if speed > 0 {
			eta = time.Duration(float64(total-current)/speed) * time.Second
		}
	}
	return current, total, percent, speed, eta
}
|
|
|
|
// runReleaseAssetUpload implements "upload release-asset": it validates the
// flags, opens the local file, optionally computes its SHA-256, creates a
// chunked-upload session on the server, streams the chunks in parallel, and
// finalizes the upload, printing a summary at the end.
func runReleaseAssetUpload(cmd *cobra.Command, args []string) error {
	repo, _ := cmd.Flags().GetString("repo")
	release, _ := cmd.Flags().GetString("release")
	filePath, _ := cmd.Flags().GetString("file")
	assetName, _ := cmd.Flags().GetString("name")
	chunkSizeStr, _ := cmd.Flags().GetString("chunk-size")
	parallel, _ := cmd.Flags().GetInt("parallel")
	verifyChecksum, _ := cmd.Flags().GetBool("verify-checksum")
	showProgress, _ := cmd.Flags().GetBool("progress")

	// Credentials come from the CLI configuration (see the auth commands).
	server := getServer()
	token := getToken()

	if server == "" || token == "" {
		return errors.New("not logged in. Use 'gitea-cli auth login' first")
	}

	// Parse repo
	parts := strings.Split(repo, "/")
	if len(parts) != 2 {
		return errors.New("invalid repository format. Use owner/repo")
	}
	owner, repoName := parts[0], parts[1]

	// Parse chunk size and clamp it to the supported maximum.
	chunkSize, err := parseSize(chunkSizeStr)
	if err != nil {
		return fmt.Errorf("invalid chunk size: %w", err)
	}
	chunkSize = min(chunkSize, maxChunkSize)

	// Open file
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		return fmt.Errorf("failed to stat file: %w", err)
	}
	fileSize := stat.Size()

	if assetName == "" {
		assetName = filepath.Base(filePath)
	}

	fmt.Printf("Uploading %s (%s)\n", assetName, formatSize(fileSize))

	// Calculate checksum if requested. This consumes the file, so rewind to
	// the start before the chunked upload begins reading.
	var checksum string
	if verifyChecksum {
		fmt.Print("Calculating checksum... ")
		checksum, err = calculateSHA256(file)
		if err != nil {
			return fmt.Errorf("failed to calculate checksum: %w", err)
		}
		fmt.Printf("done (%s)\n", checksum[:16]+"...")
		if _, err := file.Seek(0, 0); err != nil {
			return fmt.Errorf("failed to seek file: %w", err)
		}
	}

	// Create upload session
	fmt.Print("Creating upload session... ")
	session, err := createUploadSession(server, token, owner, repoName, release, assetName, fileSize, chunkSize, checksum)
	if err != nil {
		return fmt.Errorf("failed to create session: %w", err)
	}
	fmt.Printf("done (%s)\n", session.ID)

	// Upload chunks
	tracker := &ProgressTracker{
		totalBytes: fileSize,
		startTime:  time.Now(),
	}

	ctx := context.Background()
	err = uploadChunks(ctx, server, token, session, file, parallel, tracker, showProgress)
	if err != nil {
		// The session survives server-side, so tell the user how to resume.
		fmt.Printf("\n❌ Upload failed: %v\n", err)
		fmt.Printf(" Resume with: gitea-cli upload resume --session %s --file %s\n", session.ID, filePath)
		return err
	}

	// Complete upload
	fmt.Print("\nFinalizing... ")
	result, err := completeUpload(server, token, session.ID)
	if err != nil {
		return fmt.Errorf("failed to complete upload: %w", err)
	}
	fmt.Println("done")

	if verifyChecksum && result.ChecksumVerified {
		fmt.Println("Verifying checksum... ✓ SHA256 matches")
	}

	// Final summary: elapsed time and average throughput over the whole run.
	elapsed := time.Since(tracker.startTime)
	fmt.Printf("\n✅ Upload complete!\n")
	fmt.Printf(" Asset ID: %d\n", result.ID)
	fmt.Printf(" Time: %s\n", elapsed.Round(time.Second))
	fmt.Printf(" Speed: %s/s (avg)\n", formatSize(int64(float64(fileSize)/elapsed.Seconds())))
	if result.DownloadURL != "" {
		fmt.Printf(" Download: %s\n", result.DownloadURL)
	}

	return nil
}
|
|
|
|
// runResumeUpload implements "upload resume": it fetches the state of an
// existing upload session by ID and streams the remaining chunks of the
// local file, then finalizes the upload.
func runResumeUpload(cmd *cobra.Command, args []string) error {
	sessionID, _ := cmd.Flags().GetString("session")
	filePath, _ := cmd.Flags().GetString("file")

	server := getServer()
	token := getToken()

	if server == "" || token == "" {
		return errors.New("not logged in")
	}

	// Get session status
	session, err := getUploadSession(server, token, sessionID)
	if err != nil {
		return fmt.Errorf("failed to get session: %w", err)
	}

	if session.Status == "complete" {
		fmt.Println("Upload already completed")
		return nil
	}

	if session.Status == "expired" {
		return errors.New("upload session has expired")
	}

	// Open file
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	fmt.Printf("Resuming upload: %s\n", session.FileName)
	fmt.Printf(" Chunks: %d/%d complete\n", session.ChunksReceived, session.TotalChunks)

	// Seed the tracker with the bytes already stored server-side.
	// NOTE(review): ChunksReceived*ChunkSize over-counts slightly if the
	// final (short) chunk was among those received — display-only, but
	// worth confirming against the server's accounting.
	tracker := &ProgressTracker{
		totalBytes:   session.FileSize,
		bytesWritten: session.ChunksReceived * session.ChunkSize,
		startTime:    time.Now(),
	}

	// Parallelism (4) and the progress bar are fixed here, unlike
	// release-asset which exposes them as flags.
	ctx := context.Background()
	err = uploadChunks(ctx, server, token, session, file, 4, tracker, true)
	if err != nil {
		return err
	}

	// Complete
	fmt.Print("\nFinalizing... ")
	result, err := completeUpload(server, token, session.ID)
	if err != nil {
		return fmt.Errorf("failed to complete: %w", err)
	}
	fmt.Println("done")

	fmt.Printf("\n✅ Upload complete!\n")
	fmt.Printf(" Asset ID: %d\n", result.ID)

	return nil
}
|
|
|
|
func runListUploads(cmd *cobra.Command, args []string) error {
|
|
repo, _ := cmd.Flags().GetString("repo")
|
|
|
|
server := getServer()
|
|
token := getToken()
|
|
|
|
if server == "" || token == "" {
|
|
return errors.New("not logged in")
|
|
}
|
|
|
|
parts := strings.Split(repo, "/")
|
|
if len(parts) != 2 {
|
|
return errors.New("invalid repository format")
|
|
}
|
|
|
|
sessions, err := listUploadSessions(server, token, parts[0], parts[1])
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if len(sessions) == 0 {
|
|
fmt.Println("No pending uploads")
|
|
return nil
|
|
}
|
|
|
|
fmt.Printf("Pending uploads for %s:\n\n", repo)
|
|
for _, s := range sessions {
|
|
progress := float64(s.ChunksReceived) / float64(s.TotalChunks) * 100
|
|
fmt.Printf(" %s\n", s.ID)
|
|
fmt.Printf(" File: %s (%s)\n", s.FileName, formatSize(s.FileSize))
|
|
fmt.Printf(" Progress: %.1f%% (%d/%d chunks)\n", progress, s.ChunksReceived, s.TotalChunks)
|
|
fmt.Printf(" Expires: %s\n", s.ExpiresAt.Format(time.RFC3339))
|
|
fmt.Println()
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func uploadChunks(ctx context.Context, server, token string, session *UploadSession, file *os.File, parallel int, tracker *ProgressTracker, showProgress bool) error {
|
|
totalChunks := session.TotalChunks
|
|
chunkSize := session.ChunkSize
|
|
|
|
// Create worker pool
|
|
type chunkJob struct {
|
|
number int64
|
|
data []byte
|
|
}
|
|
|
|
jobs := make(chan chunkJob, parallel)
|
|
errors := make(chan error, totalChunks)
|
|
var wg sync.WaitGroup
|
|
|
|
// Start workers
|
|
for range parallel {
|
|
wg.Go(func() {
|
|
for job := range jobs {
|
|
err := uploadChunk(server, token, session.ID, job.number, job.data)
|
|
if err != nil {
|
|
errors <- fmt.Errorf("chunk %d: %w", job.number, err)
|
|
return
|
|
}
|
|
tracker.Add(int64(len(job.data)))
|
|
}
|
|
})
|
|
}
|
|
|
|
// Progress display
|
|
done := make(chan struct{})
|
|
if showProgress {
|
|
go func() {
|
|
ticker := time.NewTicker(100 * time.Millisecond)
|
|
defer ticker.Stop()
|
|
for {
|
|
select {
|
|
case <-done:
|
|
return
|
|
case <-ticker.C:
|
|
current, total, percent, speed, eta := tracker.Progress()
|
|
fmt.Printf("\r [%-50s] %5.1f%% %s/%s %s/s ETA %s ",
|
|
progressBar(percent, 50),
|
|
percent,
|
|
formatSize(current),
|
|
formatSize(total),
|
|
formatSize(int64(speed)),
|
|
formatDuration(eta))
|
|
}
|
|
}
|
|
}()
|
|
}
|
|
|
|
// Read and queue chunks
|
|
for chunkNum := session.ChunksReceived; chunkNum < totalChunks; chunkNum++ {
|
|
offset := chunkNum * chunkSize
|
|
if _, err := file.Seek(offset, 0); err != nil {
|
|
close(jobs)
|
|
close(done)
|
|
return fmt.Errorf("failed to seek: %w", err)
|
|
}
|
|
|
|
size := chunkSize
|
|
if chunkNum == totalChunks-1 {
|
|
size = session.FileSize - offset
|
|
}
|
|
|
|
data := make([]byte, size)
|
|
n, err := io.ReadFull(file, data)
|
|
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
|
close(jobs)
|
|
close(done)
|
|
return fmt.Errorf("failed to read chunk %d: %w", chunkNum, err)
|
|
}
|
|
data = data[:n]
|
|
|
|
select {
|
|
case err := <-errors:
|
|
close(jobs)
|
|
close(done)
|
|
return err
|
|
case jobs <- chunkJob{number: chunkNum, data: data}:
|
|
case <-ctx.Done():
|
|
close(jobs)
|
|
close(done)
|
|
return ctx.Err()
|
|
}
|
|
}
|
|
|
|
close(jobs)
|
|
wg.Wait()
|
|
close(done)
|
|
|
|
// Check for errors
|
|
select {
|
|
case err := <-errors:
|
|
return err
|
|
default:
|
|
return nil
|
|
}
|
|
}
|
|
|
|
// progressBar renders percent as a fixed-width bar of filled (█) and
// empty (░) cells.
func progressBar(percent float64, width int) string {
	filled := min(int(percent/100*float64(width)), width)
	empty := width - filled
	return strings.Repeat("█", filled) + strings.Repeat("░", empty)
}
|
|
|
|
// formatSize renders a byte count using binary (1024-based) units,
// e.g. "512 B", "1.0 KB", "10.0 MB".
func formatSize(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	value := float64(bytes)
	idx := -1
	for value >= unit {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value, "KMGTPE"[idx])
}
|
|
|
|
// formatDuration renders d compactly: "<1s", "Ns", "NmNs", or "NhNm"
// (seconds are dropped once hours are shown).
func formatDuration(d time.Duration) string {
	if d < time.Second {
		return "<1s"
	}
	total := int64(d.Round(time.Second) / time.Second)
	h, m, s := total/3600, (total/60)%60, total%60
	switch {
	case h > 0:
		return fmt.Sprintf("%dh%dm", h, m)
	case m > 0:
		return fmt.Sprintf("%dm%ds", m, s)
	default:
		return fmt.Sprintf("%ds", s)
	}
}
|
|
|
|
// parseSize parses a human-readable size such as "10MB", "512KB" or "1GB"
// (case-insensitive, powers of 1024) into a byte count. A bare number or a
// trailing "B" means bytes. It returns an error for non-numeric input and
// for non-positive values.
func parseSize(s string) (int64, error) {
	s = strings.ToUpper(strings.TrimSpace(s))
	multiplier := int64(1)

	switch {
	case strings.HasSuffix(s, "GB"):
		multiplier = 1024 * 1024 * 1024
		s = strings.TrimSuffix(s, "GB")
	case strings.HasSuffix(s, "MB"):
		multiplier = 1024 * 1024
		s = strings.TrimSuffix(s, "MB")
	case strings.HasSuffix(s, "KB"):
		multiplier = 1024
		s = strings.TrimSuffix(s, "KB")
	default:
		s = strings.TrimSuffix(s, "B")
	}

	// strconv rejects trailing garbage that fmt.Sscanf("%d") silently
	// ignored (e.g. "10x" used to parse as 10).
	value, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %w", s, err)
	}
	// A zero or negative chunk size would break the chunk-count math
	// upstream, so reject it here.
	if value <= 0 {
		return 0, fmt.Errorf("size must be positive, got %d", value)
	}
	return value * multiplier, nil
}
|
|
|
|
func calculateSHA256(file *os.File) (string, error) {
|
|
hash := sha256.New()
|
|
if _, err := io.Copy(hash, file); err != nil {
|
|
return "", err
|
|
}
|
|
return hex.EncodeToString(hash.Sum(nil)), nil
|
|
}
|
|
|
|
// API functions
|
|
|
|
func createUploadSession(server, token, owner, repo, release, fileName string, fileSize, chunkSize int64, checksum string) (*UploadSession, error) {
|
|
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/releases/%s/assets/upload-session", server, owner, repo, release)
|
|
|
|
body := map[string]any{
|
|
"name": fileName,
|
|
"size": fileSize,
|
|
"chunk_size": chunkSize,
|
|
}
|
|
if checksum != "" {
|
|
body["checksum"] = checksum
|
|
}
|
|
|
|
jsonBody, _ := json.Marshal(body)
|
|
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonBody))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
req.Header.Set("Authorization", "token "+token)
|
|
req.Header.Set("Content-Type", "application/json")
|
|
|
|
resp, err := http.DefaultClient.Do(req)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
|
|
body, _ := io.ReadAll(resp.Body)
|
|
return nil, fmt.Errorf("server returned %d: %s", resp.StatusCode, string(body))
|
|
}
|
|
|
|
var session UploadSession
|
|
if err := json.NewDecoder(resp.Body).Decode(&session); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return &session, nil
|
|
}
|
|
|
|
func getUploadSession(server, token, sessionID string) (*UploadSession, error) {
|
|
url := fmt.Sprintf("%s/api/v1/repos/uploads/%s", server, sessionID)
|
|
|
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
req.Header.Set("Authorization", "token "+token)
|
|
|
|
resp, err := http.DefaultClient.Do(req)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, errors.New("session not found")
|
|
}
|
|
|
|
var session UploadSession
|
|
if err := json.NewDecoder(resp.Body).Decode(&session); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return &session, nil
|
|
}
|
|
|
|
// uploadChunk PUTs one chunk's bytes to the session's chunk endpoint.
func uploadChunk(server, token, sessionID string, chunkNum int64, data []byte) error {
	endpoint := fmt.Sprintf("%s/api/v1/repos/uploads/%s/chunks/%d", server, sessionID, chunkNum)

	req, err := http.NewRequest(http.MethodPut, endpoint, bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "token "+token)
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK, http.StatusCreated:
		return nil
	}
	msg, _ := io.ReadAll(resp.Body)
	return fmt.Errorf("failed: %s", string(msg))
}
|
|
|
|
// CompleteResult is the server's response when an upload session is
// finalized (see completeUpload).
type CompleteResult struct {
	ID               int64  `json:"id"`                   // created asset's ID
	DownloadURL      string `json:"browser_download_url"` // may be empty (see runReleaseAssetUpload)
	ChecksumVerified bool   `json:"checksum_verified"`    // true when the server matched the supplied SHA-256
}
|
|
|
|
func completeUpload(server, token, sessionID string) (*CompleteResult, error) {
|
|
url := fmt.Sprintf("%s/api/v1/repos/uploads/%s/complete", server, sessionID)
|
|
|
|
req, err := http.NewRequest(http.MethodPost, url, nil)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
req.Header.Set("Authorization", "token "+token)
|
|
|
|
resp, err := http.DefaultClient.Do(req)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
|
body, _ := io.ReadAll(resp.Body)
|
|
return nil, fmt.Errorf("failed: %s", string(body))
|
|
}
|
|
|
|
var result CompleteResult
|
|
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return &result, nil
|
|
}
|
|
|
|
func listUploadSessions(server, token, owner, repo string) ([]*UploadSession, error) {
|
|
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/uploads", server, owner, repo)
|
|
|
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
req.Header.Set("Authorization", "token "+token)
|
|
|
|
resp, err := http.DefaultClient.Do(req)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
return nil, errors.New("failed to list sessions")
|
|
}
|
|
|
|
var sessions []*UploadSession
|
|
if err := json.NewDecoder(resp.Body).Decode(&sessions); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return sessions, nil
|
|
}
|