forked from mirrors/pkg-proxy
When storage.direct_serve is enabled and the backend supports it (S3, Azure), cached artifact downloads return a 302 redirect to a presigned URL instead of streaming bytes through the proxy. Falls back to streaming when the backend can't sign (fileblob, local filesystem) or signing fails. Adds the azureblob driver so azblob:// storage URLs work. Cache-hit accounting already happened before io.Copy so redirects are counted correctly; the metrics calls are pulled into a helper so both paths share them. Closes #96
114 lines
3.3 KiB
Go
114 lines
3.3 KiB
Go
// Package storage provides artifact storage backends for the proxy cache.
|
|
//
|
|
// Storage backends are accessed via gocloud.dev/blob URLs:
|
|
//
|
|
// - file:///path/to/dir - Local filesystem storage
|
|
// - s3://bucket-name - Amazon S3
|
|
// - s3://bucket?endpoint=http://localhost:9000 - S3-compatible (MinIO)
// - azblob://container-name - Azure Blob Storage
|
|
//
|
|
// Use OpenBucket to create a storage backend from a URL.
|
|
package storage
|
|
|
|
import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"hash"
	"io"
	"time"
)
|
|
|
|
// dirPermissions is the mode (rwxr-xr-x) used when creating storage
// directories. NOTE(review): no usage is visible in this file — presumably
// consumed by the file:// (local filesystem) backend; confirm at call site.
const dirPermissions = 0755
|
|
|
|
var (
	// ErrNotFound is returned by Open and Size when no artifact exists
	// at the requested path.
	ErrNotFound = errors.New("artifact not found")

	// ErrSignedURLUnsupported is returned by SignedURL when the backend
	// cannot generate presigned URLs (e.g. local filesystem).
	ErrSignedURLUnsupported = errors.New("signed URLs not supported by storage backend")
)
|
|
|
|
// Storage defines the interface for artifact storage backends.
//
// Backends are created from gocloud.dev/blob URLs via OpenBucket.
// Backends that cannot presign URLs (e.g. local filesystem) return
// ErrSignedURLUnsupported from SignedURL, and callers are expected to
// fall back to streaming content through Open.
type Storage interface {
	// Store writes content from r to the given path.
	// Returns the number of bytes written and the SHA256 hash of the content.
	Store(ctx context.Context, path string, r io.Reader) (size int64, hash string, err error)

	// Open returns a reader for the content at path.
	// The caller must close the reader when done.
	// Returns ErrNotFound if the path does not exist.
	Open(ctx context.Context, path string) (io.ReadCloser, error)

	// Exists returns true if content exists at path.
	Exists(ctx context.Context, path string) (bool, error)

	// Delete removes the content at path.
	// Returns nil if the path does not exist.
	Delete(ctx context.Context, path string) error

	// Size returns the size in bytes of content at path.
	// Returns ErrNotFound if the path does not exist.
	Size(ctx context.Context, path string) (int64, error)

	// SignedURL returns a presigned URL granting time-limited GET access to path.
	// Returns ErrSignedURLUnsupported if the backend cannot generate presigned URLs.
	SignedURL(ctx context.Context, path string, expiry time.Duration) (string, error)

	// UsedSpace returns the total bytes used by all stored content.
	UsedSpace(ctx context.Context) (int64, error)

	// URL returns the storage backend URL (e.g. "file:///path" or "s3://bucket").
	URL() string

	// Close releases any resources held by the storage backend.
	Close() error
}
|
|
|
|
// ArtifactPath builds a storage path for an artifact.
// Format: {ecosystem}/{namespace}/{name}/{version}/{filename}
// The namespace segment is omitted when namespace is empty:
// {ecosystem}/{name}/{version}/{filename}
func ArtifactPath(ecosystem, namespace, name, version, filename string) string {
	p := ecosystem
	if namespace != "" {
		p += "/" + namespace
	}
	return p + "/" + name + "/" + version + "/" + filename
}
|
|
|
|
// HashingReader wraps a reader and computes SHA256 hash as content is read.
|
|
type HashingReader struct {
|
|
r io.Reader
|
|
hash []byte
|
|
h interface{ Sum([]byte) []byte }
|
|
size int64
|
|
done bool
|
|
}
|
|
|
|
func NewHashingReader(r io.Reader) *HashingReader {
|
|
h := sha256.New()
|
|
return &HashingReader{
|
|
r: io.TeeReader(r, h),
|
|
h: h,
|
|
}
|
|
}
|
|
|
|
func (hr *HashingReader) Read(p []byte) (n int, err error) {
|
|
n, err = hr.r.Read(p)
|
|
hr.size += int64(n)
|
|
if err == io.EOF {
|
|
hr.done = true
|
|
hr.hash = hr.h.Sum(nil)
|
|
}
|
|
return
|
|
}
|
|
|
|
func (hr *HashingReader) Sum() string {
|
|
if !hr.done {
|
|
hr.hash = hr.h.Sum(nil)
|
|
hr.done = true
|
|
}
|
|
return hex.EncodeToString(hr.hash)
|
|
}
|
|
|
|
func (hr *HashingReader) Size() int64 {
|
|
return hr.size
|
|
}
|