Compare commits

..

2 commits

10 changed files with 32 additions and 673 deletions

@@ -1,9 +0,0 @@
package sdk
import "git.geekeey.de/actions/sdk/cache"
func (c *Action) Cache() *cache.Client {
token := c.env("ACTIONS_RUNTIME_TOKEN")
url := c.env("ACTIONS_CACHE_URL")
return cache.New(token, url)
}

56 cache/blob.go vendored

@@ -1,56 +0,0 @@
package cache
import (
"bytes"
"io"
"os"
)
type Blob interface {
io.ReaderAt
io.Closer
Size() int64
}
type byteBlob struct {
buf *bytes.Reader
}
func NewByteBlob(b []byte) Blob {
return &byteBlob{buf: bytes.NewReader(b)}
}
func (blob *byteBlob) ReadAt(p []byte, off int64) (n int, err error) {
return blob.buf.ReadAt(p, off)
}
func (blob *byteBlob) Size() int64 {
return blob.buf.Size()
}
func (blob *byteBlob) Close() error {
return nil
}
type fileBlob struct {
buf *os.File
}
func NewFileBlob(f *os.File) Blob {
return &fileBlob{buf: f}
}
func (blob *fileBlob) ReadAt(p []byte, off int64) (n int, err error) {
return blob.buf.ReadAt(p, off)
}
func (blob *fileBlob) Size() int64 {
if i, err := blob.buf.Stat(); err == nil {
return i.Size()
}
return 0
}
func (blob *fileBlob) Close() error {
return nil
}

329 cache/cache.go vendored

@@ -1,329 +0,0 @@
package cache
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strings"
"sync"
"golang.org/x/sync/errgroup"
)
var UploadConcurrency = 4
var UploadChunkSize = 32 * 1024 * 1024
type Client struct {
base string
http *http.Client
}
type auth struct {
transport http.RoundTripper
token string
}
func (t *auth) RoundTrip(req *http.Request) (*http.Response, error) {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token))
return t.transport.RoundTrip(req)
}
func New(token, url string) *Client {
// the retry transport needs a positive attempt count, otherwise RoundTrip never issues the request
t := &auth{transport: &retry{transport: &http.Transport{}, retry: 3}, token: token}
return &Client{
base: url,
http: &http.Client{Transport: t},
}
}
func (c *Client) url(p string) string {
// path.Join over the full URL would collapse the "//" after the scheme, so only join the path part
return strings.TrimSuffix(c.base, "/") + path.Join("/_apis/artifactcache", p)
}
func (c *Client) version(k string) string {
// note: the cache key itself is not mixed into the version hash, only the static suffix below
h := sha256.New()
h.Write([]byte("|go-actionscache-1.0"))
return hex.EncodeToString(h.Sum(nil))
}
type ApiError struct {
Message string `json:"message"`
TypeName string `json:"typeName"`
TypeKey string `json:"typeKey"`
ErrorCode int `json:"errorCode"`
}
func (e ApiError) Error() string {
return e.Message
}
func (e ApiError) Is(err error) bool {
if err == os.ErrExist {
if strings.Contains(e.TypeKey, "AlreadyExists") {
return true
}
}
return false
}
func checkApiError(res *http.Response) error {
if res.StatusCode >= 200 && res.StatusCode < 300 {
return nil
}
dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024))
var details ApiError
if err := dec.Decode(&details); err != nil {
return err
}
if details.Message != "" {
return details
} else {
return fmt.Errorf("unknown error %s", res.Status)
}
}
func (c *Client) Load(ctx context.Context, keys ...string) (*Entry, error) {
u, err := url.Parse(c.url("cache"))
if err != nil {
return nil, err
}
q := u.Query()
q.Set("keys", strings.Join(keys, ","))
q.Set("version", c.version(keys[0]))
u.RawQuery = q.Encode()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Add("Accept", "application/json;api-version=6.0-preview.1")
res, err := c.http.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
err = checkApiError(res)
if err != nil {
return nil, err
}
dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024))
var ce Entry
if err = dec.Decode(&ce); err != nil {
return nil, err
}
ce.http = c.http
return &ce, nil
}
func (c *Client) Save(ctx context.Context, key string, b Blob) error {
id, err := c.reserve(ctx, key)
if err != nil {
return err
}
err = c.upload(ctx, id, b)
if err != nil {
return err
}
return c.commit(ctx, id, b.Size())
}
type ReserveCacheReq struct {
Key string `json:"key"`
Version string `json:"version"`
}
type ReserveCacheRes struct {
CacheID int `json:"cacheID"`
}
func (c *Client) reserve(ctx context.Context, key string) (int, error) {
payload := ReserveCacheReq{Key: key, Version: c.version(key)}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(payload); err != nil {
return 0, err
}
url := c.url("caches")
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf)
if err != nil {
return 0, err
}
req.Header.Add("Content-Type", "application/json")
res, err := c.http.Do(req)
if err != nil {
return 0, err
}
defer res.Body.Close()
err = checkApiError(res)
if err != nil {
return 0, err
}
dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024))
var cr ReserveCacheRes
if err = dec.Decode(&cr); err != nil {
return 0, err
}
if cr.CacheID == 0 {
return 0, fmt.Errorf("invalid response (cache id is 0)")
}
return cr.CacheID, nil
}
type CommitCacheReq struct {
Size int64 `json:"size"`
}
func (c *Client) commit(ctx context.Context, id int, size int64) error {
payload := CommitCacheReq{Size: size}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(payload); err != nil {
return err
}
url := c.url(fmt.Sprintf("caches/%d", id))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf)
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
res, err := c.http.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
err = checkApiError(res)
if err != nil {
return err
}
return nil
}
func (c *Client) upload(ctx context.Context, id int, b Blob) error {
var mu sync.Mutex
grp, ctx := errgroup.WithContext(ctx)
offset := int64(0)
for i := 0; i < UploadConcurrency; i++ {
grp.Go(func() error {
for {
mu.Lock()
start := offset
if start >= b.Size() {
mu.Unlock()
return nil
}
end := start + int64(UploadChunkSize)
if end > b.Size() {
end = b.Size()
}
offset = end
mu.Unlock()
if err := c.create(ctx, id, b, start, end-start); err != nil {
return err
}
}
})
}
return grp.Wait()
}
func (c *Client) create(ctx context.Context, id int, ra io.ReaderAt, off, n int64) error {
url := c.url(fmt.Sprintf("caches/%d", id))
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, url, io.NewSectionReader(ra, off, n))
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add("Content-Range", fmt.Sprintf("bytes %d-%d/*", off, off+n-1))
res, err := c.http.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
err = checkApiError(res)
if err != nil {
return err
}
return nil
}
type Entry struct {
Key string `json:"cacheKey"`
Scope string `json:"scope"`
URL string `json:"archiveLocation"`
http *http.Client
}
// Download returns a ReaderAtCloser for pulling the data. Concurrent reads are not allowed
func (ce *Entry) Download(ctx context.Context) ReaderAtCloser {
return NewReaderAtCloser(func(offset int64) (io.ReadCloser, error) {
req, err := http.NewRequestWithContext(ctx, "GET", ce.URL, nil)
if err != nil {
return nil, err
}
if offset != 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
}
client := ce.http
if client == nil {
client = http.DefaultClient
}
res, err := client.Do(req)
if err != nil {
return nil, err
}
if res.StatusCode < 200 || res.StatusCode >= 300 {
if res.StatusCode == http.StatusRequestedRangeNotSatisfiable {
return nil, fmt.Errorf("invalid status response %v for %s, range: %v", res.Status, ce.URL, req.Header.Get("Range"))
}
return nil, fmt.Errorf("invalid status response %v for %s", res.Status, ce.URL)
}
if offset != 0 {
cr := res.Header.Get("content-range")
if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
res.Body.Close()
return nil, fmt.Errorf("unhandled content range in response: %v", cr)
}
}
return res.Body, nil
})
}
func (ce *Entry) WriteTo(ctx context.Context, w io.Writer) error {
rac := ce.Download(ctx)
if _, err := io.Copy(w, &rc{ReaderAt: rac}); err != nil {
return err
}
return rac.Close()
}
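
For reference, the cache client removed above exposed a small save/restore API (New, Save, Load and Entry.WriteTo). A minimal usage sketch, assuming the same ACTIONS_RUNTIME_TOKEN and ACTIONS_CACHE_URL values that Action.Cache() reads from the environment; the key name and payload are made up for illustration:

package main

import (
	"context"
	"os"

	"git.geekeey.de/actions/sdk/cache"
)

func main() {
	ctx := context.Background()
	// build the client directly from the runner-provided credentials
	c := cache.New(os.Getenv("ACTIONS_RUNTIME_TOKEN"), os.Getenv("ACTIONS_CACHE_URL"))

	// store an in-memory payload under a hypothetical key
	if err := c.Save(ctx, "example-key", cache.NewByteBlob([]byte("hello"))); err != nil {
		panic(err)
	}

	// restore the entry for the first matching key and stream it to stdout
	entry, err := c.Load(ctx, "example-key")
	if err != nil {
		panic(err)
	}
	if err := entry.WriteTo(ctx, os.Stdout); err != nil {
		panic(err)
	}
}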

89 cache/reader.go vendored

@@ -1,89 +0,0 @@
package cache
import (
"io"
)
type ReaderAtCloser interface {
io.ReaderAt
io.Closer
}
type readerAtCloser struct {
offset int64
rc io.ReadCloser
ra io.ReaderAt
open func(offset int64) (io.ReadCloser, error)
closed bool
}
func NewReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser {
return &readerAtCloser{
open: open,
}
}
func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) {
if hrs.closed {
return 0, io.EOF
}
if hrs.ra != nil {
return hrs.ra.ReadAt(p, off)
}
if hrs.rc == nil || off != hrs.offset {
if hrs.rc != nil {
hrs.rc.Close()
hrs.rc = nil
}
rc, err := hrs.open(off)
if err != nil {
return 0, err
}
hrs.rc = rc
}
if ra, ok := hrs.rc.(io.ReaderAt); ok {
hrs.ra = ra
n, err = ra.ReadAt(p, off)
} else {
for {
var nn int
nn, err = hrs.rc.Read(p)
n += nn
p = p[nn:]
if len(p) == 0 || err != nil {
break
}
}
}
hrs.offset += int64(n)
return
}
func (hrs *readerAtCloser) Close() error {
if hrs.closed {
return nil
}
hrs.closed = true
if hrs.rc != nil {
return hrs.rc.Close()
}
return nil
}
type rc struct {
io.ReaderAt
offset int
}
func (r *rc) Read(b []byte) (int, error) {
n, err := r.ReadAt(b, int64(r.offset))
r.offset += n
if n > 0 && err == io.EOF {
err = nil
}
return n, err
}

42 cache/retry.go vendored

@@ -1,42 +0,0 @@
package cache
import (
"bytes"
"fmt"
"io"
"net/http"
)
type retry struct {
transport http.RoundTripper
retry int
}
func (t *retry) RoundTrip(req *http.Request) (*http.Response, error) {
var body []byte
if req.Body != nil {
body, _ = io.ReadAll(req.Body)
}
for count := 0; count < t.retry; count++ {
req.Body = io.NopCloser(bytes.NewBuffer(body))
res, err := t.transport.RoundTrip(req)
if err != nil {
return nil, err
}
if t.check(res) {
if res.Body != nil {
io.Copy(io.Discard, res.Body)
res.Body.Close()
}
continue
}
return res, err
}
return nil, fmt.Errorf("too many retries")
}
func (t *retry) check(res *http.Response) bool {
return res.StatusCode > 399
}

115 cache/tar.go vendored

@@ -1,115 +0,0 @@
package cache
import (
"archive/tar"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
// Tar takes a source and variable writers and walks 'source' writing each file
// found to the tar writer; the purpose for accepting multiple writers is to allow
// for multiple outputs (for example a file, or md5 hash)
func Tar(src string, writers ...io.Writer) error {
if _, err := os.Stat(src); err != nil {
return fmt.Errorf("unable to tar files - %v", err.Error())
}
mw := io.MultiWriter(writers...)
gzw := gzip.NewWriter(mw)
defer gzw.Close()
tw := tar.NewWriter(gzw)
defer tw.Close()
// walk path
return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.Mode().IsRegular() {
return nil
}
header, err := tar.FileInfoHeader(fi, fi.Name())
if err != nil {
return err
}
// update the name to correctly reflect the desired destination when untaring
header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator))
if err := tw.WriteHeader(header); err != nil {
return err
}
f, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tw, f); err != nil {
f.Close()
return err
}
f.Close()
return nil
})
}
// Untar takes a destination path and a reader; a tar reader loops over the tarfile
// creating the file structure at 'dst' along the way, and writing any files
func Untar(dst string, r io.Reader) error {
gzr, err := gzip.NewReader(r)
if err != nil {
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
if errors.Is(err, io.EOF) || header == nil {
break
}
if err != nil {
return err
}
target := filepath.Join(dst, header.Name)
switch header.Typeflag {
// if its a dir and it doesn't exist create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil {
return err
}
}
// if it's a file create it
case tar.TypeReg:
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
return err
}
if _, err := io.Copy(f, tr); err != nil {
f.Close()
return err
}
f.Close()
}
}
return nil
}
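
The Tar and Untar helpers above are what the cache payloads were packed and unpacked with. A small round-trip sketch; the source and destination directories are chosen purely for illustration:

package main

import (
	"bytes"

	"git.geekeey.de/actions/sdk/cache"
)

func main() {
	// pack ./src into an in-memory gzip-compressed tar stream
	var buf bytes.Buffer
	if err := cache.Tar("./src", &buf); err != nil {
		panic(err)
	}

	// unpack the same stream into ./dst, recreating files and directories
	if err := cache.Untar("./dst", &buf); err != nil {
		panic(err)
	}
}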

32 client.go Normal file

@@ -0,0 +1,32 @@
package sdk
import (
"fmt"
"net/http"
"net/url"
)
func (a *Action) Client() *Client {
c := &Client{Client: &http.Client{}}
c.base = a.env("GITHUB_API_URL")
c.token = fmt.Sprintf("Bearer %s", a.env("GITHUB_TOKEN"))
return c
}
type Client struct {
*http.Client
base string
token string
}
func (c *Client) Do(req *http.Request) (*http.Response, error) {
req.Header.Set("Authorization", c.token)
if !req.URL.IsAbs() {
u, err := url.Parse(fmt.Sprintf("%s%s", c.base, req.URL))
if err != nil {
return nil, err
}
req.URL = u
}
return c.Client.Do(req)
}
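
The new Client resolves relative request URLs against GITHUB_API_URL and injects an Authorization bearer header built from GITHUB_TOKEN on every request. A minimal sketch, assuming both variables are present in the runner environment; the request path is only an example:

package main

import (
	"fmt"
	"net/http"

	"git.geekeey.de/actions/sdk"
)

func main() {
	a := sdk.New()
	c := a.Client()

	// a relative path is joined onto GITHUB_API_URL before the request is sent
	req, err := http.NewRequest(http.MethodGet, "/rate_limit", nil)
	if err != nil {
		panic(err)
	}
	res, err := c.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}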

@@ -1,29 +0,0 @@
package main
import (
"context"
"os"
"git.geekeey.de/actions/sdk"
"git.geekeey.de/actions/sdk/cache"
)
func main() {
a := sdk.New()
a.AddMask("hello")
a.WithFieldsSlice("foo=bar", "biz=baz").Debugf("hello world")
blob, err := a.Cache().Load(context.Background(), "example")
if err != nil {
panic(err)
}
cache.Tar("./foo")
f, err := os.Open("")
if err != nil {
panic(err)
}
a.Cache().Save(context.Background(), "", cache.NewFileBlob(f))
entry := blob.Download(context.Background())
if entry == nil {
return
}
}

2 go.mod

@@ -1,5 +1,3 @@
module git.geekeey.de/actions/sdk
go 1.22.5
require golang.org/x/sync v0.7.0 // indirect

2 go.sum

@@ -1,2 +0,0 @@
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=