diff --git a/cache.go b/cache.go
new file mode 100644
index 0000000..f3a17da
--- /dev/null
+++ b/cache.go
@@ -0,0 +1,9 @@
+package sdk
+
+import "git.geekeey.de/actions/sdk/cache"
+
+func (c *Action) Cache() *cache.Client {
+	token := c.env("ACTIONS_RUNTIME_TOKEN")
+	url := c.env("ACTIONS_CACHE_URL")
+	return cache.New(token, url)
+}
diff --git a/cache/blob.go b/cache/blob.go
new file mode 100644
index 0000000..d2cbca3
--- /dev/null
+++ b/cache/blob.go
@@ -0,0 +1,56 @@
+package cache
+
+import (
+	"bytes"
+	"io"
+	"os"
+)
+
+type Blob interface {
+	io.ReaderAt
+	io.Closer
+	Size() int64
+}
+
+type byteBlob struct {
+	buf *bytes.Reader
+}
+
+func NewByteBlob(b []byte) Blob {
+	return &byteBlob{buf: bytes.NewReader(b)}
+}
+
+func (blob *byteBlob) ReadAt(p []byte, off int64) (n int, err error) {
+	return blob.buf.ReadAt(p, off)
+}
+
+func (blob *byteBlob) Size() int64 {
+	return blob.buf.Size()
+}
+
+func (blob *byteBlob) Close() error {
+	return nil
+}
+
+type fileBlob struct {
+	buf *os.File
+}
+
+func NewFileBlob(f *os.File) Blob {
+	return &fileBlob{buf: f}
+}
+
+func (blob *fileBlob) ReadAt(p []byte, off int64) (n int, err error) {
+	return blob.buf.ReadAt(p, off)
+}
+
+func (blob *fileBlob) Size() int64 {
+	if i, err := blob.buf.Stat(); err == nil {
+		return i.Size()
+	}
+	return 0
+}
+
+func (blob *fileBlob) Close() error {
+	return nil
+}
diff --git a/cache/cache.go b/cache/cache.go
new file mode 100644
index 0000000..bc7ad6b
--- /dev/null
+++ b/cache/cache.go
@@ -0,0 +1,329 @@
+package cache
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"sync"
+
+	"golang.org/x/sync/errgroup"
+)
+
+var UploadConcurrency = 4
+var UploadChunkSize = 32 * 1024 * 1024
+
+type Client struct {
+	base string
+	http *http.Client
+}
+
+type auth struct {
+	transport http.RoundTripper
+	token     string
+}
+
+func (t *auth) RoundTrip(req *http.Request) (*http.Response, error) {
+	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token))
+	return t.transport.RoundTrip(req)
+}
+
+func New(token, url string) *Client {
+	t := &auth{transport: &retry{transport: &http.Transport{}, retry: 3}, token: token}
+	return &Client{
+		base: url,
+		http: &http.Client{Transport: t},
+	}
+}
+
+func (c *Client) url(p string) string {
+	return strings.TrimSuffix(c.base, "/") + "/" + path.Join("_apis/artifactcache", p)
+}
+
+func (c *Client) version(k string) string {
+	h := sha256.New()
+	h.Write([]byte(k + "|go-actionscache-1.0"))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+type ApiError struct {
+	Message   string `json:"message"`
+	TypeName  string `json:"typeName"`
+	TypeKey   string `json:"typeKey"`
+	ErrorCode int    `json:"errorCode"`
+}
+
+func (e ApiError) Error() string {
+	return e.Message
+}
+
+func (e ApiError) Is(err error) bool {
+	if err == os.ErrExist {
+		if strings.Contains(e.TypeKey, "AlreadyExists") {
+			return true
+		}
+	}
+	return false
+}
+
+func checkApiError(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return nil
+	}
+	dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024))
+
+	var details ApiError
+	if err := dec.Decode(&details); err != nil {
+		return err
+	}
+
+	if details.Message != "" {
+		return details
+	} else {
+		return fmt.Errorf("unknown error %s", res.Status)
+	}
+}
+
+func (c *Client) Load(ctx context.Context, keys ...string) (*Entry, error) {
+	u, err := url.Parse(c.url("cache"))
+	if err != nil {
+		return nil, err
+	}
+	q := u.Query()
+	q.Set("keys", strings.Join(keys, ","))
q.Set("version", c.version(keys[0])) + u.RawQuery = q.Encode() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Add("Accept", "application/json;api-version=6.0-preview.1") + + res, err := c.http.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + err = checkApiError(res) + if err != nil { + return nil, err + } + + dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024)) + + var ce Entry + if err = dec.Decode(&ce); err != nil { + return nil, err + } + + ce.http = c.http + return &ce, nil +} + +func (c *Client) Save(ctx context.Context, key string, b Blob) error { + id, err := c.reserve(ctx, key) + if err != nil { + return err + } + err = c.upload(ctx, id, b) + if err != nil { + return err + } + return c.commit(ctx, id, b.Size()) +} + +type ReserveCacheReq struct { + Key string `json:"key"` + Version string `json:"version"` +} + +type ReserveCacheRes struct { + CacheID int `json:"cacheID"` +} + +func (c *Client) reserve(ctx context.Context, key string) (int, error) { + payload := ReserveCacheReq{Key: key, Version: c.version(key)} + + buf := new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(payload); err != nil { + return 0, err + } + + url := c.url("caches") + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf) + if err != nil { + return 0, err + } + req.Header.Add("Content-Type", "application/json") + + res, err := c.http.Do(req) + if err != nil { + return 0, err + } + defer res.Body.Close() + + err = checkApiError(res) + if err != nil { + return 0, err + } + + dec := json.NewDecoder(io.LimitReader(res.Body, 32*1024)) + + var cr ReserveCacheRes + if err = dec.Decode(&cr); err != nil { + return 0, err + } + + if cr.CacheID == 0 { + return 0, fmt.Errorf("invalid response (cache id is 0)") + } + return cr.CacheID, nil +} + +type CommitCacheReq struct { + Size int64 `json:"size"` +} + +func (c *Client) commit(ctx context.Context, id int, size int64) error { + payload := CommitCacheReq{Size: size} + + buf := new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(payload); err != nil { + return err + } + + url := c.url(fmt.Sprintf("caches/%d", id)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + + res, err := c.http.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + err = checkApiError(res) + if err != nil { + return err + } + + return nil +} + +func (c *Client) upload(ctx context.Context, id int, b Blob) error { + var mu sync.Mutex + grp, ctx := errgroup.WithContext(ctx) + offset := int64(0) + for i := 0; i < UploadConcurrency; i++ { + grp.Go(func() error { + for { + mu.Lock() + start := offset + if start >= b.Size() { + mu.Unlock() + return nil + } + end := start + int64(UploadChunkSize) + if end > b.Size() { + end = b.Size() + } + offset = end + mu.Unlock() + + if err := c.create(ctx, id, b, start, end-start); err != nil { + return err + } + } + }) + } + return grp.Wait() +} + +func (c *Client) create(ctx context.Context, id int, ra io.ReaderAt, off, n int64) error { + url := c.url(fmt.Sprintf("caches/%d", id)) + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, url, io.NewSectionReader(ra, off, n)) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("Content-Range", fmt.Sprintf("bytes %d-%d/*", off, off+n-1)) + + res, err := c.http.Do(req) + if err 
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+
+	err = checkApiError(res)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type Entry struct {
+	Key   string `json:"cacheKey"`
+	Scope string `json:"scope"`
+	URL   string `json:"archiveLocation"`
+
+	http *http.Client
+}
+
+// Download returns a ReaderAtCloser for pulling the data. Concurrent reads are not allowed
+func (ce *Entry) Download(ctx context.Context) ReaderAtCloser {
+	return NewReaderAtCloser(func(offset int64) (io.ReadCloser, error) {
+		req, err := http.NewRequestWithContext(ctx, "GET", ce.URL, nil)
+		if err != nil {
+			return nil, err
+		}
+		if offset != 0 {
+			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+		}
+		client := ce.http
+		if client == nil {
+			client = http.DefaultClient
+		}
+
+		res, err := client.Do(req)
+		if err != nil {
+			return nil, err
+		}
+
+		if res.StatusCode < 200 || res.StatusCode >= 300 {
+			if res.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+				return nil, fmt.Errorf("invalid status response %v for %s, range: %v", res.Status, ce.URL, req.Header.Get("Range"))
+			}
+			return nil, fmt.Errorf("invalid status response %v for %s", res.Status, ce.URL)
+		}
+		if offset != 0 {
+			cr := res.Header.Get("content-range")
+			if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
+				res.Body.Close()
+				return nil, fmt.Errorf("unhandled content range in response: %v", cr)
+			}
+		}
+		return res.Body, nil
+	})
+}
+
+func (ce *Entry) WriteTo(ctx context.Context, w io.Writer) error {
+	rac := ce.Download(ctx)
+	if _, err := io.Copy(w, &rc{ReaderAt: rac}); err != nil {
+		return err
+	}
+	return rac.Close()
+}
diff --git a/cache/reader.go b/cache/reader.go
new file mode 100644
index 0000000..9225a76
--- /dev/null
+++ b/cache/reader.go
@@ -0,0 +1,89 @@
+package cache
+
+import (
+	"io"
+)
+
+type ReaderAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type readerAtCloser struct {
+	offset int64
+	rc     io.ReadCloser
+	ra     io.ReaderAt
+	open   func(offset int64) (io.ReadCloser, error)
+	closed bool
+}
+
+func NewReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser {
+	return &readerAtCloser{
+		open: open,
+	}
+}
+
+func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) {
+	if hrs.closed {
+		return 0, io.EOF
+	}
+
+	if hrs.ra != nil {
+		return hrs.ra.ReadAt(p, off)
+	}
+
+	if hrs.rc == nil || off != hrs.offset {
+		if hrs.rc != nil {
+			hrs.rc.Close()
+			hrs.rc = nil
+		}
+		rc, err := hrs.open(off)
+		if err != nil {
+			return 0, err
+		}
+		hrs.rc = rc
+	}
+	if ra, ok := hrs.rc.(io.ReaderAt); ok {
+		hrs.ra = ra
+		n, err = ra.ReadAt(p, off)
+	} else {
+		for {
+			var nn int
+			nn, err = hrs.rc.Read(p)
+			n += nn
+			p = p[nn:]
+			if len(p) == 0 || err != nil {
+				break
+			}
+		}
+	}
+
+	hrs.offset += int64(n)
+	return
+}
+
+func (hrs *readerAtCloser) Close() error {
+	if hrs.closed {
+		return nil
+	}
+	hrs.closed = true
+	if hrs.rc != nil {
+		return hrs.rc.Close()
+	}
+
+	return nil
+}
+
+type rc struct {
+	io.ReaderAt
+	offset int
+}
+
+func (r *rc) Read(b []byte) (int, error) {
+	n, err := r.ReadAt(b, int64(r.offset))
+	r.offset += n
+	if n > 0 && err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
diff --git a/cache/retry.go b/cache/retry.go
new file mode 100644
index 0000000..ce93305
--- /dev/null
+++ b/cache/retry.go
@@ -0,0 +1,42 @@
+package cache
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+type retry struct {
+	transport http.RoundTripper
+	retry     int
+}
+
+func (t *retry) RoundTrip(req *http.Request) (*http.Response, error) {
+	var body []byte
+	if req.Body != nil {
+		body, _ = io.ReadAll(req.Body)
+	}
+
+	for count := 0; count < t.retry; count++ {
+		req.Body = io.NopCloser(bytes.NewBuffer(body))
+		res, err := t.transport.RoundTrip(req)
+		if err != nil {
+			return nil, err
+		}
+		if t.check(res) {
+			if res.Body != nil {
+				io.Copy(io.Discard, res.Body)
+				res.Body.Close()
+			}
+			continue
+		}
+		return res, err
+	}
+
+	return nil, fmt.Errorf("too many retries")
+}
+
+func (t *retry) check(res *http.Response) bool {
+	return res.StatusCode > 399
+}
diff --git a/cache/tar.go b/cache/tar.go
new file mode 100644
index 0000000..8899812
--- /dev/null
+++ b/cache/tar.go
@@ -0,0 +1,115 @@
+package cache
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// Tar takes a source and variable writers and walks 'source' writing each file
+// found to the tar writer; the purpose for accepting multiple writers is to allow
+// for multiple outputs (for example a file, or md5 hash)
+func Tar(src string, writers ...io.Writer) error {
+	if _, err := os.Stat(src); err != nil {
+		return fmt.Errorf("unable to tar files - %v", err.Error())
+	}
+
+	mw := io.MultiWriter(writers...)
+
+	gzw := gzip.NewWriter(mw)
+	defer gzw.Close()
+
+	tw := tar.NewWriter(gzw)
+	defer tw.Close()
+
+	// walk path
+	return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if !fi.Mode().IsRegular() {
+			return nil
+		}
+
+		header, err := tar.FileInfoHeader(fi, fi.Name())
+		if err != nil {
+			return err
+		}
+
+		// update the name to correctly reflect the desired destination when untaring
+		header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator))
+
+		if err := tw.WriteHeader(header); err != nil {
+			return err
+		}
+
+		f, err := os.Open(file)
+		if err != nil {
+			return err
+		}
+
+		if _, err := io.Copy(tw, f); err != nil {
+			f.Close()
+			return err
+		}
+
+		f.Close()
+
+		return nil
+	})
+}
+
+// Untar takes a destination path and a reader; a tar reader loops over the tarfile
+// creating the file structure at 'dst' along the way, and writing any files
+func Untar(dst string, r io.Reader) error {
+
+	gzr, err := gzip.NewReader(r)
+	if err != nil {
+		return err
+	}
+	defer gzr.Close()
+
+	tr := tar.NewReader(gzr)
+
+	for {
+		header, err := tr.Next()
+
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		target := filepath.Join(dst, header.Name)
+
+		switch header.Typeflag {
+
+		// if it's a dir and it doesn't exist create it
+		case tar.TypeDir:
+			if _, err := os.Stat(target); err != nil {
+				if err := os.MkdirAll(target, 0755); err != nil {
+					return err
+				}
+			}
+
+		// if it's a file create it
+		case tar.TypeReg:
+			f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+			if err != nil {
+				return err
+			}
+			if _, err := io.Copy(f, tr); err != nil {
+				return err
+			}
+			f.Close()
+		}
+	}
+	return nil
+}
diff --git a/client.go b/client.go
deleted file mode 100644
index 9898adb..0000000
--- a/client.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sdk
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-func (a *Action) Client() *Client {
-	c := &Client{Client: &http.Client{}}
-	context := a.Context()
-	c.base = context.APIURL
-	c.token = fmt.Sprintf("Bearer %s", context.Token)
-	return c
-}
-
-type Client struct {
-	*http.Client
-	base  string
-	token string
-}
-
-func (c *Client) Do(req *http.Request) (*http.Response, error) {
-	req.Header.Set("Authorization", c.token)
-	if !req.URL.IsAbs() {
-		u, err := url.Parse(fmt.Sprintf("%s%s", c.base, req.URL))
-		if err != nil {
-			return nil, err
-		}
-		req.URL = u
-	}
-	return c.Client.Do(req)
-}
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 0000000..69ceabc
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"context"
+	"os"
+
+	"git.geekeey.de/actions/sdk"
+	"git.geekeey.de/actions/sdk/cache"
+)
+
+func main() {
+	a := sdk.New()
+	a.AddMask("hello")
+	a.WithFieldsSlice("foo=bar", "biz=baz").Debugf("hello world")
+	blob, err := a.Cache().Load(context.Background(), "example")
+	if err != nil {
+		panic(err)
+	}
+	cache.Tar("./foo")
+	f, err := os.Open("")
+	if err != nil {
+		panic(err)
+	}
+	a.Cache().Save(context.Background(), "", cache.NewFileBlob(f))
+	entry := blob.Download(context.Background())
+	if entry == nil {
+		return
+	}
+}
diff --git a/context.go b/context.go
index 838c088..76477d2 100644
--- a/context.go
+++ b/context.go
@@ -2,8 +2,10 @@ package sdk
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
+	"strconv"
 )
 
 // GitHubContext of current workflow.
@@ -12,7 +14,7 @@ type GitHubContext struct {
 	Action           string `env:"GITHUB_ACTION"`
 	ActionPath       string `env:"GITHUB_ACTION_PATH"`
 	ActionRepository string `env:"GITHUB_ACTION_REPOSITORY"`
-	Actions          string `env:"GITHUB_ACTIONS"`
+	Actions          bool   `env:"GITHUB_ACTIONS"`
 	Actor            string `env:"GITHUB_ACTOR"`
 	APIURL           string `env:"GITHUB_API_URL,default=https://api.github.com"`
 	BaseRef          string `env:"GITHUB_BASE_REF"`
@@ -25,36 +27,34 @@ type GitHubContext struct {
 	Path             string `env:"GITHUB_PATH"`
 	Ref              string `env:"GITHUB_REF"`
 	RefName          string `env:"GITHUB_REF_NAME"`
-	RefProtected     string `env:"GITHUB_REF_PROTECTED"`
+	RefProtected     bool   `env:"GITHUB_REF_PROTECTED"`
 	RefType          string `env:"GITHUB_REF_TYPE"`
 	Repository       string `env:"GITHUB_REPOSITORY"`
 	RepositoryOwner  string `env:"GITHUB_REPOSITORY_OWNER"`
-	RetentionDays    string `env:"GITHUB_RETENTION_DAYS"`
-	RunAttempt       string `env:"GITHUB_RUN_ATTEMPT"`
-	RunID            string `env:"GITHUB_RUN_ID"`
-	RunNumber        string `env:"GITHUB_RUN_NUMBER"`
+	RetentionDays    int64  `env:"GITHUB_RETENTION_DAYS"`
+	RunAttempt       int64  `env:"GITHUB_RUN_ATTEMPT"`
+	RunID            int64  `env:"GITHUB_RUN_ID"`
+	RunNumber        int64  `env:"GITHUB_RUN_NUMBER"`
 	ServerURL        string `env:"GITHUB_SERVER_URL,default=https://github.com"`
 	SHA              string `env:"GITHUB_SHA"`
 	StepSummary      string `env:"GITHUB_STEP_SUMMARY"`
 	Workflow         string `env:"GITHUB_WORKFLOW"`
 	Workspace        string `env:"GITHUB_WORKSPACE"`
-	Token            string `env:"GITHUB_WORKSPACE"`
 
-	// Event is populated by parsing the file at EventPath, if it exists.
-	event map[string]any
+	Event map[string]any
 }
 
 // Context returns the context of current action with the payload object
 // that triggered the workflow
-func (c *Action) Context() *GitHubContext {
+func (c *Action) Context() (*GitHubContext, error) {
+	var merr error
 	context := &GitHubContext{
 		APIURL:     "https://api.github.com",
 		GraphqlURL: "https://api.github.com/graphql",
 		ServerURL:  "https://github.com",
-		event:      map[string]any{},
 	}
 
 	if v := c.env("GITHUB_ACTION"); v != "" {
@@ -66,8 +66,10 @@ func (c *Action) Context() *GitHubContext {
 	if v := c.env("GITHUB_ACTION_REPOSITORY"); v != "" {
 		context.ActionRepository = v
 	}
-	if v := c.env("GITHUB_ACTIONS"); v != "" {
+	if v, err := parseBool(c.env("GITHUB_ACTIONS")); err == nil {
 		context.Actions = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
 	if v := c.env("GITHUB_ACTOR"); v != "" {
 		context.Actor = v
@@ -105,29 +107,41 @@ func (c *Action) Context() *GitHubContext {
 	if v := c.env("GITHUB_REF_NAME"); v != "" {
 		context.RefName = v
 	}
-	if v := c.env("GITHUB_REF_PROTECTED"); v != "" {
+	if v, err := parseBool(c.env("GITHUB_REF_PROTECTED")); err == nil {
 		context.RefProtected = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
 	if v := c.env("GITHUB_REF_TYPE"); v != "" {
 		context.RefType = v
 	}
+
 	if v := c.env("GITHUB_REPOSITORY"); v != "" {
 		context.Repository = v
 	}
 	if v := c.env("GITHUB_REPOSITORY_OWNER"); v != "" {
 		context.RepositoryOwner = v
 	}
-	if v := c.env("GITHUB_RETENTION_DAYS"); v != "" {
+
+	if v, err := parseInt(c.env("GITHUB_RETENTION_DAYS")); err == nil {
 		context.RetentionDays = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
-	if v := c.env("GITHUB_RUN_ATTEMPT"); v != "" {
+	if v, err := parseInt(c.env("GITHUB_RUN_ATTEMPT")); err == nil {
 		context.RunAttempt = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
-	if v := c.env("GITHUB_RUN_ID"); v != "" {
+	if v, err := parseInt(c.env("GITHUB_RUN_ID")); err == nil {
 		context.RunID = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
-	if v := c.env("GITHUB_RUN_NUMBER"); v != "" {
+	if v, err := parseInt(c.env("GITHUB_RUN_NUMBER")); err == nil {
 		context.RunNumber = v
+	} else {
+		merr = errors.Join(merr, err)
 	}
 	if v := c.env("GITHUB_SERVER_URL"); v != "" {
 		context.ServerURL = v
@@ -144,24 +158,32 @@ func (c *Action) Context() *GitHubContext {
 	if v := c.env("GITHUB_WORKSPACE"); v != "" {
 		context.Workspace = v
 	}
-	if v := c.env("GITHUB_TOKEN"); v != "" {
-		context.Token = v
-	}
-	return context
-}
-
-func (c *GitHubContext) Event() (map[string]any, error) {
-	if c.EventPath != "" {
-		eventData, err := os.ReadFile(c.EventPath)
+	if context.EventPath != "" {
+		eventData, err := os.ReadFile(context.EventPath)
 		if err != nil && !os.IsNotExist(err) {
 			return nil, fmt.Errorf("could not read event file: %w", err)
 		}
 		if eventData != nil {
-			if err := json.Unmarshal(eventData, &c.event); err != nil {
+			if err := json.Unmarshal(eventData, &context.Event); err != nil {
 				return nil, fmt.Errorf("failed to unmarshal event payload: %w", err)
 			}
 		}
 	}
-	return c.event, nil
+
+	return context, merr
+}
+
+func parseBool(v string) (bool, error) {
+	if v == "" {
+		return false, nil
+	}
+	return strconv.ParseBool(v)
+}
+
+func parseInt(v string) (int64, error) {
+	if v == "" {
+		return 0, nil
+	}
+	return strconv.ParseInt(v, 10, 64)
 }
diff --git a/context_test.go b/context_test.go
index 89f3ab2..f261d42 100644
--- a/context_test.go
+++ b/context_test.go
@@ -37,7 +37,6 @@ func TestAction_Context(t *testing.T) {
 				APIURL:     "https://api.github.com",
 				ServerURL:  "https://github.com",
 				GraphqlURL: "https://api.github.com/graphql",
-				event:      map[string]any{},
 			},
 		},
 		{
@@ -71,13 +70,12 @@ func TestAction_Context(t *testing.T) {
 				"GITHUB_STEP_SUMMARY": "/path/to/summary",
 				"GITHUB_WORKFLOW":     "test",
 				"GITHUB_WORKSPACE":    "/path/to/workspace",
-				"GITHUB_TOKEN":        "somerandomtoken",
 			},
 			exp: &GitHubContext{
 				Action:           "__repo-owner_name-of-action-repo",
 				ActionPath:       "/path/to/action",
 				ActionRepository: "repo-owner/name-of-action-repo",
-				Actions:          "true",
+				Actions:          true,
 				Actor:            "sethvargo",
 				APIURL:           "https://foo.com",
 				BaseRef:          "main",
@@ -90,21 +88,19 @@ func TestAction_Context(t *testing.T) {
 				Path:             "/path/to/path",
 				Ref:              "refs/tags/v1.0",
 				RefName:          "v1.0",
-				RefProtected:     "true",
+				RefProtected:     true,
 				RefType:          "tag",
 				Repository:       "sethvargo/baz",
 				RepositoryOwner:  "sethvargo",
-				RetentionDays:    "90",
-				RunAttempt:       "6",
-				RunID:            "56",
-				RunNumber:        "34",
+				RetentionDays:    90,
+				RunAttempt:       6,
+				RunID:            56,
+				RunNumber:        34,
 				ServerURL:        "https://bar.com",
 				SHA:              "abcd1234",
 				StepSummary:      "/path/to/summary",
 				Workflow:         "test",
 				Workspace:        "/path/to/workspace",
-				Token:            "somerandomtoken",
-				event:            map[string]any{},
 			},
 		},
 		{
@@ -120,7 +116,7 @@ func TestAction_Context(t *testing.T) {
 				ServerURL:  "https://github.com",
 				GraphqlURL: "https://api.github.com/graphql",
 
-				event: map[string]any{
+				Event: map[string]any{
 					"foo": "bar",
 				},
 			},
@@ -135,8 +131,7 @@ func TestAction_Context(t *testing.T) {
 			a := New()
 			a.env = func(s string) string { return tc.env[s] }
 
-			got := a.Context()
-			_, err := got.Event()
+			got, err := a.Context()
 			if err != nil {
 				t.Fatal(err)
 			}
diff --git a/go.mod b/go.mod
index 5f045b8..1348d41 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,5 @@
 module git.geekeey.de/actions/sdk
 
 go 1.22.5
+
+require golang.org/x/sync v0.7.0
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..e8ef4a3
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=