Diffstat (limited to 'pkg/model')
-rw-r--r-- | pkg/model/action.go | 112
-rw-r--r-- | pkg/model/github_context.go | 213
-rw-r--r-- | pkg/model/github_context_test.go | 212
-rw-r--r-- | pkg/model/job_context.go | 12
-rw-r--r-- | pkg/model/planner.go | 415
-rw-r--r-- | pkg/model/planner_test.go | 63
-rw-r--r-- | pkg/model/step_result.go | 45
-rw-r--r-- | pkg/model/testdata/container-volumes/push.yml | 19
-rw-r--r-- | pkg/model/testdata/empty-workflow/push.yml | 0
-rw-r--r-- | pkg/model/testdata/invalid-job-name/invalid-1.yml | 12
-rw-r--r-- | pkg/model/testdata/invalid-job-name/invalid-2.yml | 8
-rw-r--r-- | pkg/model/testdata/invalid-job-name/valid-1.yml | 8
-rw-r--r-- | pkg/model/testdata/invalid-job-name/valid-2.yml | 8
-rw-r--r-- | pkg/model/testdata/nested/success.yml | 9
-rw-r--r-- | pkg/model/testdata/nested/workflows/fail.yml | 0
-rw-r--r-- | pkg/model/testdata/strategy/push.yml | 50
-rw-r--r-- | pkg/model/workflow.go | 763
-rw-r--r-- | pkg/model/workflow_test.go | 624 |
18 files changed, 2573 insertions, 0 deletions
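Before the per-file diffs, a minimal sketch of how the types introduced by this commit fit together: read a workflow file, wrap it in a planner, and plan the jobs triggered by a push event. This is an editor's illustration, not part of the commit; the workflow path is hypothetical.

    package main

    import (
    	"log"
    	"os"

    	"github.com/nektos/act/pkg/model"
    )

    func main() {
    	// hypothetical path; any readable workflow YAML with "on: push" works
    	f, err := os.Open(".github/workflows/push.yml")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	wf, err := model.ReadWorkflow(f)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// stages run in series; runs within a stage can run in parallel
    	plan, err := model.CombineWorkflowPlanner(wf).PlanEvent("push")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for i, stage := range plan.Stages {
    		for _, run := range stage.Runs {
    			log.Printf("stage %d: %s", i, run.String())
    		}
    	}
    }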
diff --git a/pkg/model/action.go b/pkg/model/action.go new file mode 100644 index 0000000..2fc39db --- /dev/null +++ b/pkg/model/action.go @@ -0,0 +1,112 @@ +package model + +import ( + "fmt" + "io" + "strings" + + "gopkg.in/yaml.v3" +) + +// ActionRunsUsing is the type of runner for the action +type ActionRunsUsing string + +func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error { + var using string + if err := unmarshal(&using); err != nil { + return err + } + + // Force input to lowercase for case-insensitive comparison + format := ActionRunsUsing(strings.ToLower(using)) + switch format { + case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo: + *a = format + default: + return fmt.Errorf("The runs.using key in action.yml must be one of: %v, got %s", []string{ + ActionRunsUsingComposite, + ActionRunsUsingDocker, + ActionRunsUsingNode12, + ActionRunsUsingNode16, + ActionRunsUsingNode20, + ActionRunsUsingGo, + }, format) + } + return nil +} + +const ( + // ActionRunsUsingNode12 for running with node12 + ActionRunsUsingNode12 = "node12" + // ActionRunsUsingNode16 for running with node16 + ActionRunsUsingNode16 = "node16" + // ActionRunsUsingNode20 for running with node20 + ActionRunsUsingNode20 = "node20" + // ActionRunsUsingDocker for running with docker + ActionRunsUsingDocker = "docker" + // ActionRunsUsingComposite for running composite + ActionRunsUsingComposite = "composite" + // ActionRunsUsingGo for running with go + ActionRunsUsingGo = "go" +) + +// ActionRuns is a field in Action +type ActionRuns struct { + Using ActionRunsUsing `yaml:"using"` + Env map[string]string `yaml:"env"` + Main string `yaml:"main"` + Pre string `yaml:"pre"` + PreIf string `yaml:"pre-if"` + Post string `yaml:"post"` + PostIf string `yaml:"post-if"` + Image string `yaml:"image"` + Entrypoint string `yaml:"entrypoint"` + Args []string `yaml:"args"` + Steps []Step `yaml:"steps"` +} + +// Action describes a metadata file for GitHub actions. The metadata filename must be either action.yml or action.yaml. The data in the metadata file defines the inputs, outputs and main entrypoint for your action. +type Action struct { + Name string `yaml:"name"` + Author string `yaml:"author"` + Description string `yaml:"description"` + Inputs map[string]Input `yaml:"inputs"` + Outputs map[string]Output `yaml:"outputs"` + Runs ActionRuns `yaml:"runs"` + Branding struct { + Color string `yaml:"color"` + Icon string `yaml:"icon"` + } `yaml:"branding"` +} + +// Input parameters allow you to specify data that the action expects to use during runtime. GitHub stores input parameters as environment variables. Input ids with uppercase letters are converted to lowercase during runtime. We recommend using lowercase input ids. +type Input struct { + Description string `yaml:"description"` + Required bool `yaml:"required"` + Default string `yaml:"default"` +} + +// Output parameters allow you to declare data that an action sets. Actions that run later in a workflow can use the output data set in previously run actions. For example, if you had an action that performed the addition of two inputs (x + y = z), the action could output the sum (z) for other actions to use as an input.
+type Output struct { + Description string `yaml:"description"` + Value string `yaml:"value"` +} + +// ReadAction reads an action from a reader +func ReadAction(in io.Reader) (*Action, error) { + a := new(Action) + err := yaml.NewDecoder(in).Decode(a) + if err != nil { + return nil, err + } + + // set defaults + if a.Runs.PreIf == "" { + a.Runs.PreIf = "always()" + } + if a.Runs.PostIf == "" { + a.Runs.PostIf = "always()" + } + + return a, nil +} diff --git a/pkg/model/github_context.go b/pkg/model/github_context.go new file mode 100644 index 0000000..71221a5 --- /dev/null +++ b/pkg/model/github_context.go @@ -0,0 +1,213 @@ +package model + +import ( + "context" + "fmt" + "strings" + + "github.com/nektos/act/pkg/common" + "github.com/nektos/act/pkg/common/git" +) + +type GithubContext struct { + Event map[string]interface{} `json:"event"` + EventPath string `json:"event_path"` + Workflow string `json:"workflow"` + RunID string `json:"run_id"` + RunNumber string `json:"run_number"` + Actor string `json:"actor"` + Repository string `json:"repository"` + EventName string `json:"event_name"` + Sha string `json:"sha"` + Ref string `json:"ref"` + RefName string `json:"ref_name"` + RefType string `json:"ref_type"` + HeadRef string `json:"head_ref"` + BaseRef string `json:"base_ref"` + Token string `json:"token"` + Workspace string `json:"workspace"` + Action string `json:"action"` + ActionPath string `json:"action_path"` + ActionRef string `json:"action_ref"` + ActionRepository string `json:"action_repository"` + Job string `json:"job"` + JobName string `json:"job_name"` + RepositoryOwner string `json:"repository_owner"` + RetentionDays string `json:"retention_days"` + RunnerPerflog string `json:"runner_perflog"` + RunnerTrackingID string `json:"runner_tracking_id"` + ServerURL string `json:"server_url"` + APIURL string `json:"api_url"` + GraphQLURL string `json:"graphql_url"` +} + +func asString(v interface{}) string { + if v == nil { + return "" + } else if s, ok := v.(string); ok { + return s + } + return "" +} + +func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) { + var ok bool + + if len(ks) == 0 { // degenerate input + return nil + } + if rval, ok = m[ks[0]]; !ok { + return nil + } else if len(ks) == 1 { // we've reached the final key + return rval + } else if m, ok = rval.(map[string]interface{}); !ok { + return nil + } else { // 1+ more keys + return nestedMapLookup(m, ks[1:]...) 
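// Editor's note, not part of the diff: the recursive call above strips one key per call, so a lookup such as nestedMapLookup(event, "pull_request", "base", "sha") descends one map level per key and returns nil as soon as a key is missing or an intermediate value is not a nested map.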
+ } +} + +func withDefaultBranch(ctx context.Context, b string, event map[string]interface{}) map[string]interface{} { + repoI, ok := event["repository"] + if !ok { + repoI = make(map[string]interface{}) + } + + repo, ok := repoI.(map[string]interface{}) + if !ok { + common.Logger(ctx).Warnf("unable to set default branch to %v", b) + return event + } + + // if the branch is already there return with no changes + if _, ok = repo["default_branch"]; ok { + return event + } + + repo["default_branch"] = b + event["repository"] = repo + + return event +} + +var findGitRef = git.FindGitRef +var findGitRevision = git.FindGitRevision + +func (ghc *GithubContext) SetRef(ctx context.Context, defaultBranch string, repoPath string) { + logger := common.Logger(ctx) + + // https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows + // https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads + switch ghc.EventName { + case "pull_request_target": + ghc.Ref = fmt.Sprintf("refs/heads/%s", ghc.BaseRef) + case "pull_request", "pull_request_review", "pull_request_review_comment": + ghc.Ref = fmt.Sprintf("refs/pull/%.0f/merge", ghc.Event["number"]) + case "deployment", "deployment_status": + ghc.Ref = asString(nestedMapLookup(ghc.Event, "deployment", "ref")) + case "release": + ghc.Ref = fmt.Sprintf("refs/tags/%s", asString(nestedMapLookup(ghc.Event, "release", "tag_name"))) + case "push", "create", "workflow_dispatch": + ghc.Ref = asString(ghc.Event["ref"]) + default: + defaultBranch := asString(nestedMapLookup(ghc.Event, "repository", "default_branch")) + if defaultBranch != "" { + ghc.Ref = fmt.Sprintf("refs/heads/%s", defaultBranch) + } + } + + if ghc.Ref == "" { + ref, err := findGitRef(ctx, repoPath) + if err != nil { + logger.Warningf("unable to get git ref: %v", err) + } else { + logger.Debugf("using github ref: %s", ref) + ghc.Ref = ref + } + + // set the branch in the event data + if defaultBranch != "" { + ghc.Event = withDefaultBranch(ctx, defaultBranch, ghc.Event) + } else { + ghc.Event = withDefaultBranch(ctx, "master", ghc.Event) + } + + if ghc.Ref == "" { + ghc.Ref = fmt.Sprintf("refs/heads/%s", asString(nestedMapLookup(ghc.Event, "repository", "default_branch"))) + } + } +} + +func (ghc *GithubContext) SetSha(ctx context.Context, repoPath string) { + logger := common.Logger(ctx) + + // https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows + // https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads + switch ghc.EventName { + case "pull_request_target": + ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha")) + case "deployment", "deployment_status": + ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha")) + case "push", "create", "workflow_dispatch": + if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted { + ghc.Sha = asString(ghc.Event["after"]) + } + } + + if ghc.Sha == "" { + _, sha, err := findGitRevision(ctx, repoPath) + if err != nil { + logger.Warningf("unable to get git revision: %v", err) + } else { + ghc.Sha = sha + } + } +} + +func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInstance string, remoteName string, repoPath string) { + if ghc.Repository == "" { + repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName) + if err != nil { + common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, 
remoteName, repoPath, err) + return + } + ghc.Repository = repo + } + ghc.RepositoryOwner = strings.Split(ghc.Repository, "/")[0] +} + +func (ghc *GithubContext) SetRefTypeAndName() { + var refType, refName string + + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + if strings.HasPrefix(ghc.Ref, "refs/tags/") { + refType = "tag" + refName = ghc.Ref[len("refs/tags/"):] + } else if strings.HasPrefix(ghc.Ref, "refs/heads/") { + refType = "branch" + refName = ghc.Ref[len("refs/heads/"):] + } else if strings.HasPrefix(ghc.Ref, "refs/pull/") { + refType = "" + refName = ghc.Ref[len("refs/pull/"):] + } + + if ghc.RefType == "" { + ghc.RefType = refType + } + + if ghc.RefName == "" { + ghc.RefName = refName + } +} + +func (ghc *GithubContext) SetBaseAndHeadRef() { + if ghc.EventName == "pull_request" || ghc.EventName == "pull_request_target" { + if ghc.BaseRef == "" { + ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref")) + } + + if ghc.HeadRef == "" { + ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref")) + } + } +} diff --git a/pkg/model/github_context_test.go b/pkg/model/github_context_test.go new file mode 100644 index 0000000..ed08e23 --- /dev/null +++ b/pkg/model/github_context_test.go @@ -0,0 +1,212 @@ +package model + +import ( + "context" + "fmt" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestSetRef(t *testing.T) { + log.SetLevel(log.DebugLevel) + + oldFindGitRef := findGitRef + oldFindGitRevision := findGitRevision + defer func() { findGitRef = oldFindGitRef }() + defer func() { findGitRevision = oldFindGitRevision }() + + findGitRef = func(ctx context.Context, file string) (string, error) { + return "refs/heads/master", nil + } + + findGitRevision = func(ctx context.Context, file string) (string, string, error) { + return "", "1234fakesha", nil + } + + tables := []struct { + eventName string + event map[string]interface{} + ref string + refName string + }{ + { + eventName: "pull_request_target", + event: map[string]interface{}{}, + ref: "refs/heads/master", + refName: "master", + }, + { + eventName: "pull_request", + event: map[string]interface{}{ + "number": 1234., + }, + ref: "refs/pull/1234/merge", + refName: "1234/merge", + }, + { + eventName: "deployment", + event: map[string]interface{}{ + "deployment": map[string]interface{}{ + "ref": "refs/heads/somebranch", + }, + }, + ref: "refs/heads/somebranch", + refName: "somebranch", + }, + { + eventName: "release", + event: map[string]interface{}{ + "release": map[string]interface{}{ + "tag_name": "v1.0.0", + }, + }, + ref: "refs/tags/v1.0.0", + refName: "v1.0.0", + }, + { + eventName: "push", + event: map[string]interface{}{ + "ref": "refs/heads/somebranch", + }, + ref: "refs/heads/somebranch", + refName: "somebranch", + }, + { + eventName: "unknown", + event: map[string]interface{}{ + "repository": map[string]interface{}{ + "default_branch": "main", + }, + }, + ref: "refs/heads/main", + refName: "main", + }, + { + eventName: "no-event", + event: map[string]interface{}{}, + ref: "refs/heads/master", + refName: "master", + }, + } + + for _, table := range tables { + t.Run(table.eventName, func(t *testing.T) { + ghc := &GithubContext{ + EventName: table.eventName, + BaseRef: "master", + Event: table.event, + } + + ghc.SetRef(context.Background(), "main", "/some/dir") + ghc.SetRefTypeAndName() + + assert.Equal(t, table.ref, ghc.Ref) + assert.Equal(t, table.refName, ghc.RefName) + }) + } + + 
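	// Illustration only, not part of the diff: the table above exercises the fallback chain in SetRef. The event payload is consulted first, then git ref discovery via the stubbed findGitRef, and finally "refs/heads/<default_branch>". The subtest below forces that last step by making findGitRef fail.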
t.Run("no-default-branch", func(t *testing.T) { + findGitRef = func(ctx context.Context, file string) (string, error) { + return "", fmt.Errorf("no default branch") + } + + ghc := &GithubContext{ + EventName: "no-default-branch", + Event: map[string]interface{}{}, + } + + ghc.SetRef(context.Background(), "", "/some/dir") + + assert.Equal(t, "refs/heads/master", ghc.Ref) + }) +} + +func TestSetSha(t *testing.T) { + log.SetLevel(log.DebugLevel) + + oldFindGitRef := findGitRef + oldFindGitRevision := findGitRevision + defer func() { findGitRef = oldFindGitRef }() + defer func() { findGitRevision = oldFindGitRevision }() + + findGitRef = func(ctx context.Context, file string) (string, error) { + return "refs/heads/master", nil + } + + findGitRevision = func(ctx context.Context, file string) (string, string, error) { + return "", "1234fakesha", nil + } + + tables := []struct { + eventName string + event map[string]interface{} + sha string + }{ + { + eventName: "pull_request_target", + event: map[string]interface{}{ + "pull_request": map[string]interface{}{ + "base": map[string]interface{}{ + "sha": "pr-base-sha", + }, + }, + }, + sha: "pr-base-sha", + }, + { + eventName: "pull_request", + event: map[string]interface{}{ + "number": 1234., + }, + sha: "1234fakesha", + }, + { + eventName: "deployment", + event: map[string]interface{}{ + "deployment": map[string]interface{}{ + "sha": "deployment-sha", + }, + }, + sha: "deployment-sha", + }, + { + eventName: "release", + event: map[string]interface{}{}, + sha: "1234fakesha", + }, + { + eventName: "push", + event: map[string]interface{}{ + "after": "push-sha", + "deleted": false, + }, + sha: "push-sha", + }, + { + eventName: "unknown", + event: map[string]interface{}{}, + sha: "1234fakesha", + }, + { + eventName: "no-event", + event: map[string]interface{}{}, + sha: "1234fakesha", + }, + } + + for _, table := range tables { + t.Run(table.eventName, func(t *testing.T) { + ghc := &GithubContext{ + EventName: table.eventName, + BaseRef: "master", + Event: table.event, + } + + ghc.SetSha(context.Background(), "/some/dir") + + assert.Equal(t, table.sha, ghc.Sha) + }) + } +} diff --git a/pkg/model/job_context.go b/pkg/model/job_context.go new file mode 100644 index 0000000..1d27e49 --- /dev/null +++ b/pkg/model/job_context.go @@ -0,0 +1,12 @@ +package model + +type JobContext struct { + Status string `json:"status"` + Container struct { + ID string `json:"id"` + Network string `json:"network"` + } `json:"container"` + Services map[string]struct { + ID string `json:"id"` + } `json:"services"` +} diff --git a/pkg/model/planner.go b/pkg/model/planner.go new file mode 100644 index 0000000..2d68145 --- /dev/null +++ b/pkg/model/planner.go @@ -0,0 +1,415 @@ +package model + +import ( + "fmt" + "io" + "io/fs" + "math" + "os" + "path/filepath" + "regexp" + "sort" + + log "github.com/sirupsen/logrus" +) + +// WorkflowPlanner contains methods for creating plans +type WorkflowPlanner interface { + PlanEvent(eventName string) (*Plan, error) + PlanJob(jobName string) (*Plan, error) + PlanAll() (*Plan, error) + GetEvents() []string +} + +// Plan contains a list of stages to run in series +type Plan struct { + Stages []*Stage +} + +// Stage contains a list of runs to execute in parallel +type Stage struct { + Runs []*Run +} + +// Run represents a job from a workflow that needs to be run +type Run struct { + Workflow *Workflow + JobID string +} + +func (r *Run) String() string { + jobName := r.Job().Name + if jobName == "" { + jobName = r.JobID + } + return jobName +} + 
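// Illustration only, not part of this commit: a Plan is consumed stage by
// stage; stages execute in series, while the runs inside a single stage are
// independent and may be dispatched in parallel. describePlan is a
// hypothetical walker, not an API of this package.
func describePlan(plan *Plan) {
	for i, stage := range plan.Stages {
		for _, run := range stage.Runs {
			// run.String() falls back to the job ID when the job has no name
			log.Debugf("stage %d: %s (job id %s)", i, run.String(), run.JobID)
		}
	}
}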
+// Job returns the job for this Run +func (r *Run) Job() *Job { + return r.Workflow.GetJob(r.JobID) +} + +type WorkflowFiles struct { + workflowDirEntry os.DirEntry + dirPath string +} + +// NewWorkflowPlanner will load a specific workflow, all workflows from a directory or all workflows from a directory and its subdirectories +// +//nolint:gocyclo +func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, error) { + path, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + var workflows []WorkflowFiles + + if fi.IsDir() { + log.Debugf("Loading workflows from '%s'", path) + if noWorkflowRecurse { + files, err := os.ReadDir(path) + if err != nil { + return nil, err + } + + for _, v := range files { + workflows = append(workflows, WorkflowFiles{ + dirPath: path, + workflowDirEntry: v, + }) + } + } else { + log.Debug("Loading workflows recursively") + if err := filepath.Walk(path, + func(p string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + if !f.IsDir() { + log.Debugf("Found workflow '%s' in '%s'", f.Name(), p) + workflows = append(workflows, WorkflowFiles{ + dirPath: filepath.Dir(p), + workflowDirEntry: fs.FileInfoToDirEntry(f), + }) + } + + return nil + }); err != nil { + return nil, err + } + } + } else { + log.Debugf("Loading workflow '%s'", path) + dirname := filepath.Dir(path) + + workflows = append(workflows, WorkflowFiles{ + dirPath: dirname, + workflowDirEntry: fs.FileInfoToDirEntry(fi), + }) + } + if err != nil { + return nil, err + } + + wp := new(workflowPlanner) + for _, wf := range workflows { + ext := filepath.Ext(wf.workflowDirEntry.Name()) + if ext == ".yml" || ext == ".yaml" { + f, err := os.Open(filepath.Join(wf.dirPath, wf.workflowDirEntry.Name())) + if err != nil { + return nil, err + } + + log.Debugf("Reading workflow '%s'", f.Name()) + workflow, err := ReadWorkflow(f) + if err != nil { + _ = f.Close() + if err == io.EOF { + return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", wf.workflowDirEntry.Name(), err) + } + return nil, fmt.Errorf("workflow is not valid. '%s': %w", wf.workflowDirEntry.Name(), err) + } + _, err = f.Seek(0, 0) + if err != nil { + _ = f.Close() + return nil, fmt.Errorf("error occurring when resetting io pointer in '%s': %w", wf.workflowDirEntry.Name(), err) + } + + workflow.File = wf.workflowDirEntry.Name() + if workflow.Name == "" { + workflow.Name = wf.workflowDirEntry.Name() + } + + err = validateJobName(workflow) + if err != nil { + _ = f.Close() + return nil, err + } + + wp.workflows = append(wp.workflows, workflow) + _ = f.Close() + } + } + + return wp, nil +} + +// CombineWorkflowPlanner combines workflows to a WorkflowPlanner +func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner { + return &workflowPlanner{ + workflows: workflows, + } +} + +func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) { + wp := new(workflowPlanner) + + log.Debugf("Reading workflow %s", name) + workflow, err := ReadWorkflow(f) + if err != nil { + if err == io.EOF { + return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err) + } + return nil, fmt.Errorf("workflow is not valid. 
'%s': %w", name, err) + } + workflow.File = name + if workflow.Name == "" { + workflow.Name = name + } + + err = validateJobName(workflow) + if err != nil { + return nil, err + } + + wp.workflows = append(wp.workflows, workflow) + + return wp, nil +} + +func validateJobName(workflow *Workflow) error { + jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`) + for k := range workflow.Jobs { + if ok := jobNameRegex.MatchString(k); !ok { + return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k) + } + } + return nil +} + +type workflowPlanner struct { + workflows []*Workflow +} + +// PlanEvent builds a new list of runs to execute in parallel for an event name +func (wp *workflowPlanner) PlanEvent(eventName string) (*Plan, error) { + plan := new(Plan) + if len(wp.workflows) == 0 { + log.Debug("no workflows found by planner") + return plan, nil + } + var lastErr error + + for _, w := range wp.workflows { + events := w.On() + if len(events) == 0 { + log.Debugf("no events found for workflow: %s", w.File) + continue + } + + for _, e := range events { + if e == eventName { + stages, err := createStages(w, w.GetJobIDs()...) + if err != nil { + log.Warn(err) + lastErr = err + } else { + plan.mergeStages(stages) + } + } + } + } + return plan, lastErr +} + +// PlanJob builds a new plan to execute the named job +func (wp *workflowPlanner) PlanJob(jobName string) (*Plan, error) { + plan := new(Plan) + if len(wp.workflows) == 0 { + log.Debugf("no workflows found for job: %s", jobName) + } + var lastErr error + + for _, w := range wp.workflows { + stages, err := createStages(w, jobName) + if err != nil { + log.Warn(err) + lastErr = err + } else { + plan.mergeStages(stages) + } + } + return plan, lastErr +} + +// PlanAll builds a new plan to execute all jobs in all workflows +func (wp *workflowPlanner) PlanAll() (*Plan, error) { + plan := new(Plan) + if len(wp.workflows) == 0 { + log.Debug("no workflows found by planner") + return plan, nil + } + var lastErr error + + for _, w := range wp.workflows { + stages, err := createStages(w, w.GetJobIDs()...) + if err != nil { + log.Warn(err) + lastErr = err + } else { + plan.mergeStages(stages) + } + } + + return plan, lastErr +} + +// GetEvents gets all the events in the workflow files +func (wp *workflowPlanner) GetEvents() []string { + events := make([]string, 0) + for _, w := range wp.workflows { + found := false + for _, e := range events { + for _, we := range w.On() { + if e == we { + found = true + break + } + } + if found { + break + } + } + + if !found { + events = append(events, w.On()...)
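// Editor's note, not part of the diff: the append above runs only when none of this workflow's events have been recorded yet; if any one of them was already seen, the workflow's remaining events are skipped as well.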
+ } + } + + // sort the events alphabetically so the list is deterministic + sort.Slice(events, func(i, j int) bool { + return events[i] < events[j] + }) + + return events +} + +// MaxRunNameLen determines the max name length of all jobs +func (p *Plan) MaxRunNameLen() int { + maxRunNameLen := 0 + for _, stage := range p.Stages { + for _, run := range stage.Runs { + runNameLen := len(run.String()) + if runNameLen > maxRunNameLen { + maxRunNameLen = runNameLen + } + } + } + return maxRunNameLen +} + +// GetJobIDs will get all the job IDs in the stage +func (s *Stage) GetJobIDs() []string { + names := make([]string, 0) + for _, r := range s.Runs { + names = append(names, r.JobID) + } + return names +} + +// Merge stages with existing stages in plan +func (p *Plan) mergeStages(stages []*Stage) { + newStages := make([]*Stage, int(math.Max(float64(len(p.Stages)), float64(len(stages))))) + for i := 0; i < len(newStages); i++ { + newStages[i] = new(Stage) + if i >= len(p.Stages) { + newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...) + } else if i >= len(stages) { + newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...) + } else { + newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...) + newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...) + } + } + p.Stages = newStages +} + +func createStages(w *Workflow, jobIDs ...string) ([]*Stage, error) { + // first, build a list of all the necessary jobs to run, and their dependencies + jobDependencies := make(map[string][]string) + for len(jobIDs) > 0 { + newJobIDs := make([]string, 0) + for _, jID := range jobIDs { + // make sure we haven't visited this job yet + if _, ok := jobDependencies[jID]; !ok { + if job := w.GetJob(jID); job != nil { + jobDependencies[jID] = job.Needs() + newJobIDs = append(newJobIDs, job.Needs()...) + } + } + } + jobIDs = newJobIDs + } + + // next, build an execution graph + stages := make([]*Stage, 0) + for len(jobDependencies) > 0 { + stage := new(Stage) + for jID, jDeps := range jobDependencies { + // make sure all deps are in the graph already + if listInStages(jDeps, stages...) { + stage.Runs = append(stage.Runs, &Run{ + Workflow: w, + JobID: jID, + }) + delete(jobDependencies, jID) + } + } + if len(stage.Runs) == 0 { + return nil, fmt.Errorf("unable to build dependency graph for %s (%s)", w.Name, w.File) + } + stages = append(stages, stage) + } + + if len(stages) == 0 { + return nil, fmt.Errorf("Could not find any stages to run. View the valid jobs with `act --list`. 
Use `act --help` to find how to filter by Job ID/Workflow/Event Name") + } + + return stages, nil +} + +// return true iff all strings in srcList exist in at least one of the stages +func listInStages(srcList []string, stages ...*Stage) bool { + for _, src := range srcList { + found := false + for _, stage := range stages { + for _, search := range stage.GetJobIDs() { + if src == search { + found = true + } + } + } + if !found { + return false + } + } + return true +} diff --git a/pkg/model/planner_test.go b/pkg/model/planner_test.go new file mode 100644 index 0000000..e41f669 --- /dev/null +++ b/pkg/model/planner_test.go @@ -0,0 +1,63 @@ +package model + +import ( + "path/filepath" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +type WorkflowPlanTest struct { + workflowPath string + errorMessage string + noWorkflowRecurse bool +} + +func TestPlanner(t *testing.T) { + log.SetLevel(log.DebugLevel) + + tables := []WorkflowPlanTest{ + {"invalid-job-name/invalid-1.yml", "workflow is not valid. 'invalid-job-name-1': Job name 'invalid-JOB-Name-v1.2.3-docker_hub' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", false}, + {"invalid-job-name/invalid-2.yml", "workflow is not valid. 'invalid-job-name-2': Job name '1234invalid-JOB-Name-v123-docker_hub' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", false}, + {"invalid-job-name/valid-1.yml", "", false}, + {"invalid-job-name/valid-2.yml", "", false}, + {"empty-workflow", "unable to read workflow 'push.yml': file is empty: EOF", false}, + {"nested", "unable to read workflow 'fail.yml': file is empty: EOF", false}, + {"nested", "", true}, + } + + workdir, err := filepath.Abs("testdata") + assert.NoError(t, err, workdir) + for _, table := range tables { + fullWorkflowPath := filepath.Join(workdir, table.workflowPath) + _, err = NewWorkflowPlanner(fullWorkflowPath, table.noWorkflowRecurse) + if table.errorMessage == "" { + assert.NoError(t, err, "WorkflowPlanner should exit without any error") + } else { + assert.EqualError(t, err, table.errorMessage) + } + } +} + +func TestWorkflow(t *testing.T) { + log.SetLevel(log.DebugLevel) + + workflow := Workflow{ + Jobs: map[string]*Job{ + "valid_job": { + Name: "valid_job", + }, + }, + } + + // Check that an invalid job id returns error + result, err := createStages(&workflow, "invalid_job_id") + assert.NotNil(t, err) + assert.Nil(t, result) + + // Check that an valid job id returns non-error + result, err = createStages(&workflow, "valid_job") + assert.Nil(t, err) + assert.NotNil(t, result) +} diff --git a/pkg/model/step_result.go b/pkg/model/step_result.go new file mode 100644 index 0000000..86e5ebf --- /dev/null +++ b/pkg/model/step_result.go @@ -0,0 +1,45 @@ +package model + +import "fmt" + +type stepStatus int + +const ( + StepStatusSuccess stepStatus = iota + StepStatusFailure + StepStatusSkipped +) + +var stepStatusStrings = [...]string{ + "success", + "failure", + "skipped", +} + +func (s stepStatus) MarshalText() ([]byte, error) { + return []byte(s.String()), nil +} + +func (s *stepStatus) UnmarshalText(b []byte) error { + str := string(b) + for i, name := range stepStatusStrings { + if name == str { + *s = stepStatus(i) + return nil + } + } + return fmt.Errorf("invalid step status %q", str) +} + +func (s stepStatus) String() string { + if int(s) >= len(stepStatusStrings) { + return "" + } + return stepStatusStrings[s] +} + +type StepResult struct 
{ + Outputs map[string]string `json:"outputs"` + Conclusion stepStatus `json:"conclusion"` + Outcome stepStatus `json:"outcome"` +} diff --git a/pkg/model/testdata/container-volumes/push.yml b/pkg/model/testdata/container-volumes/push.yml new file mode 100644 index 0000000..1ed27f2 --- /dev/null +++ b/pkg/model/testdata/container-volumes/push.yml @@ -0,0 +1,19 @@ +name: Job Container +on: push + +jobs: + with-volumes: + runs-on: ubuntu-latest + container: + image: node:16-buster-slim + volumes: + - my_docker_volume:/path/to/volume + - /path/to/nonexist/directory + - /proc/sys/kernel/random/boot_id:/current/boot_id + steps: + - run: | + set -e + test -d /path/to/volume + test "$(cat /proc/sys/kernel/random/boot_id)" = "$(cat /current/boot_id)" + test -d /path/to/nonexist/directory +
\ No newline at end of file diff --git a/pkg/model/testdata/empty-workflow/push.yml b/pkg/model/testdata/empty-workflow/push.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/pkg/model/testdata/empty-workflow/push.yml diff --git a/pkg/model/testdata/invalid-job-name/invalid-1.yml b/pkg/model/testdata/invalid-job-name/invalid-1.yml new file mode 100644 index 0000000..b7129d4 --- /dev/null +++ b/pkg/model/testdata/invalid-job-name/invalid-1.yml @@ -0,0 +1,12 @@ +name: invalid-job-name-1 +on: push + +jobs: + invalid-JOB-Name-v1.2.3-docker_hub: + runs-on: ubuntu-latest + steps: + - run: echo hi + valid-JOB-Name-v123-docker_hub: + runs-on: ubuntu-latest + steps: + - run: echo hi diff --git a/pkg/model/testdata/invalid-job-name/invalid-2.yml b/pkg/model/testdata/invalid-job-name/invalid-2.yml new file mode 100644 index 0000000..33dd31d --- /dev/null +++ b/pkg/model/testdata/invalid-job-name/invalid-2.yml @@ -0,0 +1,8 @@ +name: invalid-job-name-2 +on: push + +jobs: + 1234invalid-JOB-Name-v123-docker_hub: + runs-on: ubuntu-latest + steps: + - run: echo hi diff --git a/pkg/model/testdata/invalid-job-name/valid-1.yml b/pkg/model/testdata/invalid-job-name/valid-1.yml new file mode 100644 index 0000000..42e8293 --- /dev/null +++ b/pkg/model/testdata/invalid-job-name/valid-1.yml @@ -0,0 +1,8 @@ +name: valid-job-name-1 +on: push + +jobs: + valid-JOB-Name-v123-docker_hub: + runs-on: ubuntu-latest + steps: + - run: echo hi diff --git a/pkg/model/testdata/invalid-job-name/valid-2.yml b/pkg/model/testdata/invalid-job-name/valid-2.yml new file mode 100644 index 0000000..3e5c7bc --- /dev/null +++ b/pkg/model/testdata/invalid-job-name/valid-2.yml @@ -0,0 +1,8 @@ +name: valid-job-name-2 +on: push + +jobs: + ___valid-JOB-Name-v123-docker_hub: + runs-on: ubuntu-latest + steps: + - run: echo hi diff --git a/pkg/model/testdata/nested/success.yml b/pkg/model/testdata/nested/success.yml new file mode 100644 index 0000000..f65ee91 --- /dev/null +++ b/pkg/model/testdata/nested/success.yml @@ -0,0 +1,9 @@ +name: Hello World Workflow +on: push + +jobs: + hello-world: + name: Hello World Job + runs-on: ubuntu-latest + steps: + - run: echo "Hello World!" diff --git a/pkg/model/testdata/nested/workflows/fail.yml b/pkg/model/testdata/nested/workflows/fail.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/pkg/model/testdata/nested/workflows/fail.yml diff --git a/pkg/model/testdata/strategy/push.yml b/pkg/model/testdata/strategy/push.yml new file mode 100644 index 0000000..89bea6e --- /dev/null +++ b/pkg/model/testdata/strategy/push.yml @@ -0,0 +1,50 @@ +--- +jobs: + strategy-all: + name: ${{ matrix.node-version }} | ${{ matrix.site }} | ${{ matrix.datacenter }} + runs-on: ubuntu-latest + steps: + - run: echo 'Hello!' + strategy: + fail-fast: false + matrix: + datacenter: + - site-c + - site-d + exclude: + - datacenter: site-d + node-version: 14.x + site: staging + include: + - php-version: 5.4 + - datacenter: site-a + node-version: 10.x + site: prod + - datacenter: site-b + node-version: 12.x + site: dev + node-version: [14.x, 16.x] + site: + - staging + max-parallel: 2 + strategy-no-matrix: + runs-on: ubuntu-latest + steps: + - run: echo 'Hello!' + strategy: + fail-fast: false + max-parallel: 2 + strategy-only-fail-fast: + runs-on: ubuntu-latest + steps: + - run: echo 'Hello!' + strategy: + fail-fast: false + strategy-only-max-parallel: + runs-on: ubuntu-latest + steps: + - run: echo 'Hello!' 
+ strategy: + max-parallel: 2 +'on': + push: null diff --git a/pkg/model/workflow.go b/pkg/model/workflow.go new file mode 100644 index 0000000..7bec766 --- /dev/null +++ b/pkg/model/workflow.go @@ -0,0 +1,763 @@ +package model + +import ( + "fmt" + "io" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/nektos/act/pkg/common" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// Workflow is the structure of the files in .github/workflows +type Workflow struct { + File string + Name string `yaml:"name"` + RawOn yaml.Node `yaml:"on"` + Env map[string]string `yaml:"env"` + Jobs map[string]*Job `yaml:"jobs"` + Defaults Defaults `yaml:"defaults"` +} + +// On events for the workflow +func (w *Workflow) On() []string { + switch w.RawOn.Kind { + case yaml.ScalarNode: + var val string + err := w.RawOn.Decode(&val) + if err != nil { + log.Fatal(err) + } + return []string{val} + case yaml.SequenceNode: + var val []string + err := w.RawOn.Decode(&val) + if err != nil { + log.Fatal(err) + } + return val + case yaml.MappingNode: + var val map[string]interface{} + err := w.RawOn.Decode(&val) + if err != nil { + log.Fatal(err) + } + var keys []string + for k := range val { + keys = append(keys, k) + } + return keys + } + return nil +} + +func (w *Workflow) OnEvent(event string) interface{} { + if w.RawOn.Kind == yaml.MappingNode { + var val map[string]interface{} + if !decodeNode(w.RawOn, &val) { + return nil + } + return val[event] + } + return nil +} + +func (w *Workflow) OnSchedule() []string { + schedules := w.OnEvent("schedule") + if schedules == nil { + return []string{} + } + + switch val := schedules.(type) { + case []interface{}: + allSchedules := []string{} + for _, v := range val { + for k, cron := range v.(map[string]interface{}) { + if k != "cron" { + continue + } + allSchedules = append(allSchedules, cron.(string)) + } + } + return allSchedules + default: + } + + return []string{} +} + +type WorkflowDispatchInput struct { + Description string `yaml:"description"` + Required bool `yaml:"required"` + Default string `yaml:"default"` + Type string `yaml:"type"` + Options []string `yaml:"options"` +} + +type WorkflowDispatch struct { + Inputs map[string]WorkflowDispatchInput `yaml:"inputs"` +} + +func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch { + switch w.RawOn.Kind { + case yaml.ScalarNode: + var val string + if !decodeNode(w.RawOn, &val) { + return nil + } + if val == "workflow_dispatch" { + return &WorkflowDispatch{} + } + case yaml.SequenceNode: + var val []string + if !decodeNode(w.RawOn, &val) { + return nil + } + for _, v := range val { + if v == "workflow_dispatch" { + return &WorkflowDispatch{} + } + } + case yaml.MappingNode: + var val map[string]yaml.Node + if !decodeNode(w.RawOn, &val) { + return nil + } + + n, found := val["workflow_dispatch"] + var workflowDispatch WorkflowDispatch + if found && decodeNode(n, &workflowDispatch) { + return &workflowDispatch + } + default: + return nil + } + return nil +} + +type WorkflowCallInput struct { + Description string `yaml:"description"` + Required bool `yaml:"required"` + Default string `yaml:"default"` + Type string `yaml:"type"` +} + +type WorkflowCallOutput struct { + Description string `yaml:"description"` + Value string `yaml:"value"` +} + +type WorkflowCall struct { + Inputs map[string]WorkflowCallInput `yaml:"inputs"` + Outputs map[string]WorkflowCallOutput `yaml:"outputs"` +} + +type WorkflowCallResult struct { + Outputs map[string]string +} + +func (w *Workflow) WorkflowCallConfig() 
*WorkflowCall { + if w.RawOn.Kind != yaml.MappingNode { + // The callers expect for "on: workflow_call" and "on: [ workflow_call ]" a non nil return value + return &WorkflowCall{} + } + + var val map[string]yaml.Node + if !decodeNode(w.RawOn, &val) { + return &WorkflowCall{} + } + + var config WorkflowCall + node := val["workflow_call"] + if !decodeNode(node, &config) { + return &WorkflowCall{} + } + + return &config +} + +// Job is the structure of one job in a workflow +type Job struct { + Name string `yaml:"name"` + RawNeeds yaml.Node `yaml:"needs"` + RawRunsOn yaml.Node `yaml:"runs-on"` + Env yaml.Node `yaml:"env"` + If yaml.Node `yaml:"if"` + Steps []*Step `yaml:"steps"` + TimeoutMinutes string `yaml:"timeout-minutes"` + Services map[string]*ContainerSpec `yaml:"services"` + Strategy *Strategy `yaml:"strategy"` + RawContainer yaml.Node `yaml:"container"` + Defaults Defaults `yaml:"defaults"` + Outputs map[string]string `yaml:"outputs"` + Uses string `yaml:"uses"` + With map[string]interface{} `yaml:"with"` + RawSecrets yaml.Node `yaml:"secrets"` + Result string +} + +// Strategy for the job +type Strategy struct { + FailFast bool + MaxParallel int + FailFastString string `yaml:"fail-fast"` + MaxParallelString string `yaml:"max-parallel"` + RawMatrix yaml.Node `yaml:"matrix"` +} + +// Default settings that will apply to all steps in the job or workflow +type Defaults struct { + Run RunDefaults `yaml:"run"` +} + +// Defaults for all run steps in the job or workflow +type RunDefaults struct { + Shell string `yaml:"shell"` + WorkingDirectory string `yaml:"working-directory"` +} + +// GetMaxParallel sets default and returns value for `max-parallel` +func (s Strategy) GetMaxParallel() int { + // MaxParallel default value is `GitHub will maximize the number of jobs run in parallel depending on the available runners on GitHub-hosted virtual machines` + // So I take the liberty to hardcode default limit to 4 and this is because: + // 1: tl;dr: self-hosted does only 1 parallel job - https://github.com/actions/runner/issues/639#issuecomment-825212735 + // 2: GH has 20 parallel job limit (for free tier) - https://github.com/github/docs/blob/3ae84420bd10997bb5f35f629ebb7160fe776eae/content/actions/reference/usage-limits-billing-and-administration.md?plain=1#L45 + // 3: I want to add support for MaxParallel to act and 20! 
parallel jobs is a bit overkill IMHO + maxParallel := 4 + if s.MaxParallelString != "" { + var err error + if maxParallel, err = strconv.Atoi(s.MaxParallelString); err != nil { + log.Errorf("Failed to parse 'max-parallel' option: %v", err) + } + } + return maxParallel +} + +// GetFailFast sets default and returns value for `fail-fast` +func (s Strategy) GetFailFast() bool { + // FailFast option is true by default: https://github.com/github/docs/blob/3ae84420bd10997bb5f35f629ebb7160fe776eae/content/actions/reference/workflow-syntax-for-github-actions.md?plain=1#L1107 + failFast := true + log.Debug(s.FailFastString) + if s.FailFastString != "" { + var err error + if failFast, err = strconv.ParseBool(s.FailFastString); err != nil { + log.Errorf("Failed to parse 'fail-fast' option: %v", err) + } + } + return failFast +} + +func (j *Job) InheritSecrets() bool { + if j.RawSecrets.Kind != yaml.ScalarNode { + return false + } + + var val string + if !decodeNode(j.RawSecrets, &val) { + return false + } + + return val == "inherit" +} + +func (j *Job) Secrets() map[string]string { + if j.RawSecrets.Kind != yaml.MappingNode { + return nil + } + + var val map[string]string + if !decodeNode(j.RawSecrets, &val) { + return nil + } + + return val +} + +// Container details for the job +func (j *Job) Container() *ContainerSpec { + var val *ContainerSpec + switch j.RawContainer.Kind { + case yaml.ScalarNode: + val = new(ContainerSpec) + if !decodeNode(j.RawContainer, &val.Image) { + return nil + } + case yaml.MappingNode: + val = new(ContainerSpec) + if !decodeNode(j.RawContainer, val) { + return nil + } + } + return val +} + +// Needs list for Job +func (j *Job) Needs() []string { + switch j.RawNeeds.Kind { + case yaml.ScalarNode: + var val string + if !decodeNode(j.RawNeeds, &val) { + return nil + } + return []string{val} + case yaml.SequenceNode: + var val []string + if !decodeNode(j.RawNeeds, &val) { + return nil + } + return val + } + return nil +} + +// RunsOn list for Job +func (j *Job) RunsOn() []string { + switch j.RawRunsOn.Kind { + case yaml.MappingNode: + var val struct { + Group string + Labels yaml.Node + } + + if !decodeNode(j.RawRunsOn, &val) { + return nil + } + + labels := nodeAsStringSlice(val.Labels) + + if val.Group != "" { + labels = append(labels, val.Group) + } + + return labels + default: + return nodeAsStringSlice(j.RawRunsOn) + } +} + +func nodeAsStringSlice(node yaml.Node) []string { + switch node.Kind { + case yaml.ScalarNode: + var val string + if !decodeNode(node, &val) { + return nil + } + return []string{val} + case yaml.SequenceNode: + var val []string + if !decodeNode(node, &val) { + return nil + } + return val + } + return nil +} + +func environment(yml yaml.Node) map[string]string { + env := make(map[string]string) + if yml.Kind == yaml.MappingNode { + if !decodeNode(yml, &env) { + return nil + } + } + return env +} + +// Environments returns string-based key=value map for a job +func (j *Job) Environment() map[string]string { + return environment(j.Env) +} + +// Matrix decodes RawMatrix YAML node +func (j *Job) Matrix() map[string][]interface{} { + if j.Strategy.RawMatrix.Kind == yaml.MappingNode { + var val map[string][]interface{} + if !decodeNode(j.Strategy.RawMatrix, &val) { + return nil + } + return val + } + return nil +} + +// GetMatrixes returns the matrix cross product +// It skips includes and hard fails excludes for non-existing keys +// +//nolint:gocyclo +func (j *Job) GetMatrixes() ([]map[string]interface{}, error) { + matrixes := 
make([]map[string]interface{}, 0) + if j.Strategy != nil { + j.Strategy.FailFast = j.Strategy.GetFailFast() + j.Strategy.MaxParallel = j.Strategy.GetMaxParallel() + + if m := j.Matrix(); m != nil { + includes := make([]map[string]interface{}, 0) + extraIncludes := make([]map[string]interface{}, 0) + for _, v := range m["include"] { + switch t := v.(type) { + case []interface{}: + for _, i := range t { + i := i.(map[string]interface{}) + extraInclude := true + for k := range i { + if _, ok := m[k]; ok { + includes = append(includes, i) + extraInclude = false + break + } + } + if extraInclude { + extraIncludes = append(extraIncludes, i) + } + } + case interface{}: + v := v.(map[string]interface{}) + extraInclude := true + for k := range v { + if _, ok := m[k]; ok { + includes = append(includes, v) + extraInclude = false + break + } + } + if extraInclude { + extraIncludes = append(extraIncludes, v) + } + } + } + delete(m, "include") + + excludes := make([]map[string]interface{}, 0) + for _, e := range m["exclude"] { + e := e.(map[string]interface{}) + for k := range e { + if _, ok := m[k]; ok { + excludes = append(excludes, e) + } else { + // We fail completely here because that's what GitHub does for non-existing matrix keys, fail on exclude, silent skip on include + return nil, fmt.Errorf("the workflow is not valid. Matrix exclude key %q does not match any key within the matrix", k) + } + } + } + delete(m, "exclude") + + matrixProduct := common.CartesianProduct(m) + MATRIX: + for _, matrix := range matrixProduct { + for _, exclude := range excludes { + if commonKeysMatch(matrix, exclude) { + log.Debugf("Skipping matrix '%v' due to exclude '%v'", matrix, exclude) + continue MATRIX + } + } + matrixes = append(matrixes, matrix) + } + for _, include := range includes { + matched := false + for _, matrix := range matrixes { + if commonKeysMatch2(matrix, include, m) { + matched = true + log.Debugf("Adding include values '%v' to existing entry", include) + for k, v := range include { + matrix[k] = v + } + } + } + if !matched { + extraIncludes = append(extraIncludes, include) + } + } + for _, include := range extraIncludes { + log.Debugf("Adding include '%v'", include) + matrixes = append(matrixes, include) + } + if len(matrixes) == 0 { + matrixes = append(matrixes, make(map[string]interface{})) + } + } else { + matrixes = append(matrixes, make(map[string]interface{})) + } + } else { + matrixes = append(matrixes, make(map[string]interface{})) + log.Debugf("Empty Strategy, matrixes=%v", matrixes) + } + return matrixes, nil +} + +func commonKeysMatch(a map[string]interface{}, b map[string]interface{}) bool { + for aKey, aVal := range a { + if bVal, ok := b[aKey]; ok && !reflect.DeepEqual(aVal, bVal) { + return false + } + } + return true +} + +func commonKeysMatch2(a map[string]interface{}, b map[string]interface{}, m map[string][]interface{}) bool { + for aKey, aVal := range a { + _, useKey := m[aKey] + if bVal, ok := b[aKey]; useKey && ok && !reflect.DeepEqual(aVal, bVal) { + return false + } + } + return true +} + +// JobType describes what type of job we are about to run +type JobType int + +const ( + // JobTypeDefault is all jobs that have a `run` attribute + JobTypeDefault JobType = iota + + // JobTypeReusableWorkflowLocal is all jobs that have a `uses` that is a local workflow in the .github/workflows directory + JobTypeReusableWorkflowLocal + + // JobTypeReusableWorkflowRemote is all jobs that have a `uses` that references a workflow file in a github repo + JobTypeReusableWorkflowRemote + + 
// JobTypeInvalid represents a job which is not configured correctly + JobTypeInvalid +) + +func (j JobType) String() string { + switch j { + case JobTypeDefault: + return "default" + case JobTypeReusableWorkflowLocal: + return "local-reusable-workflow" + case JobTypeReusableWorkflowRemote: + return "remote-reusable-workflow" + } + return "unknown" +} + +// Type returns the type of the job +func (j *Job) Type() (JobType, error) { + isReusable := j.Uses != "" + + if isReusable { + isYaml, _ := regexp.MatchString(`\.(ya?ml)(?:$|@)`, j.Uses) + + if isYaml { + isLocalPath := strings.HasPrefix(j.Uses, "./") + isRemotePath, _ := regexp.MatchString(`^[^.](.+?/){2,}.+\.ya?ml@`, j.Uses) + hasVersion, _ := regexp.MatchString(`\.ya?ml@`, j.Uses) + + if isLocalPath { + return JobTypeReusableWorkflowLocal, nil + } else if isRemotePath && hasVersion { + return JobTypeReusableWorkflowRemote, nil + } + } + + return JobTypeInvalid, fmt.Errorf("`uses` key references invalid workflow path '%s'. Must start with './' if it's a local workflow, or must start with '<org>/<repo>/' and include an '@' if it's a remote workflow", j.Uses) + } + + return JobTypeDefault, nil +} + +// ContainerSpec is the specification of the container to use for the job +type ContainerSpec struct { + Image string `yaml:"image"` + Env map[string]string `yaml:"env"` + Ports []string `yaml:"ports"` + Volumes []string `yaml:"volumes"` + Options string `yaml:"options"` + Credentials map[string]string `yaml:"credentials"` + Entrypoint string + Args string + Name string + Reuse bool + + // Gitea specific + Cmd []string `yaml:"cmd"` +} + +// Step is the structure of one step in a job +type Step struct { + Number int `yaml:"-"` + ID string `yaml:"id"` + If yaml.Node `yaml:"if"` + Name string `yaml:"name"` + Uses string `yaml:"uses"` + Run string `yaml:"run"` + WorkingDirectory string `yaml:"working-directory"` + Shell string `yaml:"shell"` + Env yaml.Node `yaml:"env"` + With map[string]string `yaml:"with"` + RawContinueOnError string `yaml:"continue-on-error"` + TimeoutMinutes string `yaml:"timeout-minutes"` +} + +// String gets the name of step +func (s *Step) String() string { + if s.Name != "" { + return s.Name + } else if s.Uses != "" { + return s.Uses + } else if s.Run != "" { + return s.Run + } + return s.ID +} + +// Environments returns string-based key=value map for a step +func (s *Step) Environment() map[string]string { + return environment(s.Env) +} + +// GetEnv gets the env for a step +func (s *Step) GetEnv() map[string]string { + env := s.Environment() + + for k, v := range s.With { + envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(k), "_") + envKey = fmt.Sprintf("INPUT_%s", strings.ToUpper(envKey)) + env[envKey] = v + } + return env +} + +// ShellCommand returns the command for the shell +func (s *Step) ShellCommand() string { + shellCommand := "" + + // Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17 + switch s.Shell { + case "", "bash": + shellCommand = "bash --noprofile --norc -e -o pipefail {0}" + case "pwsh": + shellCommand = "pwsh -command . '{0}'" + case "python": + shellCommand = "python {0}" + case "sh": + shellCommand = "sh -e {0}" + case "cmd": + shellCommand = "cmd /D /E:ON /V:OFF /S /C \"CALL \"{0}\"\"" + case "powershell": + shellCommand = "powershell -command . 
'{0}'" + default: + shellCommand = s.Shell + } + return shellCommand +} + +// StepType describes what type of step we are about to run +type StepType int + +const ( + // StepTypeRun is all steps that have a `run` attribute + StepTypeRun StepType = iota + + // StepTypeUsesDockerURL is all steps that have a `uses` that is of the form `docker://...` + StepTypeUsesDockerURL + + // StepTypeUsesActionLocal is all steps that have a `uses` that is a local action in a subdirectory + StepTypeUsesActionLocal + + // StepTypeUsesActionRemote is all steps that have a `uses` that is a reference to a github repo + StepTypeUsesActionRemote + + // StepTypeReusableWorkflowLocal is all steps that have a `uses` that is a local workflow in the .github/workflows directory + StepTypeReusableWorkflowLocal + + // StepTypeReusableWorkflowRemote is all steps that have a `uses` that references a workflow file in a github repo + StepTypeReusableWorkflowRemote + + // StepTypeInvalid is for steps that have invalid step action + StepTypeInvalid +) + +func (s StepType) String() string { + switch s { + case StepTypeInvalid: + return "invalid" + case StepTypeRun: + return "run" + case StepTypeUsesActionLocal: + return "local-action" + case StepTypeUsesActionRemote: + return "remote-action" + case StepTypeUsesDockerURL: + return "docker" + case StepTypeReusableWorkflowLocal: + return "local-reusable-workflow" + case StepTypeReusableWorkflowRemote: + return "remote-reusable-workflow" + } + return "unknown" +} + +// Type returns the type of the step +func (s *Step) Type() StepType { + if s.Run == "" && s.Uses == "" { + return StepTypeInvalid + } + + if s.Run != "" { + if s.Uses != "" { + return StepTypeInvalid + } + return StepTypeRun + } else if strings.HasPrefix(s.Uses, "docker://") { + return StepTypeUsesDockerURL + } else if strings.HasPrefix(s.Uses, "./.github/workflows") && (strings.HasSuffix(s.Uses, ".yml") || strings.HasSuffix(s.Uses, ".yaml")) { + return StepTypeReusableWorkflowLocal + } else if !strings.HasPrefix(s.Uses, "./") && strings.Contains(s.Uses, ".github/workflows") && (strings.Contains(s.Uses, ".yml@") || strings.Contains(s.Uses, ".yaml@")) { + return StepTypeReusableWorkflowRemote + } else if strings.HasPrefix(s.Uses, "./") { + return StepTypeUsesActionLocal + } + return StepTypeUsesActionRemote +} + +// ReadWorkflow returns a list of jobs for a given workflow file reader +func ReadWorkflow(in io.Reader) (*Workflow, error) { + w := new(Workflow) + err := yaml.NewDecoder(in).Decode(w) + return w, err +} + +// GetJob will get a job by name in the workflow +func (w *Workflow) GetJob(jobID string) *Job { + for id, j := range w.Jobs { + if jobID == id { + if j.Name == "" { + j.Name = id + } + if j.If.Value == "" { + j.If.Value = "success()" + } + return j + } + } + return nil +} + +// GetJobIDs will get all the job names in the workflow +func (w *Workflow) GetJobIDs() []string { + ids := make([]string, 0) + for id := range w.Jobs { + ids = append(ids, id) + } + return ids +} + +var OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) { + log.Errorf("Failed to decode node %v into %T: %v", node, out, err) +} + +func decodeNode(node yaml.Node, out interface{}) bool { + if err := node.Decode(out); err != nil { + if OnDecodeNodeError != nil { + OnDecodeNodeError(node, out, err) + } + return false + } + return true +} diff --git a/pkg/model/workflow_test.go b/pkg/model/workflow_test.go new file mode 100644 index 0000000..d253906 --- /dev/null +++ b/pkg/model/workflow_test.go @@ -0,0 +1,624 @@ +package 
model + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReadWorkflow_ScheduleEvent(t *testing.T) { + yaml := ` +name: local-action-docker-url +on: + schedule: + - cron: '30 5 * * 1,3' + - cron: '30 5 * * 2,4' + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + schedules := workflow.OnEvent("schedule") + assert.Len(t, schedules, 2) + + newSchedules := workflow.OnSchedule() + assert.Len(t, newSchedules, 2) + + assert.Equal(t, "30 5 * * 1,3", newSchedules[0]) + assert.Equal(t, "30 5 * * 2,4", newSchedules[1]) + + yaml = ` +name: local-action-docker-url +on: + schedule: + test: '30 5 * * 1,3' + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err = ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + newSchedules = workflow.OnSchedule() + assert.Len(t, newSchedules, 0) + + yaml = ` +name: local-action-docker-url +on: + schedule: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err = ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + newSchedules = workflow.OnSchedule() + assert.Len(t, newSchedules, 0) + + yaml = ` +name: local-action-docker-url +on: [push, tag] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err = ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + newSchedules = workflow.OnSchedule() + assert.Len(t, newSchedules, 0) +} + +func TestReadWorkflow_StringEvent(t *testing.T) { + yaml := ` +name: local-action-docker-url +on: push + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + assert.Len(t, workflow.On(), 1) + assert.Contains(t, workflow.On(), "push") +} + +func TestReadWorkflow_ListEvent(t *testing.T) { + yaml := ` +name: local-action-docker-url +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + + assert.Len(t, workflow.On(), 2) + assert.Contains(t, workflow.On(), "push") + assert.Contains(t, workflow.On(), "pull_request") +} + +func TestReadWorkflow_MapEvent(t *testing.T) { + yaml := ` +name: local-action-docker-url +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: ./actions/docker-url +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + assert.Len(t, workflow.On(), 2) + assert.Contains(t, workflow.On(), "push") + assert.Contains(t, workflow.On(), "pull_request") +} + +func TestReadWorkflow_DecodeNodeError(t *testing.T) { + yaml := ` +on: + push: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - run: echo + env: + foo: {{ a }} +` + + workflow, err := ReadWorkflow(strings.NewReader(yaml)) + assert.NoError(t, err, "read workflow should succeed") + assert.Nil(t, workflow.GetJob("test").Steps[0].GetEnv()) +} + +func TestReadWorkflow_RunsOnLabels(t *testing.T) { + yaml := ` +name: local-action-docker-url + +jobs: + test: + 
+
+func TestReadWorkflow_RunsOnLabels(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: ubuntu-latest
+    steps:
+      - uses: ./actions/docker-url`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"})
+}
+
+func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on:
+      labels: [ubuntu-latest]
+      group: linux
+    steps:
+      - uses: ./actions/docker-url`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"})
+}
+
+func TestReadWorkflow_StringContainer(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container: nginx:latest
+    runs-on: ubuntu-latest
+    steps:
+      - uses: ./actions/docker-url
+  test2:
+    container:
+      image: nginx:latest
+      env:
+        foo: bar
+    runs-on: ubuntu-latest
+    steps:
+      - uses: ./actions/docker-url
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 2)
+	assert.Contains(t, workflow.Jobs["test"].Container().Image, "nginx:latest")
+	assert.Contains(t, workflow.Jobs["test2"].Container().Image, "nginx:latest")
+	assert.Contains(t, workflow.Jobs["test2"].Container().Env["foo"], "bar")
+}
+
+func TestReadWorkflow_ObjectContainer(t *testing.T) {
+	yaml := `
+name: local-action-docker-url
+
+jobs:
+  test:
+    container:
+      image: r.example.org/something:latest
+      credentials:
+        username: registry-username
+        password: registry-password
+      env:
+        HOME: /home/user
+      volumes:
+        - my_docker_volume:/volume_mount
+        - /data/my_data
+        - /source/directory:/destination/directory
+    runs-on: ubuntu-latest
+    steps:
+      - uses: ./actions/docker-url
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 1)
+
+	container := workflow.GetJob("test").Container()
+
+	assert.Contains(t, container.Image, "r.example.org/something:latest")
+	assert.Contains(t, container.Env["HOME"], "/home/user")
+	assert.Contains(t, container.Credentials["username"], "registry-username")
+	assert.Contains(t, container.Credentials["password"], "registry-password")
+	assert.ElementsMatch(t, container.Volumes, []string{
+		"my_docker_volume:/volume_mount",
+		"/data/my_data",
+		"/source/directory:/destination/directory",
+	})
+}
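Both container declarations, the bare image string and the full object, are read back through the same Container() accessor with Image, Env, Credentials, and Volumes fields. A minimal sketch of the string form, again assuming compilation inside pkg/model; containerForms and the workflow text are invented.

package model

import (
	"fmt"
	"strings"
)

// containerForms parses the short string form of `container:` and reads it
// back through the same accessor the object form uses.
func containerForms() {
	src := `
jobs:
  short:
    container: nginx:latest
    runs-on: ubuntu-latest
    steps:
      - run: "true"
`
	wf, err := ReadWorkflow(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	c := wf.GetJob("short").Container()
	fmt.Println(c.Image) // nginx:latest
}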
+
+func TestReadWorkflow_JobTypes(t *testing.T) {
+	yaml := `
+name: invalid job definition
+
+jobs:
+  default-job:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo
+  remote-reusable-workflow-yml:
+    uses: remote/repo/some/path/to/workflow.yml@main
+  remote-reusable-workflow-yaml:
+    uses: remote/repo/some/path/to/workflow.yaml@main
+  remote-reusable-workflow-custom-path:
+    uses: remote/repo/path/to/workflow.yml@main
+  local-reusable-workflow-yml:
+    uses: ./some/path/to/workflow.yml
+  local-reusable-workflow-yaml:
+    uses: ./some/path/to/workflow.yaml
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 6)
+
+	jobType, err := workflow.Jobs["default-job"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeDefault, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-yml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-yaml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-custom-path"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowRemote, jobType)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-yml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowLocal, jobType)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-yaml"].Type()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, JobTypeReusableWorkflowLocal, jobType)
+}
+
+func TestReadWorkflow_JobTypes_InvalidPath(t *testing.T) {
+	yaml := `
+name: invalid job definition
+
+jobs:
+  remote-reusable-workflow-missing-version:
+    uses: remote/repo/some/path/to/workflow.yml
+  remote-reusable-workflow-bad-extension:
+    uses: remote/repo/some/path/to/workflow.json
+  local-reusable-workflow-bad-extension:
+    uses: ./some/path/to/workflow.json
+  local-reusable-workflow-bad-path:
+    uses: some/path/to/workflow.yaml
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 4)
+
+	jobType, err := workflow.Jobs["remote-reusable-workflow-missing-version"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["remote-reusable-workflow-bad-extension"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-bad-extension"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+
+	jobType, err = workflow.Jobs["local-reusable-workflow-bad-path"].Type()
+	assert.Equal(t, JobTypeInvalid, jobType)
+	assert.NotEqual(t, nil, err)
+}
+
+func TestReadWorkflow_StepsTypes(t *testing.T) {
+	yaml := `
+name: invalid step definition
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: test1
+        uses: actions/checkout@v2
+        run: echo
+      - name: test2
+        run: echo
+      - name: test3
+        uses: actions/checkout@v2
+      - name: test4
+        uses: docker://nginx:latest
+      - name: test5
+        uses: ./local-action
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 1)
+	assert.Len(t, workflow.Jobs["test"].Steps, 5)
+	assert.Equal(t, workflow.Jobs["test"].Steps[0].Type(), StepTypeInvalid)
+	assert.Equal(t, workflow.Jobs["test"].Steps[1].Type(), StepTypeRun)
+	assert.Equal(t, workflow.Jobs["test"].Steps[2].Type(), StepTypeUsesActionRemote)
+	assert.Equal(t, workflow.Jobs["test"].Steps[3].Type(), StepTypeUsesDockerURL)
+	assert.Equal(t, workflow.Jobs["test"].Steps[4].Type(), StepTypeUsesActionLocal)
+}
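Job.Type applies the path rules the two tests above pin down: a remote reusable workflow needs a repo path ending in .yml or .yaml plus an @ref, a local one needs a ./-prefixed .yml or .yaml path, and any other uses value is invalid. A small sketch, assuming compilation inside pkg/model; classifyJobs and the workflow text are invented.

package model

import (
	"fmt"
	"strings"
)

// classifyJobs distinguishes a normal job from a local reusable-workflow call.
func classifyJobs() {
	src := `
jobs:
  plain:
    runs-on: ubuntu-latest
    steps:
      - run: "true"
  reuse:
    uses: ./.github/workflows/ci.yml
`
	wf, _ := ReadWorkflow(strings.NewReader(src))
	for _, id := range wf.GetJobIDs() {
		switch jt, err := wf.Jobs[id].Type(); jt {
		case JobTypeDefault:
			fmt.Println(id, "-> default job")
		case JobTypeReusableWorkflowLocal:
			fmt.Println(id, "-> local reusable workflow")
		case JobTypeReusableWorkflowRemote:
			fmt.Println(id, "-> remote reusable workflow")
		default:
			fmt.Println(id, "-> invalid:", err)
		}
	}
}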
+
+// See: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs
+func TestReadWorkflow_JobOutputs(t *testing.T) {
+	yaml := `
+name: job outputs definition
+
+jobs:
+  test1:
+    runs-on: ubuntu-latest
+    steps:
+      - id: test1_1
+        run: |
+          echo "::set-output name=a_key::some-a_value"
+          echo "::set-output name=b-key::some-b-value"
+    outputs:
+      some_a_key: ${{ steps.test1_1.outputs.a_key }}
+      some-b-key: ${{ steps.test1_1.outputs.b-key }}
+
+  test2:
+    runs-on: ubuntu-latest
+    needs:
+      - test1
+    steps:
+      - name: test2_1
+        run: |
+          echo "${{ needs.test1.outputs.some_a_key }}"
+          echo "${{ needs.test1.outputs.some-b-key }}"
+`
+
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	assert.Len(t, workflow.Jobs, 2)
+
+	assert.Len(t, workflow.Jobs["test1"].Steps, 1)
+	assert.Equal(t, StepTypeRun, workflow.Jobs["test1"].Steps[0].Type())
+	assert.Equal(t, "test1_1", workflow.Jobs["test1"].Steps[0].ID)
+	assert.Len(t, workflow.Jobs["test1"].Outputs, 2)
+	assert.Contains(t, workflow.Jobs["test1"].Outputs, "some_a_key")
+	assert.Contains(t, workflow.Jobs["test1"].Outputs, "some-b-key")
+	assert.Equal(t, "${{ steps.test1_1.outputs.a_key }}", workflow.Jobs["test1"].Outputs["some_a_key"])
+	assert.Equal(t, "${{ steps.test1_1.outputs.b-key }}", workflow.Jobs["test1"].Outputs["some-b-key"])
+}
+
+func TestReadWorkflow_Strategy(t *testing.T) {
+	w, err := NewWorkflowPlanner("testdata/strategy/push.yml", true)
+	assert.NoError(t, err)
+
+	p, err := w.PlanJob("strategy-only-max-parallel")
+	assert.NoError(t, err)
+
+	assert.Equal(t, len(p.Stages), 1)
+	assert.Equal(t, len(p.Stages[0].Runs), 1)
+
+	wf := p.Stages[0].Runs[0].Workflow
+
+	job := wf.Jobs["strategy-only-max-parallel"]
+	matrixes, err := job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
+	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
+	assert.Equal(t, job.Strategy.MaxParallel, 2)
+	assert.Equal(t, job.Strategy.FailFast, true)
+
+	job = wf.Jobs["strategy-only-fail-fast"]
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
+	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
+	assert.Equal(t, job.Strategy.MaxParallel, 4)
+	assert.Equal(t, job.Strategy.FailFast, false)
+
+	job = wf.Jobs["strategy-no-matrix"]
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes, []map[string]interface{}{{}})
+	assert.Equal(t, job.Matrix(), map[string][]interface{}(nil))
+	assert.Equal(t, job.Strategy.MaxParallel, 2)
+	assert.Equal(t, job.Strategy.FailFast, false)
+
+	job = wf.Jobs["strategy-all"]
+	matrixes, err = job.GetMatrixes()
+	assert.NoError(t, err)
+	assert.Equal(t, matrixes,
+		[]map[string]interface{}{
+			{"datacenter": "site-c", "node-version": "14.x", "site": "staging"},
+			{"datacenter": "site-c", "node-version": "16.x", "site": "staging"},
+			{"datacenter": "site-d", "node-version": "16.x", "site": "staging"},
+			{"php-version": 5.4},
+			{"datacenter": "site-a", "node-version": "10.x", "site": "prod"},
+			{"datacenter": "site-b", "node-version": "12.x", "site": "dev"},
+		},
+	)
+	assert.Equal(t, job.Matrix(),
+		map[string][]interface{}{
+			"datacenter": {"site-c", "site-d"},
+			"exclude": {
+				map[string]interface{}{"datacenter": "site-d", "node-version": "14.x", "site": "staging"},
+			},
+			"include": {
+				map[string]interface{}{"php-version": 5.4},
+				map[string]interface{}{"datacenter": "site-a", "node-version": "10.x", "site": "prod"},
+				map[string]interface{}{"datacenter": "site-b", "node-version": "12.x", "site": "dev"},
+			},
+			"node-version": {"14.x", "16.x"},
+			"site":         {"staging"},
+		},
+	)
+	assert.Equal(t, job.Strategy.MaxParallel, 2)
+	assert.Equal(t, job.Strategy.FailFast, false)
+}
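GetMatrixes flattens a strategy matrix into one map per run: exclude removes matching grid combinations, and include rows that match no combination are appended as extra runs, as the strategy-all assertions show. A sketch under the assumption that GetMatrixes can be called on a freshly parsed job just as the planner-based test calls it on its jobs; expandMatrix and the matrix values are invented.

package model

import (
	"fmt"
	"strings"
)

// expandMatrix prints one map per planned run: the 2x2 grid minus the
// excluded pair, plus the extra include row.
func expandMatrix() {
	src := `
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        node: ["18", "20"]
        exclude:
          - os: macos-latest
            node: "18"
        include:
          - os: windows-latest
            node: "20"
    steps:
      - run: "true"
`
	wf, err := ReadWorkflow(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	matrixes, err := wf.GetJob("build").GetMatrixes()
	if err != nil {
		panic(err)
	}
	for _, m := range matrixes {
		fmt.Println(m)
	}
}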
+
+func TestStep_ShellCommand(t *testing.T) {
+	tests := []struct {
+		shell string
+		want  string
+	}{
+		{"pwsh -v '. {0}'", "pwsh -v '. {0}'"},
+		{"pwsh", "pwsh -command . '{0}'"},
+		{"powershell", "powershell -command . '{0}'"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.shell, func(t *testing.T) {
+			got := (&Step{Shell: tt.shell}).ShellCommand()
+			assert.Equal(t, got, tt.want)
+		})
+	}
+}
+
+func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
+	yaml := `
+    name: local-action-docker-url
+    `
+	workflow, err := ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch := workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+    name: local-action-docker-url
+    on: push
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+    name: local-action-docker-url
+    on: workflow_dispatch
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+    name: local-action-docker-url
+    on: [push, pull_request]
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+    name: local-action-docker-url
+    on: [push, workflow_dispatch]
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+    name: local-action-docker-url
+    on:
+      - push
+      - workflow_dispatch
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Nil(t, workflowDispatch.Inputs)
+
+	yaml = `
+    name: local-action-docker-url
+    on:
+      push:
+      pull_request:
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.Nil(t, workflowDispatch)
+
+	yaml = `
+    name: local-action-docker-url
+    on:
+      push:
+      pull_request:
+      workflow_dispatch:
+        inputs:
+          logLevel:
+            description: 'Log level'
+            required: true
+            default: 'warning'
+            type: choice
+            options:
+              - info
+              - warning
+              - debug
+    `
+	workflow, err = ReadWorkflow(strings.NewReader(yaml))
+	assert.NoError(t, err, "read workflow should succeed")
+	workflowDispatch = workflow.WorkflowDispatchConfig()
+	assert.NotNil(t, workflowDispatch)
+	assert.Equal(t, WorkflowDispatchInput{
+		Default:     "warning",
+		Description: "Log level",
+		Options: []string{
+			"info",
+			"warning",
+			"debug",
+		},
+		Required: true,
+		Type:     "choice",
+	}, workflowDispatch.Inputs["logLevel"])
+}
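WorkflowDispatchConfig returns nil unless workflow_dispatch appears among the triggers, and otherwise exposes the typed inputs asserted above. A closing sketch, again assuming an extra file inside pkg/model; dispatchDefaults and the workflow text are invented for the example.

package model

import (
	"fmt"
	"strings"
)

// dispatchDefaults collects the default value of every workflow_dispatch
// input, mirroring what the final test above asserts field by field.
func dispatchDefaults() {
	src := `
on:
  workflow_dispatch:
    inputs:
      logLevel:
        description: 'Log level'
        required: true
        default: 'warning'
        type: choice
        options: [info, warning, debug]
jobs:
  noop:
    runs-on: ubuntu-latest
    steps:
      - run: "true"
`
	wf, err := ReadWorkflow(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	cfg := wf.WorkflowDispatchConfig() // nil when workflow_dispatch is not a trigger
	if cfg == nil {
		return
	}
	defaults := map[string]string{}
	for name, in := range cfg.Inputs {
		defaults[name] = in.Default
	}
	fmt.Println(defaults) // map[logLevel:warning]
}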