65 files changed, 6053 insertions, 0 deletions
diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..7b6d2b2 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +Dockerfile +forgejo-runner diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..deec24a --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style = space +indent_size = 2 +tab_width = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.{go}] +indent_style = tab + +[Makefile] +indent_style = tab diff --git a/.forgejo/cascading-pr-setup-forgejo b/.forgejo/cascading-pr-setup-forgejo new file mode 100755 index 0000000..06472a7 --- /dev/null +++ b/.forgejo/cascading-pr-setup-forgejo @@ -0,0 +1,16 @@ +#!/bin/bash + +set -ex + +setup_forgejo=$1 +setup_forgejo_pr=$2 +runner=$3 +runner_pr=$4 + +url=$(jq --raw-output .head.repo.html_url < $runner_pr) +test "$url" != null +branch=$(jq --raw-output .head.ref < $runner_pr) +test "$branch" != null +cd $setup_forgejo +./utils/upgrade-runner.sh $url @$branch +date > last-upgrade diff --git a/.forgejo/labelscompare.py b/.forgejo/labelscompare.py new file mode 100644 index 0000000..2274d38 --- /dev/null +++ b/.forgejo/labelscompare.py @@ -0,0 +1,24 @@ +import json + +expectedLabels = { + "maintainer": "contact@forgejo.org", + "org.opencontainers.image.authors": "Forgejo", + "org.opencontainers.image.url": "https://forgejo.org", + "org.opencontainers.image.documentation": "https://forgejo.org/docs/latest/admin/actions/#forgejo-runner", + "org.opencontainers.image.source": "https://code.forgejo.org/forgejo/runner", + "org.opencontainers.image.version": "1.2.3", + "org.opencontainers.image.vendor": "Forgejo", + "org.opencontainers.image.licenses": "MIT", + "org.opencontainers.image.title": "Forgejo Runner", + "org.opencontainers.image.description": "A runner for Forgejo Actions.", +} +inspect = None +with open("./labels.json", "r") as f: + inspect = json.load(f) + +assert inspect +labels = inspect[0]["Config"]["Labels"] + +for k, v in expectedLabels.items(): + assert k in labels, f"'{k}' is missing from labels" + assert labels[k] == v, f"expected {v} in key {k}, found {labels[k]}" diff --git a/.forgejo/testdata/ipv6.yml b/.forgejo/testdata/ipv6.yml new file mode 100644 index 0000000..e0f7588 --- /dev/null +++ b/.forgejo/testdata/ipv6.yml @@ -0,0 +1,11 @@ +--- +on: push +jobs: + ipv6: + runs-on: docker + container: + image: code.forgejo.org/oci/debian:bookworm + steps: + - run: | + apt update -qq ; apt --quiet install -qq --yes iputils-ping + ping -c 1 -6 ::1 diff --git a/.forgejo/workflows/build-release-integration.yml b/.forgejo/workflows/build-release-integration.yml new file mode 100644 index 0000000..7f92218 --- /dev/null +++ b/.forgejo/workflows/build-release-integration.yml @@ -0,0 +1,90 @@ +name: Integration tests for the release process + +on: + push: + paths: + - go.mod + - Dockerfile + - .forgejo/workflows/build-release.yml + - .forgejo/workflows/build-release-integration.yml + pull_request: + paths: + - go.mod + - Dockerfile + - .forgejo/workflows/build-release.yml + - .forgejo/workflows/build-release-integration.yml + +jobs: + release-simulation: + runs-on: self-hosted + if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-release' + steps: + - uses: actions/checkout@v3 + + - id: forgejo + uses: https://code.forgejo.org/actions/setup-forgejo@v1 + with: + user: root + password: admin1234 + image-version: 1.20 + lxc-ip-prefix: 10.0.9 + + - name: publish + run: | + set -x + 
+ version=1.2.3 + cat > /etc/docker/daemon.json <<EOF + { + "insecure-registries" : ["${{ steps.forgejo.outputs.host-port }}"] + } + EOF + systemctl restart docker + + dir=$(mktemp -d) + trap "rm -fr $dir" EXIT + + url=http://root:admin1234@${{ steps.forgejo.outputs.host-port }} + export FORGEJO_RUNNER_LOGS="${{ steps.forgejo.outputs.runner-logs }}" + + # + # Create a new project with the runner and the release workflow only + # + rsync -a --exclude .git ./ $dir/ + rm $(find $dir/.forgejo/workflows/*.yml | grep -v build-release.yml) + forgejo-test-helper.sh push $dir $url root runner + sha=$(forgejo-test-helper.sh branch_tip $url root/runner main) + + # + # Push a tag to trigger the release workflow and wait for it to complete + # + forgejo-curl.sh api_json --data-raw '{"tag_name": "v'$version'", "target": "'$sha'"}' $url/api/v1/repos/root/runner/tags + LOOPS=180 forgejo-test-helper.sh wait_success "$url" root/runner $sha + + # + # uncomment to see the logs even when everything is reported to be working ok + # + #cat $FORGEJO_RUNNER_LOGS + + # + # Minimal sanity checks. e2e test is for the setup-forgejo action + # + for arch in amd64 arm64 ; do + binary=forgejo-runner-$version-linux-$arch + for suffix in '' '.xz' ; do + curl --fail -L -sS $url/root/runner/releases/download/v$version/$binary$suffix > $binary$suffix + if test "$suffix" = .xz ; then + unxz --keep $binary$suffix + fi + chmod +x $binary + ./$binary --version | grep $version + curl --fail -L -sS $url/root/runner/releases/download/v$version/$binary$suffix.sha256 > $binary$suffix.sha256 + shasum -a 256 --check $binary$suffix.sha256 + rm $binary$suffix + done + done + + docker pull ${{ steps.forgejo.outputs.host-port }}/root/runner:$version + + docker inspect ${{ steps.forgejo.outputs.host-port}}/root/runner:$version > labels.json + python3 .forgejo/labelscompare.py diff --git a/.forgejo/workflows/build-release.yml b/.forgejo/workflows/build-release.yml new file mode 100644 index 0000000..4e66a0a --- /dev/null +++ b/.forgejo/workflows/build-release.yml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: MIT +# +# https://code.forgejo.org/forgejo/runner +# +# Build the runner binaries and OCI images +# +# ROLE: forgejo-integration +# DOER: release-team +# TOKEN: <generated from codeberg.org/release-team> +# +name: Build release + +on: + push: + tags: 'v*' + +jobs: + release: + runs-on: self-hosted + # root is used for testing, allow it + if: secrets.ROLE == 'forgejo-integration' || github.repository_owner == 'root' + steps: + - uses: actions/checkout@v3 + + - name: Increase the verbosity when there are no secrets + id: verbose + run: | + if test -z "${{ secrets.TOKEN }}"; then + value=true + else + value=false + fi + echo "value=$value" >> "$GITHUB_OUTPUT" + + - name: Sanitize the name of the repository + id: repository + run: | + echo "value=${GITHUB_REPOSITORY##*/}" >> "$GITHUB_OUTPUT" + + - name: create test TOKEN + id: token + if: ${{ secrets.TOKEN == '' }} + run: | + apt-get -qq install -y jq + url="${{ env.GITHUB_SERVER_URL }}" + hostport=${url##http*://} + hostport=${hostport%%/} + doer=root + api=http://$doer:admin1234@$hostport/api/v1/users/$doer/tokens + curl -sS -X DELETE $api/release + token=$(curl -sS -X POST -H 'Content-Type: application/json' --data-raw '{"name": "release", "scopes": ["all"]}' $api | jq --raw-output .sha1) + echo "value=${token}" >> "$GITHUB_OUTPUT" + + - name: version from ref_name + id: tag-version + run: | + version=${GITHUB_REF_NAME##*v} + echo "value=$version" >> "$GITHUB_OUTPUT" + + - name: release 
notes + id: release-notes + run: | + anchor=${{ steps.tag-version.outputs.value }} + anchor=${anchor//./-} + cat >> "$GITHUB_OUTPUT" <<EOF + value<<ENDVAR + See https://code.forgejo.org/forgejo/runner/src/branch/main/RELEASE-NOTES.md#$anchor + ENDVAR + EOF + + - name: build without TOKEN + if: ${{ secrets.TOKEN == '' }} + uses: https://code.forgejo.org/forgejo/forgejo-build-publish/build@v5 + with: + forgejo: "${{ env.GITHUB_SERVER_URL }}" + owner: "${{ env.GITHUB_REPOSITORY_OWNER }}" + repository: "${{ steps.repository.outputs.value }}" + doer: root + sha: "${{ github.sha }}" + release-version: "${{ steps.tag-version.outputs.value }}" + token: ${{ steps.token.outputs.value }} + platforms: linux/amd64,linux/arm64 + release-notes: "${{ steps.release-notes.outputs.value }}" + binary-name: forgejo-runner + binary-path: /bin/forgejo-runner + verbose: ${{ steps.verbose.outputs.value }} + + - name: build with TOKEN + if: ${{ secrets.TOKEN != '' }} + uses: https://code.forgejo.org/forgejo/forgejo-build-publish/build@v5 + with: + forgejo: "${{ env.GITHUB_SERVER_URL }}" + owner: "${{ env.GITHUB_REPOSITORY_OWNER }}" + repository: "${{ steps.repository.outputs.value }}" + doer: "${{ secrets.DOER }}" + sha: "${{ github.sha }}" + release-version: "${{ steps.tag-version.outputs.value }}" + token: "${{ secrets.TOKEN }}" + platforms: linux/amd64,linux/arm64 + release-notes: "${{ steps.release-notes.outputs.value }}" + binary-name: forgejo-runner + binary-path: /bin/forgejo-runner + verbose: ${{ steps.verbose.outputs.value }} diff --git a/.forgejo/workflows/cascade-setup-forgejo.yml b/.forgejo/workflows/cascade-setup-forgejo.yml new file mode 100644 index 0000000..6d94f01 --- /dev/null +++ b/.forgejo/workflows/cascade-setup-forgejo.yml @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: MIT +on: + pull_request_target: + types: + - opened + - synchronize + - closed +jobs: + cascade: + runs-on: docker + if: vars.CASCADE != 'no' + steps: + - uses: actions/cascading-pr@v1 + with: + origin-url: ${{ env.GITHUB_SERVER_URL }} + origin-repo: forgejo/runner + origin-token: ${{ secrets.CASCADING_PR_ORIGIN }} + origin-pr: ${{ github.event.pull_request.number }} + destination-url: ${{ env.GITHUB_SERVER_URL }} + destination-repo: actions/setup-forgejo + destination-fork-repo: cascading-pr/setup-forgejo + destination-branch: main + destination-token: ${{ secrets.CASCADING_PR_DESTINATION }} + close-merge: true + update: .forgejo/cascading-pr-setup-forgejo diff --git a/.forgejo/workflows/example-docker-compose.yml b/.forgejo/workflows/example-docker-compose.yml new file mode 100644 index 0000000..4e2f547 --- /dev/null +++ b/.forgejo/workflows/example-docker-compose.yml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: MIT +on: + push: + branches: + - 'main' + pull_request: + +jobs: + example-docker-compose: + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + + - name: Install docker + run: | + apt-get update -qq + export DEBIAN_FRONTEND=noninteractive + apt-get install -qq -y ca-certificates curl gnupg + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + apt-get update -qq + apt-get install -qq -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin=2.20.2-1~debian.11~bullseye + docker version + # + # docker compose is prone to non backward compatible changes, pin it + # + apt-get install -qq -y docker-compose-plugin=2.20.2-1~debian.11~bullseye + docker compose version + + - name: run the example + run: | + set -x + cd examples/docker-compose + secret=$(openssl rand -hex 20) + sed -i -e "s/{SHARED_SECRET}/$secret/" compose-forgejo-and-runner.yml + cli="docker compose --progress quiet -f compose-forgejo-and-runner.yml" + # + # Launch Forgejo & the runner + # + $cli up -d + for delay in $(seq 60) ; do test -f /srv/runner-data/.runner && break ; sleep 30 ; done + test -f /srv/runner-data/.runner + # + # Run the demo workflow + # + cli="$cli -f compose-demo-workflow.yml" + $cli up -d demo-workflow + # + # Wait for the demo workflow to complete + # + success='DEMO WORKFLOW SUCCESS' + failure='DEMO WORKFLOW FAILURE' + for delay in $(seq 60) ; do + $cli logs demo-workflow > /tmp/out + grep --quiet "$success" /tmp/out && break + grep --quiet "$failure" /tmp/out && break + $cli ps --all + $cli logs --tail=20 runner-daemon demo-workflow + sleep 30 + done + grep --quiet "$success" /tmp/out + $cli logs runner-daemon > /tmp/runner.log + grep --quiet 'Start image=code.forgejo.org/oci/node:20-bookworm' /tmp/runner.log + + - name: full docker compose logs + if: always() + run: | + cd examples/docker-compose + docker compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml logs diff --git a/.forgejo/workflows/publish-release.yml b/.forgejo/workflows/publish-release.yml new file mode 100644 index 0000000..c888be2 --- /dev/null +++ b/.forgejo/workflows/publish-release.yml @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: MIT +# +# https://forgejo.octopuce.forgejo.org/forgejo-release/runner +# +# Copies & sign a release from code.forgejo.org/forgejo-integration/runner to code.forgejo.org/forgejo/runner +# +# ROLE: forgejo-release +# FORGEJO: https://code.forgejo.org +# FROM_OWNER: forgejo-integration +# TO_OWNER: forgejo +# DOER: release-team +# TOKEN: <generated from codeberg.org/release-team> +# GPG_PRIVATE_KEY: <XYZ> +# GPG_PASSPHRASE: <ABC> +# +name: pubish + +on: + push: + tags: 'v*' + +jobs: + publish: + runs-on: self-hosted + if: secrets.DOER != '' && secrets.FORGEJO != '' && secrets.TO_OWNER != '' && secrets.FROM_OWNER != '' && secrets.TOKEN != '' + steps: + - uses: actions/checkout@v3 + + - name: copy & sign + uses: https://code.forgejo.org/forgejo/forgejo-build-publish/publish@v1 + with: + forgejo: ${{ secrets.FORGEJO }} + from-owner: ${{ secrets.FROM_OWNER }} + to-owner: ${{ secrets.TO_OWNER }} + repo: "runner" + ref-name: ${{ github.ref_name }} + container-suffixes: " " + doer: ${{ secrets.DOER }} + token: ${{ secrets.TOKEN }} + gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg-passphrase: ${{ secrets.GPG_PASSPHRASE }} + verbose: ${{ secrets.VERBOSE }} diff --git a/.forgejo/workflows/test.yml b/.forgejo/workflows/test.yml new file mode 100644 index 0000000..677ab68 --- /dev/null +++ b/.forgejo/workflows/test.yml @@ -0,0 +1,108 @@ +name: checks +on: + push: + branches: + - 'main' + pull_request: + +env: + FORGEJO_HOST_PORT: 'forgejo:3000' + FORGEJO_ADMIN_USER: 'root' + FORGEJO_ADMIN_PASSWORD: 'admin1234' + FORGEJO_RUNNER_SECRET: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + FORGEJO_SCRIPT: | + 
/bin/s6-svscan /etc/s6 & sleep 10 ; su -c "forgejo admin user create --admin --username $FORGEJO_ADMIN_USER --password $FORGEJO_ADMIN_PASSWORD --email root@example.com" git && su -c "forgejo forgejo-cli actions register --labels docker --name therunner --secret $FORGEJO_RUNNER_SECRET" git && sleep infinity + GOPROXY: https://goproxy.io,direct + +jobs: + build-and-tests: + name: build and test + if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-experimental' && github.repository_owner != 'forgejo-release' + runs-on: docker + + services: + forgejo: + image: codeberg.org/forgejo/forgejo:1.21 + env: + FORGEJO__security__INSTALL_LOCK: "true" + FORGEJO__log__LEVEL: "debug" + FORGEJO__actions__ENABLED: "true" + FORGEJO_ADMIN_USER: ${{ env.FORGEJO_ADMIN_USER }} + FORGEJO_ADMIN_PASSWORD: ${{ env.FORGEJO_ADMIN_PASSWORD }} + FORGEJO_RUNNER_SECRET: ${{ env.FORGEJO_RUNNER_SECRET }} + cmd: + - 'bash' + - '-c' + - ${{ env.FORGEJO_SCRIPT }} + + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.21' + + - uses: actions/checkout@v4 + + - run: make vet + + - run: make build + + - uses: https://code.forgejo.org/actions/upload-artifact@v3 + with: + name: forgejo-runner + path: forgejo-runner + + - name: check the forgejo server is responding + run: | + apt-get update -qq + apt-get install -y -qq jq curl + test $FORGEJO_ADMIN_USER = $(curl -sS http://$FORGEJO_ADMIN_USER:$FORGEJO_ADMIN_PASSWORD@$FORGEJO_HOST_PORT/api/v1/user | jq --raw-output .login) + + - run: make FORGEJO_URL=http://$FORGEJO_HOST_PORT test + + runner-exec-tests: + needs: [build-and-tests] + name: runner exec tests + if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-experimental' && github.repository_owner != 'forgejo-release' + runs-on: self-hosted + + steps: + + - uses: actions/checkout@v4 + + - uses: https://code.forgejo.org/actions/download-artifact@v3 + with: + name: forgejo-runner + + - name: install docker + run: | + mkdir /etc/docker + cat > /etc/docker/daemon.json <<EOF + { + "ipv6": true, + "experimental": true, + "ip6tables": true, + "fixed-cidr-v6": "fd05:d0ca:1::/64", + "default-address-pools": [ + { + "base": "172.19.0.0/16", + "size": 24 + }, + { + "base": "fd05:d0ca:2::/104", + "size": 112 + } + ] + } + EOF + apt --quiet install --yes -qq docker.io + + - name: forgejo-runner exec --enable-ipv6 + run: | + set -x + chmod +x forgejo-runner + ./forgejo-runner exec --enable-ipv6 --workflows .forgejo/testdata/ipv6.yml + if ./forgejo-runner exec --workflows .forgejo/testdata/ipv6.yml >& /tmp/out ; then + cat /tmp/out + echo "IPv6 not enabled, should fail" + exit 1 + fi diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..6313b56 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3a3808c --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +*~ + +forgejo-runner +.env +.runner +coverage.txt +/gitea-vet +/config.yaml + +# MS VSCode +.vscode +__debug_bin +# gorelease binary folder +dist diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..41f9683 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,165 @@ +linters: + enable: + - gosimple + - typecheck + - govet + - errcheck + - staticcheck + - unused + - dupl + #- gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time. 
+ - gofmt + - misspell + - gocritic + - bidichk + - ineffassign + - revive + - gofumpt + - depguard + - nakedret + - unconvert + - wastedassign + - nolintlint + - stylecheck + enable-all: false + disable-all: true + fast: false + +run: + go: 1.18 + timeout: 10m + skip-dirs: + - node_modules + - public + - web_src + +linters-settings: + stylecheck: + checks: ["all", "-ST1005", "-ST1003"] + nakedret: + max-func-lines: 0 + gocritic: + disabled-checks: + - ifElseChain + - singleCaseSwitch # Every time this occurred in the code, there was no other way. + revive: + ignore-generated-header: false + severity: warning + confidence: 0.8 + errorCode: 1 + warningCode: 1 + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: duplicated-imports + - name: modifies-value-receiver + gofumpt: + extra-rules: true + lang-version: "1.18" + depguard: + # TODO: use depguard to replace import checks in gitea-vet + list-type: denylist + # Check the list against standard lib. + include-go-root: true + packages-with-error-message: + - github.com/unknwon/com: "use gitea's util and replacements" + +issues: + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + - unparam + - staticcheck + - path: models/migrations/v + linters: + - gocyclo + - errcheck + - dupl + - gosec + - linters: + - dupl + text: "webhook" + - linters: + - gocritic + text: "`ID' should not be capitalized" + - path: modules/templates/helper.go + linters: + - gocritic + - linters: + - unused + text: "swagger" + - path: contrib/pr/checkout.go + linters: + - errcheck + - path: models/issue.go + linters: + - errcheck + - path: models/migrations/ + linters: + - errcheck + - path: modules/log/ + linters: + - errcheck + - path: routers/api/v1/repo/issue_subscription.go + linters: + - dupl + - path: routers/repo/view.go + linters: + - dupl + - path: models/migrations/ + linters: + - unused + - linters: + - staticcheck + text: "argument x is overwritten before first use" + - path: modules/httplib/httplib.go + linters: + - staticcheck + # Enabling this would require refactoring the methods and how they are called. + - path: models/issue_comment_list.go + linters: + - dupl + - linters: + - misspell + text: '`Unknwon` is a misspelling of `Unknown`' + - path: models/update.go + linters: + - unused + - path: cmd/dump.go + linters: + - dupl + - text: "commentFormatting: put a space between `//` and comment text" + linters: + - gocritic + - text: "exitAfterDefer:" + linters: + - gocritic + - path: modules/graceful/manager_windows.go + linters: + - staticcheck + text: "svc.IsAnInteractiveSession is deprecated: Use IsWindowsService instead." 
+ - path: models/user/openid.go + linters: + - golint diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..50f1965 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,47 @@ +FROM --platform=$BUILDPLATFORM code.forgejo.org/oci/tonistiigi/xx AS xx + +FROM --platform=$BUILDPLATFORM code.forgejo.org/oci/golang:1.21-alpine3.19 as build-env + +# +# Transparently cross compile for the target platform +# +COPY --from=xx / / +ARG TARGETPLATFORM +RUN apk --no-cache add clang lld +RUN xx-apk --no-cache add gcc musl-dev +RUN xx-go --wrap + +# Do not remove `git` here, it is required for getting runner version when executing `make build` +RUN apk add --no-cache build-base git + +COPY . /srv +WORKDIR /srv + +RUN make clean && make build + +FROM code.forgejo.org/oci/alpine:3.19 +ARG RELEASE_VERSION +RUN apk add --no-cache git bash + +COPY --from=build-env /srv/forgejo-runner /bin/forgejo-runner + +LABEL maintainer="contact@forgejo.org" \ + org.opencontainers.image.authors="Forgejo" \ + org.opencontainers.image.url="https://forgejo.org" \ + org.opencontainers.image.documentation="https://forgejo.org/docs/latest/admin/actions/#forgejo-runner" \ + org.opencontainers.image.source="https://code.forgejo.org/forgejo/runner" \ + org.opencontainers.image.version="${RELEASE_VERSION}" \ + org.opencontainers.image.vendor="Forgejo" \ + org.opencontainers.image.licenses="MIT" \ + org.opencontainers.image.title="Forgejo Runner" \ + org.opencontainers.image.description="A runner for Forgejo Actions." + +ENV HOME=/data + +USER 1000:1000 + +WORKDIR /data + +VOLUME ["/data"] + +CMD ["/bin/forgejo-runner"] @@ -0,0 +1,20 @@ +Copyright (c) 2023 The Forgejo Authors +Copyright (c) 2022 The Gitea Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..3413168 --- /dev/null +++ b/Makefile @@ -0,0 +1,175 @@ +DIST := dist +EXECUTABLE := forgejo-runner +GOFMT ?= gofumpt -l +DIST := dist +DIST_DIRS := $(DIST)/binaries $(DIST)/release +GO ?= go +SHASUM ?= shasum -a 256 +HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" ) +XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest +XGO_VERSION := go-1.21.x +GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10 + +LINUX_ARCHS ?= linux/amd64,linux/arm64 +DARWIN_ARCHS ?= darwin-12/amd64,darwin-12/arm64 +WINDOWS_ARCHS ?= windows/amd64 +GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*") +GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! 
-name "generated.*") + +DOCKER_IMAGE ?= gitea/act_runner +DOCKER_TAG ?= nightly +DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG) +DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless + +EXTLDFLAGS = -extldflags "-static" $(null) + +ifeq ($(HAS_GO), GO) + GOPATH ?= $(shell $(GO) env GOPATH) + export PATH := $(GOPATH)/bin:$(PATH) + + CGO_EXTRA_CFLAGS := -DSQLITE_MAX_VARIABLE_NUMBER=32766 + CGO_CFLAGS ?= $(shell $(GO) env CGO_CFLAGS) $(CGO_EXTRA_CFLAGS) +endif + +ifeq ($(OS), Windows_NT) + GOFLAGS := -v -buildmode=exe + EXECUTABLE ?= $(EXECUTABLE).exe +else ifeq ($(OS), Windows) + GOFLAGS := -v -buildmode=exe + EXECUTABLE ?= $(EXECUTABLE).exe +else + GOFLAGS := -v + EXECUTABLE ?= $(EXECUTABLE) +endif + +STORED_VERSION_FILE := VERSION + +ifneq ($(DRONE_TAG),) + VERSION ?= $(subst v,,$(DRONE_TAG)) + RELASE_VERSION ?= $(VERSION) +else + ifneq ($(DRONE_BRANCH),) + VERSION ?= $(subst release/v,,$(DRONE_BRANCH)) + else + VERSION ?= main + endif + + STORED_VERSION=$(shell cat $(STORED_VERSION_FILE) 2>/dev/null) + ifneq ($(STORED_VERSION),) + RELASE_VERSION ?= $(STORED_VERSION) + else + RELASE_VERSION ?= $(shell git describe --tags --always | sed 's/-/+/' | sed 's/^v//') + endif +endif + +GO_PACKAGES_TO_VET ?= $(filter-out gitea.com/gitea/act_runner/internal/pkg/client/mocks,$(shell $(GO) list ./...)) + + +TAGS ?= +LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=v$(RELASE_VERSION)" + +all: build + +fmt: + @hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) install mvdan.cc/gofumpt@latest; \ + fi + $(GOFMT) -w $(GO_FMT_FILES) + +.PHONY: go-check +go-check: + $(eval MIN_GO_VERSION_STR := $(shell grep -Eo '^go\s+[0-9]+\.[0-9]+' go.mod | cut -d' ' -f2)) + $(eval MIN_GO_VERSION := $(shell printf "%03d%03d" $(shell echo '$(MIN_GO_VERSION_STR)' | tr '.' ' '))) + $(eval GO_VERSION := $(shell printf "%03d%03d" $(shell $(GO) version | grep -Eo '[0-9]+\.[0-9]+' | tr '.' ' ');)) + @if [ "$(GO_VERSION)" -lt "$(MIN_GO_VERSION)" ]; then \ + echo "Act Runner requires Go $(MIN_GO_VERSION_STR) or greater to build. You can get it at https://go.dev/dl/"; \ + exit 1; \ + fi + +.PHONY: fmt-check +fmt-check: + @hash gofumpt > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) install mvdan.cc/gofumpt@latest; \ + fi + @diff=$$($(GOFMT) -d $(GO_FMT_FILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +test: fmt-check + @$(GO) test -v -cover -coverprofile coverage.txt ./... && echo "\n==>\033[32m Ok\033[m\n" || exit 1 + +.PHONY: vet +vet: + @echo "Running go vet..." + @$(GO) vet $(GO_PACKAGES_TO_VET) + +install: $(GOFILES) + $(GO) install -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)' + +build: go-check $(EXECUTABLE) + +$(EXECUTABLE): $(GOFILES) + $(GO) build -v -tags 'netgo osusergo $(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)' -o $@ + +.PHONY: deps-backend +deps-backend: + $(GO) mod download + $(GO) install $(GXZ_PAGAGE) + $(GO) install $(XGO_PACKAGE) + +.PHONY: release +release: release-windows release-linux release-darwin release-copy release-compress release-check + +$(DIST_DIRS): + mkdir -p $(DIST_DIRS) + +.PHONY: release-windows +release-windows: | $(DIST_DIRS) + CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -buildmode exe -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(WINDOWS_ARCHS)' -out $(EXECUTABLE)-$(VERSION) . 
+ifeq ($(CI),true)
+	cp -r /build/* $(DIST)/binaries/
+endif
+
+.PHONY: release-linux
+release-linux: | $(DIST_DIRS)
+	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '-linkmode external -extldflags "-static" $(LDFLAGS)' -targets '$(LINUX_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
+ifeq ($(CI),true)
+	cp -r /build/* $(DIST)/binaries/
+endif
+
+.PHONY: release-darwin
+release-darwin: | $(DIST_DIRS)
+	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) run $(XGO_PACKAGE) -go $(XGO_VERSION) -dest $(DIST)/binaries -tags 'netgo osusergo $(TAGS)' -ldflags '$(LDFLAGS)' -targets '$(DARWIN_ARCHS)' -out $(EXECUTABLE)-$(VERSION) .
+ifeq ($(CI),true)
+	cp -r /build/* $(DIST)/binaries/
+endif
+
+.PHONY: release-copy
+release-copy: | $(DIST_DIRS)
+	cd $(DIST); for file in `find . -type f -name "*"`; do cp $${file} ./release/; done;
+
+.PHONY: release-check
+release-check: | $(DIST_DIRS)
+	cd $(DIST)/release/; for file in `find . -type f -name "*"`; do echo "checksumming $${file}" && $(SHASUM) `echo $${file} | sed 's/^..//'` > $${file}.sha256; done;
+
+.PHONY: release-compress
+release-compress: | $(DIST_DIRS)
+	cd $(DIST)/release/; for file in `find . -type f -name "*"`; do echo "compressing $${file}" && $(GO) run $(GXZ_PAGAGE) -k -9 $${file}; done;
+
+.PHONY: docker
+docker:
+	if ! docker buildx version >/dev/null 2>&1; then \
+		ARG_DISABLE_CONTENT_TRUST=--disable-content-trust=false; \
+	fi; \
+	docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_REF) .
+	docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_ROOTLESS_REF) -f Dockerfile.rootless .
+
+clean:
+	$(GO) clean -x -i ./...
+	rm -rf coverage.txt $(EXECUTABLE) $(DIST)
+
+version:
+	@echo $(VERSION)
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ccf2e8b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,94 @@
+# Forgejo Runner
+
+**WARNING:** this is [alpha release quality](https://en.wikipedia.org/wiki/Software_release_life_cycle#Alpha) code and should not be considered secure enough to deploy in production.
+
+A daemon that connects to a Forgejo instance and runs jobs for continuous integration. The [installation and usage instructions](https://forgejo.org/docs/next/admin/actions/) are part of the Forgejo documentation.
+
+# Reporting bugs
+
+When filing a bug in [the issue tracker](https://code.forgejo.org/forgejo/runner/issues), it is very helpful to propose a pull request [in the end-to-end tests](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions) repository that adds a reproducer. It will fail the CI and unambiguously demonstrate that the problem exists. In most cases it is enough to add a workflow ([see the echo example](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-echo)). For more complicated cases it is also possible to add a runner config file as well as shell scripts to set up and tear down the test case ([see the service example](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-service)).
+
+# Hacking
+
+The Forgejo runner depends on [a fork of ACT](https://code.forgejo.org/forgejo/act) and is a dependency of the [setup-forgejo action](https://code.forgejo.org/actions/setup-forgejo). See [the full dependency graph](https://code.forgejo.org/actions/cascading-pr/#forgejo-dependencies) for a global view.
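
The Local debug instructions below assume the three repositories are checked out next to each other on disk; a minimal sketch of preparing such a layout (assuming a common parent directory and the default branch of each repository) is:

```shell
# hypothetical layout: clone the three related repositories side by side
git clone https://code.forgejo.org/forgejo/runner
git clone https://code.forgejo.org/forgejo/act
git clone https://code.forgejo.org/actions/setup-forgejo
```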
+ +## Local debug + +The repositories are checked out in the same directory: + +- **runner**: [Forgejo runner](https://code.forgejo.org/forgejo/runner) +- **act**: [ACT](https://code.forgejo.org/forgejo/act) +- **setup-forgejo**: [setup-forgejo](https://code.forgejo.org/actions/setup-forgejo) + +### Install dependencies + +The dependencies are installed manually or with: + +```shell +setup-forgejo/forgejo-dependencies.sh +``` + +### Build the Forgejo runner with the local ACT + +The Forgejo runner is rebuilt with the ACT directory by changing the `runner/go.mod` file to: + +``` +replace github.com/nektos/act => ../act +``` + +Running: + +``` +cd runner ; go mod tidy +``` + +Building: + +```shell +cd runner ; rm -f forgejo-runner ; make forgejo-runner +``` + +### Launch Forgejo and the runner + +A Forgejo instance is launched with: + +```shell +cd setup-forgejo +./forgejo.sh setup +firefox $(cat forgejo-url) +``` + +The user is `root` with password `admin1234`. The runner is registered with: + +``` +cd setup-forgejo +docker exec --user 1000 forgejo forgejo actions generate-runner-token > forgejo-runner-token +../runner/forgejo-runner register --no-interactive --instance "$(cat forgejo-url)" --name runner --token $(cat forgejo-runner-token) --labels docker:docker://node:20-bullseye,self-hosted:host://-self-hosted,lxc:lxc://debian:bullseye +``` + +And launched with: + +```shell +cd setup-forgejo ; ../runner/forgejo-runner --config runner-config.yml daemon +``` + +Note that the `runner-config.yml` is required in that particular case +to configure the network in `bridge` mode, otherwise the runner will +create a network that cannot reach the forgejo instance. + +### Try a sample workflow + +From the Forgejo web interface, create a repository and add the +following to `.forgejo/workflows/try.yaml`. It will launch the job and +the result can be observed from the `actions` tab. + +```yaml +on: [push] +jobs: + ls: + runs-on: docker + steps: + - uses: actions/checkout@v3 + - run: | + ls ${{ github.workspace }} +``` diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md new file mode 100644 index 0000000..761ba5e --- /dev/null +++ b/RELEASE-NOTES.md @@ -0,0 +1,98 @@ +# Release Notes + +## 3.5.1 + +* Fix [CVE-2024-24557](https://nvd.nist.gov/vuln/detail/CVE-2024-24557) +* [Add report_interval option to config](https://code.forgejo.org/forgejo/runner/pulls/220) to allow setting the interval of status and log reports + +## 3.5.0 + +* [Allow graceful shutdowns](https://code.forgejo.org/forgejo/runner/pulls/202): when receiving a signal (INT or TERM) wait for running jobs to complete (up to shutdown_timeout). +* [Fix label declaration](https://code.forgejo.org/forgejo/runner/pulls/176): Runner in daemon mode now takes labels found in config.yml into account when declaration was successful. +* [Fix the docker compose example](https://code.forgejo.org/forgejo/runner/pulls/175) to workaround the race on labels. +* [Fix the kubernetes dind example](https://code.forgejo.org/forgejo/runner/pulls/169). +* [Rewrite ::group:: and ::endgroup:: commands like github](https://code.forgejo.org/forgejo/runner/pulls/183). 
+* [Added opencontainers labels to the image](https://code.forgejo.org/forgejo/runner/pulls/195)
+* [Upgrade the default container to node:20](https://code.forgejo.org/forgejo/runner/pulls/203)
+
+## 3.4.1
+
+* Fixes a regression introduced in 3.4.0 by which a job with no image explicitly set would
+  [be bound to the host](https://code.forgejo.org/forgejo/runner/issues/165)
+  network instead of a custom network (empty string in the configuration file).
+
+## 3.4.0
+
+Although this version is able to run [actions/upload-artifact@v4](https://code.forgejo.org/actions/upload-artifact/src/tag/v4) and [actions/download-artifact@v4](https://code.forgejo.org/actions/download-artifact/src/tag/v4), these actions will fail because it does not run against GitHub.com. A fork of those two actions with this check disabled is made available at:
+
+* https://code.forgejo.org/forgejo/upload-artifact/src/tag/v4
+* https://code.forgejo.org/forgejo/download-artifact/src/tag/v4
+
+and they can be used as shown in [an example from the end-to-end test suite](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-artifacts-v4/.forgejo/workflows/test.yml).
+
+* When running against codeberg.org, the default poll frequency is 30s instead of 2s.
+* Fix compatibility issue with actions/{upload,download}-artifact@v4.
+* Upgrade ACT v1.20.0 which brings:
+  * `[container].options` from the config file is exposed in containers created by the workflows
+  * the expressions in the value of `jobs.<job-id>.runs-on` are evaluated
+  * fix a bug causing the evaluated expression of `jobs.<job-id>.runs-on` to fail if it was an array
+  * mount `act-toolcache:/opt/hostedtoolcache` instead of `act-toolcache:/toolcache`
+  * a few improvements to the readability of the error messages displayed in the logs
+  * `amd64` can be used instead of `x86_64` and `arm64` instead of `aarch64` when specifying the architecture
+  * fixed YAML parsing bugs preventing dispatch workflows from being parsed correctly
+  * add support for `runs-on.labels` which is equivalent to `runs-on` followed by a list of labels
+  * the expressions in the service `ports` and `volumes` values are evaluated
+  * network aliases are only supported when the network is user specified, not when it is provided by the runner
+* If `[runner].insecure` is true in the configuration, insecure cloning of actions is allowed
+
+## 3.3.0
+
+* Support IPv6 with addresses from a private range and NAT for
+  docker:// with --enable-ipv6 and [container].enable_ipv6
+  lxc:// always
+
+## 3.2.0
+
+* Support LXC container capabilities via `lxc:lxc://debian:bookworm:k8s` or `lxc:lxc://debian:bookworm:docker lxc k8s`
+* Update ACT v1.16.0 to resolve a [race condition when bootstrapping LXC templates](https://code.forgejo.org/forgejo/act/pulls/23)
+
+## 3.1.0
+
+The `self-hosted` label that was hardwired to be an LXC container
+running `debian:bullseye` was reworked and documented ([user guide](https://forgejo.org/docs/next/user/actions/#jobsjob_idruns-on) and [admin guide](https://forgejo.org/docs/next/admin/actions/#labels-and-runs-on)).
+
+There are now two different schemes: `lxc://` for LXC containers and
+`host://` for running directly on the host.
+
+* Support the `host://` scheme for running directly on the host.
+* Support the `lxc://` scheme in labels +* Update [code.forgejo.org/forgejo/act v1.14.0](https://code.forgejo.org/forgejo/act/pulls/19) to implement both self-hosted and LXC schemes + +## 3.0.3 + +* Update [code.forgejo.org/forgejo/act v1.13.0](https://code.forgejo.org/forgejo/runner/pulls/106) to keep up with github.com/nektos/act + +## 3.0.2 + +* Update [code.forgejo.org/forgejo/act v1.12.0](https://code.forgejo.org/forgejo/runner/pulls/106) to upgrade the node installed in the LXC container to node20 + +## 3.0.1 + +* Update [code.forgejo.org/forgejo/act v1.11.0](https://code.forgejo.org/forgejo/runner/pulls/86) to resolve a bug preventing actions based on node20 from running, such as [checkout@v4](https://code.forgejo.org/actions/checkout/src/tag/v4). + +## 3.0.0 + +* Publish a rootless OCI image +* Refactor the release process + +## 2.5.0 + +* Update [code.forgejo.org/forgejo/act v1.10.0](https://code.forgejo.org/forgejo/runner/pulls/71) + +## 2.4.0 + +* Update [code.forgejo.org/forgejo/act v1.9.0](https://code.forgejo.org/forgejo/runner/pulls/64) + +## 2.3.0 + +* Add support for [offline registration](https://forgejo.org/docs/next/admin/actions/#offline-registration). diff --git a/build.go b/build.go new file mode 100644 index 0000000..f2e5d82 --- /dev/null +++ b/build.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +//go:build vendor + +package main + +import ( + // for vet + _ "code.gitea.io/gitea-vet" +) diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..f9dd774 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,10 @@ +This directory contains a collection of usage and deployment examples. + +Workflow examples can be found [in the documentation](https://forgejo.org/docs/next/user/actions/) +and in the [sources of the setup-forgejo](https://code.forgejo.org/actions/setup-forgejo/src/branch/main/testdata) action. + +| Section | Description | +|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [`docker`](docker) | using the host docker server by mounting the socket | +| [`docker-compose`](docker-compose) | all in one docker-compose with the Forgejo server, the runner and docker in docker | +| [`kubernetes`](kubernetes) | a sample deployment for the Forgejo runner | diff --git a/examples/docker-compose/README.md b/examples/docker-compose/README.md new file mode 100644 index 0000000..a3e6e9b --- /dev/null +++ b/examples/docker-compose/README.md @@ -0,0 +1,113 @@ +## Docker compose with docker-in-docker + +The `compose-forgejo-and-runner.yml` compose file runs a Forgejo +instance and registers a `Forgejo runner`. A docker server is also +launched within a container (using +[dind](https://hub.docker.com/_/docker/tags?name=dind)) and will be +used by the `Forgejo runner` to execute the workflows. + +### Quick start + +```sh +rm -fr /srv/runner-data /srv/forgejo-data +secret=$(openssl rand -hex 20) +sed -i -e "s/{SHARED_SECRET}/$secret/" compose-forgejo-and-runner.yml +docker compose -f compose-forgejo-and-runner.yml up -d +``` + +Visit http://0.0.0.0:8080/admin/actions/runners with login `root` and password `{ROOT_PASSWORD}` and see the runner is registered with the label `docker`. 
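
Instead of the web UI, the registration can also be checked from the command line; a minimal sketch using the service names defined in `compose-forgejo-and-runner.yml` (the exact log wording may vary between runner versions):

```sh
# registration output of the one-shot runner-register service
docker compose -f compose-forgejo-and-runner.yml logs runner-register
# the daemon should report that it started, e.g. "Starting runner daemon"
docker compose -f compose-forgejo-and-runner.yml logs runner-daemon
```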
> NOTE: the `Your ROOT_URL in app.ini is "http://localhost:3000/", it's unlikely matching the site you are visiting.` message is a warning that can be ignored in the context of this example.

```sh
docker compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml up demo-workflow
```

Visit http://0.0.0.0:8080/root/test/actions/runs/1 and see that the job ran.


### Running

Create a shared secret with:

```sh
openssl rand -hex 20
```

Replace all occurrences of {SHARED_SECRET} in
[compose-forgejo-and-runner.yml](compose-forgejo-and-runner.yml).

> **NOTE:** a token obtained from the Forgejo web interface cannot be used as a shared secret.

Replace {ROOT_PASSWORD} with a secure password in
[compose-forgejo-and-runner.yml](compose-forgejo-and-runner.yml).

```sh
docker compose -f compose-forgejo-and-runner.yml up
Creating docker-compose_docker-in-docker_1 ... done
Creating docker-compose_forgejo_1 ... done
Creating docker-compose_runner-register_1 ... done
...
docker-in-docker_1 | time="2023-08-24T10:22:15.023338461Z" level=warning msg="WARNING: API is accessible on http://0.0.0.0:2376
...
forgejo_1 | 2023/08/24 10:22:14 ...s/graceful/server.go:75:func1() [D] Starting server on tcp:0.0.0.0:3000 (PID: 19)
...
runner-daemon_1 | time="2023-08-24T10:22:16Z" level=info msg="Starting runner daemon"
```

### Manual testing

To log in to the Forgejo instance:

* URL: http://0.0.0.0:8080
* user: `root`
* password: `{ROOT_PASSWORD}`

`Forgejo Actions` is enabled by default when creating a repository.

## Test workflows

The `compose-demo-workflow.yml` compose file runs two demo workflows:
* one to verify the `Forgejo runner` can pick up a task from the Forgejo instance
and run it to completion.
* one to verify docker can be run inside the `Forgejo runner` container.

A new repository is created in root/test with the following workflows:

#### `.forgejo/workflows/demo.yml`:

```yaml
on: [push]
jobs:
  test:
    runs-on: docker
    steps:
      - run: echo All Good
```

#### `.forgejo/workflows/demo_docker.yml`

```yaml
on: [push]
jobs:
  test_docker:
    runs-on: ubuntu-22.04
    steps:
      - run: docker info
```

A wait loop polls the status of the check associated with the
commit in Forgejo and expects it to reach "success", asserting that the workflow ran.

### Running

```sh
$ docker-compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml up demo-workflow
...
demo-workflow_1 | To http://forgejo:3000/root/test
demo-workflow_1 | + 5ce134e...261cc79 main -> main (forced update)
demo-workflow_1 | branch 'main' set up to track 'http://root:admin1234@forgejo:3000/root/test/main'.
...
demo-workflow_1 | running
...
```
diff --git a/examples/docker-compose/compose-demo-workflow.yml b/examples/docker-compose/compose-demo-workflow.yml
new file mode 100644
index 0000000..90e7d52
--- /dev/null
+++ b/examples/docker-compose/compose-demo-workflow.yml
@@ -0,0 +1,35 @@
+# Copyright 2024 The Forgejo Authors.
+# SPDX-License-Identifier: MIT + +services: + + demo-workflow: + image: code.forgejo.org/oci/alpine:3.19 + links: + - forgejo + command: >- + sh -ec ' + apk add --quiet git curl jq ; + mkdir -p /srv/demo ; + cd /srv/demo ; + git init --initial-branch=main ; + mkdir -p .forgejo/workflows ; + echo "{ on: [push], jobs: { test: { runs-on: docker, steps: [ {uses: actions/checkout@v4}, { run: echo All Good } ] } } }" > .forgejo/workflows/demo.yml ; + echo "{ on: [push], jobs: { test_docker: { runs-on: ubuntu-22.04, steps: [ { run: docker info } ] } } }" > .forgejo/workflows/demo_docker.yml ; + git add . ; + git config user.email root@example.com ; + git config user.name username ; + git commit -m demo ; + while : ; do + git push --set-upstream --force http://root:{ROOT_PASSWORD}@forgejo:3000/root/test main && break ; + sleep 5 ; + done ; + sha=`git rev-parse HEAD` ; + for delay in 1 1 1 1 2 5 5 10 10 10 15 30 30 30 30 30 30 30 ; do + curl -sS -f http://forgejo:3000/api/v1/repos/root/test/commits/$$sha/status | jq --raw-output .state | tee status ; + if grep success status ; then echo DEMO WORKFLOW SUCCESS && break ; fi ; + if grep failure status ; then echo DEMO WORKFLOW FAILURE && break ; fi ; + sleep $$delay ; + done ; + grep success status || echo DEMO WORKFLOW FAILURE + ' diff --git a/examples/docker-compose/compose-forgejo-and-runner.yml b/examples/docker-compose/compose-forgejo-and-runner.yml new file mode 100644 index 0000000..4794985 --- /dev/null +++ b/examples/docker-compose/compose-forgejo-and-runner.yml @@ -0,0 +1,93 @@ +# Copyright 2024 The Forgejo Authors. +# SPDX-License-Identifier: MIT + +# +# Create a secret with: +# +# openssl rand -hex 20 +# +# Replace all occurences of {SHARED_SECRET} below with the output. +# +# NOTE: a token obtained from the Forgejo web interface cannot be used +# as a shared secret. 
+#
+# Replace {ROOT_PASSWORD} with a secure password
+#
+
+volumes:
+  docker_certs:
+
+services:
+
+  docker-in-docker:
+    image: code.forgejo.org/oci/docker:dind
+    hostname: docker # Must set hostname as TLS certificates are only valid for docker or localhost
+    privileged: true
+    environment:
+      DOCKER_TLS_CERTDIR: /certs
+      DOCKER_HOST: docker-in-docker
+    volumes:
+      - docker_certs:/certs
+
+  forgejo:
+    image: codeberg.org/forgejo/forgejo:1.21
+    command: >-
+      bash -c '
+      /bin/s6-svscan /etc/s6 &
+      sleep 10 ;
+      su -c "forgejo forgejo-cli actions register --secret {SHARED_SECRET}" git ;
+      su -c "forgejo admin user create --admin --username root --password {ROOT_PASSWORD} --email root@example.com" git ;
+      sleep infinity
+      '
+    environment:
+      FORGEJO__security__INSTALL_LOCK: "true"
+      FORGEJO__log__LEVEL: "debug"
+      FORGEJO__repository__ENABLE_PUSH_CREATE_USER: "true"
+      FORGEJO__repository__DEFAULT_PUSH_CREATE_PRIVATE: "false"
+      FORGEJO__repository__DEFAULT_REPO_UNITS: "repo.code,repo.actions"
+    volumes:
+      - /srv/forgejo-data:/data
+    ports:
+      - 8080:3000
+
+  runner-register:
+    image: code.forgejo.org/forgejo/runner:3.4.1
+    links:
+      - docker-in-docker
+      - forgejo
+    environment:
+      DOCKER_HOST: tcp://docker-in-docker:2376
+    volumes:
+      - /srv/runner-data:/data
+    user: 0:0
+    command: >-
+      bash -ec '
+      while : ; do
+      forgejo-runner create-runner-file --connect --instance http://forgejo:3000 --name runner --secret {SHARED_SECRET} && break ;
+      sleep 1 ;
+      done ;
+      sed -i -e "s|\"labels\": null|\"labels\": [\"docker:docker://code.forgejo.org/oci/node:20-bookworm\", \"ubuntu-22.04:docker://catthehacker/ubuntu:act-22.04\"]|" .runner ;
+      forgejo-runner generate-config > config.yml ;
+      sed -i -e "s|network: .*|network: host|" config.yml ;
+      sed -i -e "s|^ envs:$$| envs:\n DOCKER_HOST: tcp://docker:2376\n DOCKER_TLS_VERIFY: 1\n DOCKER_CERT_PATH: /certs/client|" config.yml ;
+      sed -i -e "s|^ options:| options: -v /certs/client:/certs/client|" config.yml ;
+      sed -i -e "s| valid_volumes: \[\]$$| valid_volumes:\n - /certs/client|" config.yml ;
+      chown -R 1000:1000 /data
+      '
+
+  runner-daemon:
+    image: code.forgejo.org/forgejo/runner:3.4.1
+    links:
+      - docker-in-docker
+      - forgejo
+    environment:
+      DOCKER_HOST: tcp://docker:2376
+      DOCKER_CERT_PATH: /certs/client
+      DOCKER_TLS_VERIFY: "1"
+    volumes:
+      - /srv/runner-data:/data
+      - docker_certs:/certs
+    command: >-
+      bash -c '
+      while : ; do test -w .runner && forgejo-runner --config config.yml daemon ; sleep 1 ; done
+      '
diff --git a/examples/docker/README.md b/examples/docker/README.md
new file mode 100644
index 0000000..628c99c
--- /dev/null
+++ b/examples/docker/README.md
@@ -0,0 +1,12 @@
+The following assumes:
+
+* a docker server runs on the host
+* the docker group of the host is GID 133
+* a `.runner` file exists in /tmp/data
+* a `runner-config.yml` file exists in /tmp/data
+
+```sh
+docker run -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/data:/data --user 1000:133 --rm code.forgejo.org/forgejo/runner:3.0.0 forgejo-runner --config runner-config.yml daemon
+```
+
+The workflows will run using the host docker server.
diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md
new file mode 100644
index 0000000..d00cf1a
--- /dev/null
+++ b/examples/kubernetes/README.md
@@ -0,0 +1,7 @@
+## Kubernetes Docker in Docker Deployment
+
+Registers Kubernetes pod runners using [offline registration](https://forgejo.org/docs/v1.21/admin/actions/#offline-registration), allowing the scaling of runners as needed.
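
Since each replica registers itself with the shared offline registration token, scaling the runners up or down is only a matter of changing the replica count; a sketch, assuming the `forgejo-runner` deployment name and `app: forgejo-runner` label used in [`dind-docker.yaml`](dind-docker.yaml) and the default namespace:

```sh
# scale to four runner pods (or edit spec.replicas in dind-docker.yaml and re-apply it)
kubectl scale deployment forgejo-runner --replicas=4
kubectl get pods -l app=forgejo-runner
```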
+
+NOTE: Docker in Docker (dind) requires elevated privileges on Kubernetes. The current way to achieve this is to set the pod `SecurityContext` to `privileged`. Keep in mind that this is a security risk: it could allow a malicious application to break out of the container context.
+
+[`dind-docker.yaml`](dind-docker.yaml) creates a deployment and secret for Kubernetes to act as a runner. The Docker credentials are re-generated each time the pod connects and do not need to be persisted.
diff --git a/examples/kubernetes/dind-docker.yaml b/examples/kubernetes/dind-docker.yaml
new file mode 100644
index 0000000..534432d
--- /dev/null
+++ b/examples/kubernetes/dind-docker.yaml
@@ -0,0 +1,87 @@
+# Secret data.
+# You will need to retrieve this from the web UI, and your Forgejo instance must be running v1.21+
+# Alternatively, create this with
+# kubectl create secret generic runner-secret --from-literal=token=your_offline_token_here
+apiVersion: v1
+stringData:
+  token: your_offline_secret_here
+kind: Secret
+metadata:
+  name: runner-secret
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: forgejo-runner
+  name: forgejo-runner
+spec:
+  # Two replicas means that if one is busy, the other can pick up jobs.
+  replicas: 2
+  selector:
+    matchLabels:
+      app: forgejo-runner
+  strategy: {}
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app: forgejo-runner
+    spec:
+      restartPolicy: Always
+      volumes:
+        - name: docker-certs
+          emptyDir: {}
+        - name: runner-data
+          emptyDir: {}
+      # Initialise our configuration file using offline registration
+      # https://forgejo.org/docs/v1.21/admin/actions/#offline-registration
+      initContainers:
+        - name: runner-register
+          image: code.forgejo.org/forgejo/runner:3.2.0
+          command: ["forgejo-runner", "register", "--no-interactive", "--token", $(RUNNER_SECRET), "--name", $(RUNNER_NAME), "--instance", $(FORGEJO_INSTANCE_URL)]
+          env:
+            - name: RUNNER_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: RUNNER_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: runner-secret
+                  key: token
+            - name: FORGEJO_INSTANCE_URL
+              value: http://forgejo-http.forgejo.svc.cluster.local:3000
+          resources:
+            limits:
+              cpu: "0.50"
+              memory: "64Mi"
+          volumeMounts:
+            - name: runner-data
+              mountPath: /data
+      containers:
+        - name: runner
+          image: code.forgejo.org/forgejo/runner:3.0.0
+          command: ["sh", "-c", "while !
nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...'; sleep 5; done; forgejo-runner daemon"] + env: + - name: DOCKER_HOST + value: tcp://localhost:2376 + - name: DOCKER_CERT_PATH + value: /certs/client + - name: DOCKER_TLS_VERIFY + value: "1" + volumeMounts: + - name: docker-certs + mountPath: /certs + - name: runner-data + mountPath: /data + - name: daemon + image: docker:23.0.6-dind + env: + - name: DOCKER_TLS_CERTDIR + value: /certs + securityContext: + privileged: true + volumeMounts: + - name: docker-certs + mountPath: /certs @@ -0,0 +1,105 @@ +module gitea.com/gitea/act_runner + +go 1.21.13 + +toolchain go1.23.0 + +require ( + code.gitea.io/actions-proto-go v0.4.0 + code.gitea.io/gitea-vet v0.2.3 + connectrpc.com/connect v1.16.2 + github.com/avast/retry-go/v4 v4.6.0 + github.com/docker/docker v25.0.6+incompatible + github.com/google/uuid v1.6.0 + github.com/joho/godotenv v1.5.1 + github.com/mattn/go-isatty v0.0.20 + github.com/nektos/act v0.2.49 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.1 + github.com/stretchr/testify v1.9.0 + golang.org/x/term v0.23.0 + golang.org/x/time v0.6.0 + google.golang.org/protobuf v1.34.2 + gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 +) + +require ( + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/containerd v1.7.13 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/creack/pty v1.1.21 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v25.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-git/go-git/v5 v5.11.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/buildkit v0.13.2 // indirect + 
github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rhysd/actionlint v1.6.27 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/sergi/go-diff v1.3.1 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + go.etcd.io/bbolt v1.3.9 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.23.0 // indirect + golang.org/x/tools v0.14.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) + +replace github.com/nektos/act => code.forgejo.org/forgejo/act v1.21.2 @@ -0,0 +1,332 @@ +code.forgejo.org/forgejo/act v1.21.2 h1:LERMtDNZDFXOYYYSU7Yduyyz7sN0t/Xnc1wFlupweiE= +code.forgejo.org/forgejo/act v1.21.2/go.mod h1:+PcvJ9iv+NTFeJSh79ra9Jbk9l0vvyA9D9me5/dbxYM= +code.gitea.io/actions-proto-go v0.4.0 h1:OsPBPhodXuQnsspG1sQ4eRE1PeoZyofd7+i73zCwnsU= +code.gitea.io/actions-proto-go v0.4.0/go.mod h1:mn7Wkqz6JbnTOHQpot3yDeHx+O5C9EGhMEE+htvHBas= +code.gitea.io/gitea-vet v0.2.3 h1:gdFmm6WOTM65rE8FUBTRzeQZYzXePKSSB1+r574hWwI= +code.gitea.io/gitea-vet v0.2.3/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE= +connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE= +connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is= +github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= +github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers 
v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI= +github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rhysd/actionlint v1.6.27 h1:xxwe8YmveBcC8lydW6GoHMGmB6H/MTqUU60F2p10wjw= +github.com/rhysd/actionlint v1.6.27/go.mod h1:m2nFUjAnOrxCMXuOMz9evYBRCLUsMnKY2IJl/N5umbk= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod 
h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a h1:oIi7H/bwFUYKYhzKbHc+3MvHRWqhQwXVB4LweLMiVy0= +github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a/go.mod h1:iSvujNDmpZ6eQX+bg/0X3lF7LEmZ8N77g2a/J/+Zt2U= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/internal/app/cmd/cache-server.go b/internal/app/cmd/cache-server.go new file mode 100644 index 0000000..21b3352 --- /dev/null +++ b/internal/app/cmd/cache-server.go @@ -0,0 +1,69 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + + "gitea.com/gitea/act_runner/internal/pkg/config" + + "github.com/nektos/act/pkg/artifactcache" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type cacheServerArgs struct { + Dir string + Host string + Port uint16 +} + +func runCacheServer(ctx context.Context, configFile *string, cacheArgs *cacheServerArgs) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + cfg, err := config.LoadDefault(*configFile) + if err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + initLogging(cfg) + + var ( + dir = cfg.Cache.Dir + host = cfg.Cache.Host + port = cfg.Cache.Port + ) + + // cacheArgs has higher priority + if cacheArgs.Dir != "" { + dir = cacheArgs.Dir + } + if cacheArgs.Host != "" { + host = cacheArgs.Host + } + if cacheArgs.Port != 0 { + port = cacheArgs.Port + } + + cacheHandler, err := artifactcache.StartHandler( + dir, + host, + port, + log.StandardLogger().WithField("module", "cache_request"), + ) + if err != nil { + return err + } + + log.Infof("cache server is listening on %v", cacheHandler.ExternalURL()) + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + + return nil + } +} diff --git a/internal/app/cmd/cmd.go b/internal/app/cmd/cmd.go new file mode 100644 index 0000000..48341dc --- /dev/null +++ b/internal/app/cmd/cmd.go @@ -0,0 +1,87 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/spf13/cobra" + + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/ver" +) + +func Execute(ctx context.Context) { + // ./act_runner + rootCmd := &cobra.Command{ + Use: "forgejo-runner [event name to run]\nIf no event name passed, will default to \"on: push\"", + Short: "Run Forgejo Actions locally by specifying the event name (e.g. 
`push`) or an action name directly.", + Args: cobra.MaximumNArgs(1), + Version: ver.Version(), + SilenceUsage: true, + } + configFile := "" + rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "Config file path") + + // ./act_runner register + var regArgs registerArgs + registerCmd := &cobra.Command{ + Use: "register", + Short: "Register a runner to the server", + Args: cobra.MaximumNArgs(0), + RunE: runRegister(ctx, ®Args, &configFile), // must use a pointer to regArgs + } + registerCmd.Flags().BoolVar(®Args.NoInteractive, "no-interactive", false, "Disable interactive mode") + registerCmd.Flags().StringVar(®Args.InstanceAddr, "instance", "", "Forgejo instance address") + registerCmd.Flags().StringVar(®Args.Token, "token", "", "Runner token") + registerCmd.Flags().StringVar(®Args.RunnerName, "name", "", "Runner name") + registerCmd.Flags().StringVar(®Args.Labels, "labels", "", "Runner tags, comma separated") + rootCmd.AddCommand(registerCmd) + + rootCmd.AddCommand(createRunnerFileCmd(ctx, &configFile)) + + // ./act_runner daemon + daemonCmd := &cobra.Command{ + Use: "daemon", + Short: "Run as a runner daemon", + Args: cobra.MaximumNArgs(1), + RunE: runDaemon(ctx, &configFile), + } + rootCmd.AddCommand(daemonCmd) + + // ./act_runner exec + rootCmd.AddCommand(loadExecCmd(ctx)) + + // ./act_runner config + rootCmd.AddCommand(&cobra.Command{ + Use: "generate-config", + Short: "Generate an example config file", + Args: cobra.MaximumNArgs(0), + Run: func(_ *cobra.Command, _ []string) { + fmt.Printf("%s", config.Example) + }, + }) + + // ./act_runner cache-server + var cacheArgs cacheServerArgs + cacheCmd := &cobra.Command{ + Use: "cache-server", + Short: "Start a cache server for the cache action", + Args: cobra.MaximumNArgs(0), + RunE: runCacheServer(ctx, &configFile, &cacheArgs), + } + cacheCmd.Flags().StringVarP(&cacheArgs.Dir, "dir", "d", "", "Cache directory") + cacheCmd.Flags().StringVarP(&cacheArgs.Host, "host", "s", "", "Host of the cache server") + cacheCmd.Flags().Uint16VarP(&cacheArgs.Port, "port", "p", 0, "Port of the cache server") + rootCmd.AddCommand(cacheCmd) + + // hide completion command + rootCmd.CompletionOptions.HiddenDefaultCmd = true + + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/internal/app/cmd/create-runner-file.go b/internal/app/cmd/create-runner-file.go new file mode 100644 index 0000000..a972624 --- /dev/null +++ b/internal/app/cmd/create-runner-file.go @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "context" + "encoding/hex" + "fmt" + "os" + + pingv1 "code.gitea.io/actions-proto-go/ping/v1" + "connectrpc.com/connect" + gouuid "github.com/google/uuid" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "gitea.com/gitea/act_runner/internal/app/run" + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/ver" +) + +type createRunnerFileArgs struct { + Connect bool + InstanceAddr string + Secret string + Name string +} + +func createRunnerFileCmd(ctx context.Context, configFile *string) *cobra.Command { + var argsVar createRunnerFileArgs + cmd := &cobra.Command{ + Use: "create-runner-file", + Short: "Create a runner file using a shared secret used to pre-register the runner on the Forgejo instance", + Args: cobra.MaximumNArgs(0), + RunE: runCreateRunnerFile(ctx, &argsVar, configFile), + } + cmd.Flags().BoolVar(&argsVar.Connect, "connect", false, "tries to connect to the instance using 
the secret (Forgejo v1.21 instance or greater)") + cmd.Flags().StringVar(&argsVar.InstanceAddr, "instance", "", "Forgejo instance address") + cmd.MarkFlagRequired("instance") + cmd.Flags().StringVar(&argsVar.Secret, "secret", "", "secret shared with the Forgejo instance via forgejo-cli actions register") + cmd.MarkFlagRequired("secret") + cmd.Flags().StringVar(&argsVar.Name, "name", "", "Runner name") + + return cmd +} + +// must be exactly the same as forgejo/models/actions/forgejo.go +func uuidFromSecret(secret string) (string, error) { + uuid, err := gouuid.FromBytes([]byte(secret[:16])) + if err != nil { + return "", fmt.Errorf("gouuid.FromBytes %v", err) + } + return uuid.String(), nil +} + +// should be exactly the same as forgejo/cmd/forgejo/actions.go +func validateSecret(secret string) error { + secretLen := len(secret) + if secretLen != 40 { + return fmt.Errorf("the secret must be exactly 40 characters long, not %d", secretLen) + } + if _, err := hex.DecodeString(secret); err != nil { + return fmt.Errorf("the secret must be a hexadecimal string: %w", err) + } + return nil +} + +func ping(cfg *config.Config, reg *config.Registration) error { + // initial http client + cli := client.New( + reg.Address, + cfg.Runner.Insecure, + "", + "", + ver.Version(), + ) + + _, err := cli.Ping(context.Background(), connect.NewRequest(&pingv1.PingRequest{ + Data: reg.UUID, + })) + if err != nil { + return fmt.Errorf("ping %s failed %w", reg.Address, err) + } + return nil +} + +func runCreateRunnerFile(ctx context.Context, args *createRunnerFileArgs, configFile *string) func(cmd *cobra.Command, args []string) error { + return func(*cobra.Command, []string) error { + log.SetLevel(log.DebugLevel) + log.Info("Creating runner file") + + // + // Prepare the registration data + // + cfg, err := config.LoadDefault(*configFile) + if err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + if err := validateSecret(args.Secret); err != nil { + return err + } + + uuid, err := uuidFromSecret(args.Secret) + if err != nil { + return err + } + + name := args.Name + if name == "" { + name, _ = os.Hostname() + log.Infof("Runner name is empty, using hostname '%s'.", name) + } + + reg := &config.Registration{ + Name: name, + UUID: uuid, + Token: args.Secret, + Address: args.InstanceAddr, + } + + // + // Verify the Forgejo instance is reachable + // + if err := ping(cfg, reg); err != nil { + return err + } + + // + // Save the registration file + // + if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil { + return fmt.Errorf("failed to save runner config to %s: %w", cfg.Runner.File, err) + } + + // + // Verify the secret works + // + if args.Connect { + cli := client.New( + reg.Address, + cfg.Runner.Insecure, + reg.UUID, + reg.Token, + ver.Version(), + ) + + runner := run.NewRunner(cfg, reg, cli) + resp, err := runner.Declare(ctx, cfg.Runner.Labels) + + if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented { + log.Warn("Cannot verify the connection because the Forgejo instance is lower than v1.21") + } else if err != nil { + log.WithError(err).Error("failed to invoke Declare") + return err + } else { + log.Infof("connection successful: %s, with version: %s, with labels: %v", + resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels) + } + } + return nil + } +} diff --git a/internal/app/cmd/create-runner-file_test.go b/internal/app/cmd/create-runner-file_test.go new file mode 100644 index 0000000..4f3acb8 --- /dev/null +++
b/internal/app/cmd/create-runner-file_test.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "bytes" + "context" + "os" + "testing" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "connectrpc.com/connect" + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/ver" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" +) + +func executeCommand(ctx context.Context, cmd *cobra.Command, args ...string) (string, error) { + buf := new(bytes.Buffer) + cmd.SetOut(buf) + cmd.SetErr(buf) + cmd.SetArgs(args) + + err := cmd.ExecuteContext(ctx) + + return buf.String(), err +} + +func Test_createRunnerFileCmd(t *testing.T) { + configFile := "config.yml" + ctx := context.Background() + cmd := createRunnerFileCmd(ctx, &configFile) + output, err := executeCommand(ctx, cmd) + assert.ErrorContains(t, err, `required flag(s) "instance", "secret" not set`) + assert.Contains(t, output, "Usage:") +} + +func Test_validateSecret(t *testing.T) { + assert.ErrorContains(t, validateSecret("abc"), "exactly 40 characters") + assert.ErrorContains(t, validateSecret("ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), "must be an hexadecimal") +} + +func Test_uuidFromSecret(t *testing.T) { + uuid, err := uuidFromSecret("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + assert.NoError(t, err) + assert.EqualValues(t, uuid, "41414141-4141-4141-4141-414141414141") +} + +func Test_ping(t *testing.T) { + cfg := &config.Config{} + address := os.Getenv("FORGEJO_URL") + if address == "" { + address = "https://code.forgejo.org" + } + reg := &config.Registration{ + Address: address, + UUID: "create-runner-file_test.go", + } + assert.NoError(t, ping(cfg, reg)) +} + +func Test_runCreateRunnerFile(t *testing.T) { + // + // Set the .runner file to be in a temporary directory + // + dir := t.TempDir() + configFile := dir + "/config.yml" + runnerFile := dir + "/.runner" + cfg, err := config.LoadDefault("") + cfg.Runner.File = runnerFile + yamlData, err := yaml.Marshal(cfg) + assert.NoError(t, err) + assert.NoError(t, os.WriteFile(configFile, yamlData, 0o666)) + + instance, has := os.LookupEnv("FORGEJO_URL") + if !has { + instance = "https://code.forgejo.org" + } + secret, has := os.LookupEnv("FORGEJO_RUNNER_SECRET") + assert.True(t, has) + name := "testrunner" + + // + // Run create-runner-file + // + ctx := context.Background() + cmd := createRunnerFileCmd(ctx, &configFile) + output, err := executeCommand(ctx, cmd, "--connect", "--secret", secret, "--instance", instance, "--name", name) + assert.NoError(t, err) + assert.EqualValues(t, "", output) + + // + // Read back the runner file and verify its content + // + reg, err := config.LoadRegistration(runnerFile) + assert.NoError(t, err) + assert.EqualValues(t, secret, reg.Token) + assert.EqualValues(t, instance, reg.Address) + + // + // Verify that fetching a task successfully returns there is + // no task for this runner + // + cli := client.New( + reg.Address, + cfg.Runner.Insecure, + reg.UUID, + reg.Token, + ver.Version(), + ) + resp, err := cli.FetchTask(ctx, connect.NewRequest(&runnerv1.FetchTaskRequest{})) + assert.NoError(t, err) + assert.Nil(t, resp.Msg.Task) +} diff --git a/internal/app/cmd/daemon.go b/internal/app/cmd/daemon.go new file mode 100644 index 0000000..a613546 --- /dev/null +++ b/internal/app/cmd/daemon.go @@ -0,0 +1,208 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package cmd + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + + "connectrpc.com/connect" + "github.com/mattn/go-isatty" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "gitea.com/gitea/act_runner/internal/app/poll" + "gitea.com/gitea/act_runner/internal/app/run" + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/envcheck" + "gitea.com/gitea/act_runner/internal/pkg/labels" + "gitea.com/gitea/act_runner/internal/pkg/ver" +) + +func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + cfg, err := config.LoadDefault(*configFile) + if err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + initLogging(cfg) + log.Infoln("Starting runner daemon") + + reg, err := config.LoadRegistration(cfg.Runner.File) + if os.IsNotExist(err) { + log.Error("registration file not found, please register the runner first") + return err + } else if err != nil { + return fmt.Errorf("failed to load registration file: %w", err) + } + + cfg.Tune(reg.Address) + + lbls := reg.Labels + if len(cfg.Runner.Labels) > 0 { + lbls = cfg.Runner.Labels + } + + ls := labels.Labels{} + for _, l := range lbls { + label, err := labels.Parse(l) + if err != nil { + log.WithError(err).Warnf("ignored invalid label %q", l) + continue + } + ls = append(ls, label) + } + if len(ls) == 0 { + log.Warn("no labels configured, runner may not be able to pick up jobs") + } + + if ls.RequireDocker() { + dockerSocketPath, err := getDockerSocketPath(cfg.Container.DockerHost) + if err != nil { + return err + } + if err := envcheck.CheckIfDockerRunning(ctx, dockerSocketPath); err != nil { + return err + } + // if dockerSocketPath passes the check, override DOCKER_HOST with dockerSocketPath + os.Setenv("DOCKER_HOST", dockerSocketPath) + // empty cfg.Container.DockerHost means act_runner need to find an available docker host automatically + // and assign the path to cfg.Container.DockerHost + if cfg.Container.DockerHost == "" { + cfg.Container.DockerHost = dockerSocketPath + } + // check the scheme, if the scheme is not npipe or unix + // set cfg.Container.DockerHost to "-" because it can't be mounted to the job container + if protoIndex := strings.Index(cfg.Container.DockerHost, "://"); protoIndex != -1 { + scheme := cfg.Container.DockerHost[:protoIndex] + if !strings.EqualFold(scheme, "npipe") && !strings.EqualFold(scheme, "unix") { + cfg.Container.DockerHost = "-" + } + } + } + + cli := client.New( + reg.Address, + cfg.Runner.Insecure, + reg.UUID, + reg.Token, + ver.Version(), + ) + + runner := run.NewRunner(cfg, reg, cli) + // declare the labels of the runner before fetching tasks + resp, err := runner.Declare(ctx, ls.Names()) + if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented { + // Gitea instance is older version. skip declare step. 
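+ // connect.CodeUnimplemented is returned when the instance does not expose the Declare RPC (for example a Forgejo release older than v1.21): the runner keeps polling for tasks, it simply cannot report its labels and version up front.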
+ log.Warn("Because the Forgejo instance is an old version, skipping declaring the labels and version.") + } else if err != nil { + log.WithError(err).Error("fail to invoke Declare") + return err + } else { + log.Infof("runner: %s, with version: %s, with labels: %v, declared successfully", + resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels) + // if declared successfully, override the labels in the.runner file with valid labels in the config file (if specified) + runner.Update(ctx, ls) + reg.Labels = ls.ToStrings() + if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil { + return fmt.Errorf("failed to save runner config: %w", err) + } + } + + poller := poll.New(cfg, cli, runner) + + go poller.Poll() + + <-ctx.Done() + log.Infof("runner: %s shutdown initiated, waiting [runner].shutdown_timeout=%s for running jobs to complete before shutting down", resp.Msg.Runner.Name, cfg.Runner.ShutdownTimeout) + + ctx, cancel := context.WithTimeout(context.Background(), cfg.Runner.ShutdownTimeout) + defer cancel() + + err = poller.Shutdown(ctx) + if err != nil { + log.Warnf("runner: %s cancelled in progress jobs during shutdown", resp.Msg.Runner.Name) + } + return nil + } +} + +// initLogging setup the global logrus logger. +func initLogging(cfg *config.Config) { + isTerm := isatty.IsTerminal(os.Stdout.Fd()) + format := &log.TextFormatter{ + DisableColors: !isTerm, + FullTimestamp: true, + } + log.SetFormatter(format) + + if l := cfg.Log.Level; l != "" { + level, err := log.ParseLevel(l) + if err != nil { + log.WithError(err). + Errorf("invalid log level: %q", l) + } + + // debug level + if level == log.DebugLevel { + log.SetReportCaller(true) + format.CallerPrettyfier = func(f *runtime.Frame) (string, string) { + // get function name + s := strings.Split(f.Function, ".") + funcname := "[" + s[len(s)-1] + "]" + // get file name and line number + _, filename := path.Split(f.File) + filename = "[" + filename + ":" + strconv.Itoa(f.Line) + "]" + return funcname, filename + } + log.SetFormatter(format) + } + + if log.GetLevel() != level { + log.Infof("log level changed to %v", level) + log.SetLevel(level) + } + } +} + +var commonSocketPaths = []string{ + "/var/run/docker.sock", + "/run/podman/podman.sock", + "$HOME/.colima/docker.sock", + "$XDG_RUNTIME_DIR/docker.sock", + "$XDG_RUNTIME_DIR/podman/podman.sock", + `\\.\pipe\docker_engine`, + "$HOME/.docker/run/docker.sock", +} + +func getDockerSocketPath(configDockerHost string) (string, error) { + // a `-` means don't mount the docker socket to job containers + if configDockerHost != "" && configDockerHost != "-" { + return configDockerHost, nil + } + + socket, found := os.LookupEnv("DOCKER_HOST") + if found { + return socket, nil + } + + for _, p := range commonSocketPaths { + if _, err := os.Lstat(os.ExpandEnv(p)); err == nil { + if strings.HasPrefix(p, `\\.\`) { + return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), nil + } + return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), nil + } + } + + return "", fmt.Errorf("daemon Docker Engine socket not found and docker_host config was invalid") +} diff --git a/internal/app/cmd/exec.go b/internal/app/cmd/exec.go new file mode 100644 index 0000000..3e111fe --- /dev/null +++ b/internal/app/cmd/exec.go @@ -0,0 +1,495 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. 
+// Copyright 2019 nektos +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/joho/godotenv" + "github.com/nektos/act/pkg/artifactcache" + "github.com/nektos/act/pkg/artifacts" + "github.com/nektos/act/pkg/common" + "github.com/nektos/act/pkg/model" + "github.com/nektos/act/pkg/runner" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "golang.org/x/term" +) + +type executeArgs struct { + runList bool + job string + event string + workdir string + workflowsPath string + noWorkflowRecurse bool + autodetectEvent bool + forcePull bool + forceRebuild bool + jsonLogger bool + envs []string + envfile string + secrets []string + defaultActionsURL string + insecureSecrets bool + privileged bool + usernsMode string + containerArchitecture string + containerDaemonSocket string + useGitIgnore bool + containerCapAdd []string + containerCapDrop []string + containerOptions string + artifactServerPath string + artifactServerAddr string + artifactServerPort string + noSkipCheckout bool + debug bool + dryrun bool + image string + cacheHandler *artifactcache.Handler + network string + enableIPv6 bool + githubInstance string +} + +// WorkflowsPath returns path to workflow file(s) +func (i *executeArgs) WorkflowsPath() string { + return i.resolve(i.workflowsPath) +} + +// Envfile returns path to .env +func (i *executeArgs) Envfile() string { + return i.resolve(i.envfile) +} + +func (i *executeArgs) LoadSecrets() map[string]string { + s := make(map[string]string) + for _, secretPair := range i.secrets { + secretPairParts := strings.SplitN(secretPair, "=", 2) + secretPairParts[0] = strings.ToUpper(secretPairParts[0]) + if strings.ToUpper(s[secretPairParts[0]]) == secretPairParts[0] { + log.Errorf("Secret %s is already defined (secrets are case insensitive)", secretPairParts[0]) + } + if len(secretPairParts) == 2 { + s[secretPairParts[0]] = secretPairParts[1] + } else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" { + s[secretPairParts[0]] = env + } else { + fmt.Printf("Provide value for '%s': ", secretPairParts[0]) + val, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Println() + if err != nil { + log.Errorf("failed to read input: %v", err) + os.Exit(1) + } + s[secretPairParts[0]] = string(val) + } + } + return s +} + +func readEnvs(path string, envs map[string]string) bool { + if _, err := os.Stat(path); err == nil { + env, err := godotenv.Read(path) + if err != nil { + log.Fatalf("Error loading from %s: %v", path, err) + } + for k, v := range env { + envs[k] = v + } + return true + } + return false +} + +func (i *executeArgs) LoadEnvs() map[string]string { + envs := make(map[string]string) + if i.envs != nil { + for _, envVar := range i.envs { + e := strings.SplitN(envVar, `=`, 2) + if len(e) == 2 { + envs[e[0]] = e[1] + } else { + envs[e[0]] = "" + } + } + } + _ = readEnvs(i.Envfile(), envs) + + envs["ACTIONS_CACHE_URL"] = i.cacheHandler.ExternalURL() + "/" + + return envs +} + +// Workdir returns path to workdir +func (i *executeArgs) Workdir() string { + return i.resolve(".") +} + +func (i *executeArgs) resolve(path string) string { + basedir, err := filepath.Abs(i.workdir) + if err != nil { + log.Fatal(err) + } + if path == "" { + return path + } + if !filepath.IsAbs(path) { + path = filepath.Join(basedir, path) + } + return path +} + +func printList(plan *model.Plan) error { + type lineInfoDef struct { + jobID string + 
jobName string + stage string + wfName string + wfFile string + events string + } + lineInfos := []lineInfoDef{} + + header := lineInfoDef{ + jobID: "Job ID", + jobName: "Job name", + stage: "Stage", + wfName: "Workflow name", + wfFile: "Workflow file", + events: "Events", + } + + jobs := map[string]bool{} + duplicateJobIDs := false + + jobIDMaxWidth := len(header.jobID) + jobNameMaxWidth := len(header.jobName) + stageMaxWidth := len(header.stage) + wfNameMaxWidth := len(header.wfName) + wfFileMaxWidth := len(header.wfFile) + eventsMaxWidth := len(header.events) + + for i, stage := range plan.Stages { + for _, r := range stage.Runs { + jobID := r.JobID + line := lineInfoDef{ + jobID: jobID, + jobName: r.String(), + stage: strconv.Itoa(i), + wfName: r.Workflow.Name, + wfFile: r.Workflow.File, + events: strings.Join(r.Workflow.On(), `,`), + } + if _, ok := jobs[jobID]; ok { + duplicateJobIDs = true + } else { + jobs[jobID] = true + } + lineInfos = append(lineInfos, line) + if jobIDMaxWidth < len(line.jobID) { + jobIDMaxWidth = len(line.jobID) + } + if jobNameMaxWidth < len(line.jobName) { + jobNameMaxWidth = len(line.jobName) + } + if stageMaxWidth < len(line.stage) { + stageMaxWidth = len(line.stage) + } + if wfNameMaxWidth < len(line.wfName) { + wfNameMaxWidth = len(line.wfName) + } + if wfFileMaxWidth < len(line.wfFile) { + wfFileMaxWidth = len(line.wfFile) + } + if eventsMaxWidth < len(line.events) { + eventsMaxWidth = len(line.events) + } + } + } + + jobIDMaxWidth += 2 + jobNameMaxWidth += 2 + stageMaxWidth += 2 + wfNameMaxWidth += 2 + wfFileMaxWidth += 2 + + fmt.Printf("%*s%*s%*s%*s%*s%*s\n", + -stageMaxWidth, header.stage, + -jobIDMaxWidth, header.jobID, + -jobNameMaxWidth, header.jobName, + -wfNameMaxWidth, header.wfName, + -wfFileMaxWidth, header.wfFile, + -eventsMaxWidth, header.events, + ) + for _, line := range lineInfos { + fmt.Printf("%*s%*s%*s%*s%*s%*s\n", + -stageMaxWidth, line.stage, + -jobIDMaxWidth, line.jobID, + -jobNameMaxWidth, line.jobName, + -wfNameMaxWidth, line.wfName, + -wfFileMaxWidth, line.wfFile, + -eventsMaxWidth, line.events, + ) + } + if duplicateJobIDs { + fmt.Print("\nDetected multiple jobs with the same job ID, use `-W` to specify the path to the specific workflow.\n") + } + return nil +} + +func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *executeArgs) error { + // plan with filtered jobs - to be used for filtering only + var filterPlan *model.Plan + + // Determine the event name to be filtered + var filterEventName string + + if len(execArgs.event) > 0 { + log.Infof("Using chosen event for filtering: %s", execArgs.event) + filterEventName = execArgs.event + } else if execArgs.autodetectEvent { + // collect all events from loaded workflows + events := planner.GetEvents() + + // set default event type to first event from many available + // this way the user doesn't have to specify the event.
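+ // note: this branch assumes the loaded workflows declare at least one event; unlike runExec below, there is no fallback to the default "push" event when the list is empty.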
+ log.Infof("Using first detected workflow event for filtering: %s", events[0]) + + filterEventName = events[0] + } + + var err error + if execArgs.job != "" { + log.Infof("Preparing plan with a job: %s", execArgs.job) + filterPlan, err = planner.PlanJob(execArgs.job) + if err != nil { + return err + } + } else if filterEventName != "" { + log.Infof("Preparing plan for a event: %s", filterEventName) + filterPlan, err = planner.PlanEvent(filterEventName) + if err != nil { + return err + } + } else { + log.Infof("Preparing plan with all jobs") + filterPlan, err = planner.PlanAll() + if err != nil { + return err + } + } + + _ = printList(filterPlan) + + return nil +} + +func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + planner, err := model.NewWorkflowPlanner(execArgs.WorkflowsPath(), execArgs.noWorkflowRecurse) + if err != nil { + return err + } + + if execArgs.runList { + return runExecList(ctx, planner, execArgs) + } + + // plan with triggered jobs + var plan *model.Plan + + // Determine the event name to be triggered + var eventName string + + // collect all events from loaded workflows + events := planner.GetEvents() + + if len(execArgs.event) > 0 { + log.Infof("Using chosed event for filtering: %s", execArgs.event) + eventName = execArgs.event + } else if len(events) == 1 && len(events[0]) > 0 { + log.Infof("Using the only detected workflow event: %s", events[0]) + eventName = events[0] + } else if execArgs.autodetectEvent && len(events) > 0 && len(events[0]) > 0 { + // set default event type to first event from many available + // this way user dont have to specify the event. + log.Infof("Using first detected workflow event: %s", events[0]) + eventName = events[0] + } else { + log.Infof("Using default workflow event: push") + eventName = "push" + } + + // build the plan for this run + if execArgs.job != "" { + log.Infof("Planning job: %s", execArgs.job) + plan, err = planner.PlanJob(execArgs.job) + if err != nil { + return err + } + } else { + log.Infof("Planning jobs for event: %s", eventName) + plan, err = planner.PlanEvent(eventName) + if err != nil { + return err + } + } + + maxLifetime := 3 * time.Hour + if deadline, ok := ctx.Deadline(); ok { + maxLifetime = time.Until(deadline) + } + + // init a cache server + handler, err := artifactcache.StartHandler("", "", 0, log.StandardLogger().WithField("module", "cache_request")) + if err != nil { + return err + } + log.Infof("cache handler listens on: %v", handler.ExternalURL()) + execArgs.cacheHandler = handler + + if len(execArgs.artifactServerAddr) == 0 { + ip := common.GetOutboundIP() + if ip == nil { + return fmt.Errorf("unable to determine outbound IP address") + } + execArgs.artifactServerAddr = ip.String() + } + + if len(execArgs.artifactServerPath) == 0 { + tempDir, err := os.MkdirTemp("", "gitea-act-") + if err != nil { + fmt.Println(err) + } + defer os.RemoveAll(tempDir) + + execArgs.artifactServerPath = tempDir + } + + // run the plan + config := &runner.Config{ + Workdir: execArgs.Workdir(), + BindWorkdir: false, + ReuseContainers: false, + ForcePull: execArgs.forcePull, + ForceRebuild: execArgs.forceRebuild, + LogOutput: true, + JSONLogger: execArgs.jsonLogger, + Env: execArgs.LoadEnvs(), + Secrets: execArgs.LoadSecrets(), + InsecureSecrets: execArgs.insecureSecrets, + Privileged: execArgs.privileged, + UsernsMode: execArgs.usernsMode, + ContainerArchitecture: execArgs.containerArchitecture, + 
ContainerDaemonSocket: execArgs.containerDaemonSocket, + UseGitIgnore: execArgs.useGitIgnore, + GitHubInstance: execArgs.githubInstance, + ContainerCapAdd: execArgs.containerCapAdd, + ContainerCapDrop: execArgs.containerCapDrop, + ContainerOptions: execArgs.containerOptions, + AutoRemove: true, + ArtifactServerPath: execArgs.artifactServerPath, + ArtifactServerPort: execArgs.artifactServerPort, + ArtifactServerAddr: execArgs.artifactServerAddr, + NoSkipCheckout: execArgs.noSkipCheckout, + // PresetGitHubContext: preset, + // EventJSON: string(eventJSON), + ContainerNamePrefix: fmt.Sprintf("FORGEJO-ACTIONS-TASK-%s", eventName), + ContainerMaxLifetime: maxLifetime, + ContainerNetworkMode: container.NetworkMode(execArgs.network), + ContainerNetworkEnableIPv6: execArgs.enableIPv6, + DefaultActionInstance: execArgs.defaultActionsURL, + PlatformPicker: func(_ []string) string { + return execArgs.image + }, + ValidVolumes: []string{"**"}, // All volumes are allowed for `exec` command + } + + config.Env["ACT_EXEC"] = "true" + + if t := config.Secrets["GITEA_TOKEN"]; t != "" { + config.Token = t + } else if t := config.Secrets["GITHUB_TOKEN"]; t != "" { + config.Token = t + } + + if !execArgs.debug { + logLevel := log.InfoLevel + config.JobLoggerLevel = &logLevel + } + + r, err := runner.New(config) + if err != nil { + return err + } + + artifactCancel := artifacts.Serve(ctx, execArgs.artifactServerPath, execArgs.artifactServerAddr, execArgs.artifactServerPort) + log.Debugf("artifacts server started at %s:%s", execArgs.artifactServerPath, execArgs.artifactServerPort) + + ctx = common.WithDryrun(ctx, execArgs.dryrun) + executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error { + artifactCancel() + return nil + }) + + return executor(ctx) + } +} + +func loadExecCmd(ctx context.Context) *cobra.Command { + execArg := executeArgs{} + + execCmd := &cobra.Command{ + Use: "exec", + Short: "Run workflow locally.", + Args: cobra.MaximumNArgs(20), + RunE: runExec(ctx, &execArg), + } + + execCmd.Flags().BoolVarP(&execArg.runList, "list", "l", false, "list workflows") + execCmd.Flags().StringVarP(&execArg.job, "job", "j", "", "run a specific job ID") + execCmd.Flags().StringVarP(&execArg.event, "event", "E", "", "run an event name") + execCmd.PersistentFlags().StringVarP(&execArg.workflowsPath, "workflows", "W", "./.forgejo/workflows/", "path to workflow file(s)") + execCmd.PersistentFlags().StringVarP(&execArg.workdir, "directory", "C", ".", "working directory") + execCmd.PersistentFlags().BoolVarP(&execArg.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag") + execCmd.Flags().BoolVarP(&execArg.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow") + execCmd.Flags().BoolVarP(&execArg.forcePull, "pull", "p", false, "pull docker image(s) even if already present") + execCmd.Flags().BoolVarP(&execArg.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present") + execCmd.PersistentFlags().BoolVar(&execArg.jsonLogger, "json", false, "Output logs in json format") + execCmd.Flags().StringArrayVarP(&execArg.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g.
--env myenv=foo or --env myenv)") + execCmd.PersistentFlags().StringVarP(&execArg.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers") + execCmd.Flags().StringArrayVarP(&execArg.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)") + execCmd.PersistentFlags().BoolVarP(&execArg.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.") + execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode") + execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use") + execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.") + execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers") + execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container") + execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)") + execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)") + execCmd.Flags().StringVarP(&execArg.containerOptions, "container-opts", "", "", "container options") + execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.") + execCmd.PersistentFlags().StringVarP(&execArg.artifactServerAddr, "artifact-server-addr", "", "", "Defines the address where the artifact server listens") + execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).") + execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsURL, "default-actions-url", "", "https://code.forgejo.org", "Defines the default base url of the action.") + execCmd.PersistentFlags().BoolVarP(&execArg.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout") + execCmd.PersistentFlags().BoolVarP(&execArg.debug, "debug", "d", false, "enable debug log") + execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode") + execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "node:20-bullseye", "Docker image to use. 
Use \"-self-hosted\" to run directly on the host.") + execCmd.PersistentFlags().StringVarP(&execArg.network, "network", "", "", "Specify the network to which the container will connect") + execCmd.PersistentFlags().BoolVarP(&execArg.enableIPv6, "enable-ipv6", "6", false, "Create network with IPv6 enabled.") + execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "Gitea instance to use.") + + return execCmd +} diff --git a/internal/app/cmd/register.go b/internal/app/cmd/register.go new file mode 100644 index 0000000..803511a --- /dev/null +++ b/internal/app/cmd/register.go @@ -0,0 +1,355 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package cmd + +import ( + "bufio" + "context" + "fmt" + "os" + "os/signal" + goruntime "runtime" + "strings" + "time" + + pingv1 "code.gitea.io/actions-proto-go/ping/v1" + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "connectrpc.com/connect" + "github.com/mattn/go-isatty" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/labels" + "gitea.com/gitea/act_runner/internal/pkg/ver" +) + +// runRegister registers a runner to the server +func runRegister(ctx context.Context, regArgs *registerArgs, configFile *string) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + log.SetReportCaller(false) + isTerm := isatty.IsTerminal(os.Stdout.Fd()) + log.SetFormatter(&log.TextFormatter{ + DisableColors: !isTerm, + DisableTimestamp: true, + }) + log.SetLevel(log.DebugLevel) + + log.Infof("Registering runner, arch=%s, os=%s, version=%s.", + goruntime.GOARCH, goruntime.GOOS, ver.Version()) + + // runner always needs root permission + if os.Getuid() != 0 { + // TODO: use a better way to check root permission + log.Warnf("Runner in user-mode.") + } + + if regArgs.NoInteractive { + if err := registerNoInteractive(ctx, *configFile, regArgs); err != nil { + return err + } + } else { + go func() { + if err := registerInteractive(ctx, *configFile); err != nil { + log.Fatal(err) + return + } + os.Exit(0) + }() + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + } + + return nil + } +} + +// registerArgs represents the arguments for register command +type registerArgs struct { + NoInteractive bool + InstanceAddr string + Token string + RunnerName string + Labels string +} + +type registerStage int8 + +const ( + StageUnknown registerStage = -1 + StageOverwriteLocalConfig registerStage = iota + 1 + StageInputInstance + StageInputToken + StageInputRunnerName + StageInputLabels + StageWaitingForRegistration + StageExit +) + +var defaultLabels = []string{ + "docker:docker://node:20-bullseye", +} + +type registerInputs struct { + InstanceAddr string + Token string + RunnerName string + Labels []string +} + +func (r *registerInputs) validate() error { + if r.InstanceAddr == "" { + return fmt.Errorf("instance address is empty") + } + if r.Token == "" { + return fmt.Errorf("token is empty") + } + if len(r.Labels) > 0 { + return validateLabels(r.Labels) + } + return nil +} + +func validateLabels(ls []string) error { + for _, label := range ls { + if _, err := labels.Parse(label); err != nil { + return err + } + } + return nil +} + +func (r *registerInputs) assignToNext(stage registerStage, value string, cfg *config.Config) registerStage { + // must set instance address and token. 
+ // if empty, keep current stage. + if stage == StageInputInstance || stage == StageInputToken { + if value == "" { + return stage + } + } + + // set hostname for runner name if empty + if stage == StageInputRunnerName && value == "" { + value, _ = os.Hostname() + } + + switch stage { + case StageOverwriteLocalConfig: + if value == "Y" || value == "y" { + return StageInputInstance + } + return StageExit + case StageInputInstance: + r.InstanceAddr = value + return StageInputToken + case StageInputToken: + r.Token = value + return StageInputRunnerName + case StageInputRunnerName: + r.RunnerName = value + // if there are some labels configured in config file, skip input labels stage + if len(cfg.Runner.Labels) > 0 { + ls := make([]string, 0, len(cfg.Runner.Labels)) + for _, l := range cfg.Runner.Labels { + _, err := labels.Parse(l) + if err != nil { + log.WithError(err).Warnf("ignored invalid label %q", l) + continue + } + ls = append(ls, l) + } + if len(ls) == 0 { + log.Warn("no valid labels configured in config file, runner may not be able to pick up jobs") + } + r.Labels = ls + return StageWaitingForRegistration + } + return StageInputLabels + case StageInputLabels: + r.Labels = defaultLabels + if value != "" { + r.Labels = strings.Split(value, ",") + } + + if validateLabels(r.Labels) != nil { + log.Infoln("Invalid labels, please input again, leave blank to use the default labels (for example, ubuntu-20.04:docker://node:20-bookworm,ubuntu-18.04:docker://node:20-bookworm)") + return StageInputLabels + } + return StageWaitingForRegistration + } + return StageUnknown +} + +func registerInteractive(ctx context.Context, configFile string) error { + var ( + reader = bufio.NewReader(os.Stdin) + stage = StageInputInstance + inputs = new(registerInputs) + ) + + cfg, err := config.LoadDefault(configFile) + if err != nil { + return fmt.Errorf("failed to load config: %v", err) + } + if f, err := os.Stat(cfg.Runner.File); err == nil && !f.IsDir() { + stage = StageOverwriteLocalConfig + } + + for { + printStageHelp(stage) + + cmdString, err := reader.ReadString('\n') + if err != nil { + return err + } + stage = inputs.assignToNext(stage, strings.TrimSpace(cmdString), cfg) + + if stage == StageWaitingForRegistration { + log.Infof("Registering runner, name=%s, instance=%s, labels=%v.", inputs.RunnerName, inputs.InstanceAddr, inputs.Labels) + if err := doRegister(ctx, cfg, inputs); err != nil { + return fmt.Errorf("Failed to register runner: %w", err) + } + log.Infof("Runner registered successfully.") + return nil + } + + if stage == StageExit { + return nil + } + + if stage <= StageUnknown { + log.Errorf("Invalid input, please re-run act command.") + return nil + } + } +} + +func printStageHelp(stage registerStage) { + switch stage { + case StageOverwriteLocalConfig: + log.Infoln("Runner is already registered, overwrite local config? 
[y/N]") + case StageInputInstance: + log.Infoln("Enter the Forgejo instance URL (for example, https://next.forgejo.org/):") + case StageInputToken: + log.Infoln("Enter the runner token:") + case StageInputRunnerName: + hostname, _ := os.Hostname() + log.Infof("Enter the runner name (if set empty, use hostname: %s):\n", hostname) + case StageInputLabels: + log.Infoln("Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:20-bookworm,ubuntu-18.04:docker://node:20-bookworm):") + case StageWaitingForRegistration: + log.Infoln("Waiting for registration...") + } +} + +func registerNoInteractive(ctx context.Context, configFile string, regArgs *registerArgs) error { + cfg, err := config.LoadDefault(configFile) + if err != nil { + return err + } + inputs := ®isterInputs{ + InstanceAddr: regArgs.InstanceAddr, + Token: regArgs.Token, + RunnerName: regArgs.RunnerName, + Labels: defaultLabels, + } + regArgs.Labels = strings.TrimSpace(regArgs.Labels) + // command line flag. + if regArgs.Labels != "" { + inputs.Labels = strings.Split(regArgs.Labels, ",") + } + // specify labels in config file. + if len(cfg.Runner.Labels) > 0 { + if regArgs.Labels != "" { + log.Warn("Labels from command will be ignored, use labels defined in config file.") + } + inputs.Labels = cfg.Runner.Labels + } + + if inputs.RunnerName == "" { + inputs.RunnerName, _ = os.Hostname() + log.Infof("Runner name is empty, use hostname '%s'.", inputs.RunnerName) + } + if err := inputs.validate(); err != nil { + log.WithError(err).Errorf("Invalid input, please re-run act command.") + return nil + } + if err := doRegister(ctx, cfg, inputs); err != nil { + return fmt.Errorf("Failed to register runner: %w", err) + } + log.Infof("Runner registered successfully.") + return nil +} + +func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs) error { + // initial http client + cli := client.New( + inputs.InstanceAddr, + cfg.Runner.Insecure, + "", + "", + ver.Version(), + ) + + for { + _, err := cli.Ping(ctx, connect.NewRequest(&pingv1.PingRequest{ + Data: inputs.RunnerName, + })) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if ctx.Err() != nil { + break + } + if err != nil { + log.WithError(err). + Errorln("Cannot ping the Forgejo instance server") + // TODO: if ping failed, retry or exit + time.Sleep(time.Second) + } else { + log.Debugln("Successfully pinged the Forgejo instance server") + break + } + } + + reg := &config.Registration{ + Name: inputs.RunnerName, + Token: inputs.Token, + Address: inputs.InstanceAddr, + Labels: inputs.Labels, + } + + ls := make([]string, len(reg.Labels)) + for i, v := range reg.Labels { + l, _ := labels.Parse(v) + ls[i] = l.Name + } + // register new runner. 
+ resp, err := cli.Register(ctx, connect.NewRequest(&runnerv1.RegisterRequest{ + Name: reg.Name, + Token: reg.Token, + Version: ver.Version(), + AgentLabels: ls, // Could be removed after Gitea 1.20 + Labels: ls, + })) + if err != nil { + log.WithError(err).Error("poller: cannot register new runner") + return err + } + + reg.ID = resp.Msg.Runner.Id + reg.UUID = resp.Msg.Runner.Uuid + reg.Name = resp.Msg.Runner.Name + reg.Token = resp.Msg.Runner.Token + + if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil { + return fmt.Errorf("failed to save runner config: %w", err) + } + return nil +} diff --git a/internal/app/poll/poller.go b/internal/app/poll/poller.go new file mode 100644 index 0000000..cc89fa5 --- /dev/null +++ b/internal/app/poll/poller.go @@ -0,0 +1,167 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package poll + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "connectrpc.com/connect" + log "github.com/sirupsen/logrus" + "golang.org/x/time/rate" + + "gitea.com/gitea/act_runner/internal/app/run" + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" +) + +const PollerID = "PollerID" + +type Poller interface { + Poll() + Shutdown(ctx context.Context) error +} + +type poller struct { + client client.Client + runner run.RunnerInterface + cfg *config.Config + tasksVersion atomic.Int64 // tasksVersion used to store the version of the last task fetched from the Gitea. + + pollingCtx context.Context + shutdownPolling context.CancelFunc + + jobsCtx context.Context + shutdownJobs context.CancelFunc + + done chan any +} + +func New(cfg *config.Config, client client.Client, runner run.RunnerInterface) Poller { + return (&poller{}).init(cfg, client, runner) +} + +func (p *poller) init(cfg *config.Config, client client.Client, runner run.RunnerInterface) Poller { + pollingCtx, shutdownPolling := context.WithCancel(context.Background()) + + jobsCtx, shutdownJobs := context.WithCancel(context.Background()) + + done := make(chan any) + + p.client = client + p.runner = runner + p.cfg = cfg + + p.pollingCtx = pollingCtx + p.shutdownPolling = shutdownPolling + + p.jobsCtx = jobsCtx + p.shutdownJobs = shutdownJobs + p.done = done + + return p +} + +func (p *poller) Poll() { + limiter := rate.NewLimiter(rate.Every(p.cfg.Runner.FetchInterval), 1) + wg := &sync.WaitGroup{} + for i := 0; i < p.cfg.Runner.Capacity; i++ { + wg.Add(1) + go p.poll(i, wg, limiter) + } + wg.Wait() + + // signal the poller is finished + close(p.done) +} + +func (p *poller) Shutdown(ctx context.Context) error { + p.shutdownPolling() + + select { + case <-p.done: + log.Trace("all jobs are complete") + return nil + + case <-ctx.Done(): + log.Trace("forcing the jobs to shutdown") + p.shutdownJobs() + <-p.done + log.Trace("all jobs have been shutdown") + return ctx.Err() + } +} + +func (p *poller) poll(id int, wg *sync.WaitGroup, limiter *rate.Limiter) { + log.Infof("[poller %d] launched", id) + defer wg.Done() + for { + if err := limiter.Wait(p.pollingCtx); err != nil { + log.Infof("[poller %d] shutdown", id) + return + } + task, ok := p.fetchTask(p.pollingCtx) + if !ok { + continue + } + p.runTaskWithRecover(p.jobsCtx, task) + } +} + +func (p *poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) { + defer func() { + if r := recover(); r != nil { + err := fmt.Errorf("panic: %v", r) + log.WithError(err).Error("panic in 
runTaskWithRecover") + } + }() + + if err := p.runner.Run(ctx, task); err != nil { + log.WithError(err).Error("failed to run task") + } +} + +func (p *poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) { + reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout) + defer cancel() + + // Load the version value that was in the cache when the request was sent. + v := p.tasksVersion.Load() + resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{ + TasksVersion: v, + })) + if errors.Is(err, context.DeadlineExceeded) { + log.Trace("deadline exceeded") + err = nil + } + if err != nil { + if errors.Is(err, context.Canceled) { + log.WithError(err).Debugf("shutdown, fetch task canceled") + } else { + log.WithError(err).Error("failed to fetch task") + } + return nil, false + } + + if resp == nil || resp.Msg == nil { + return nil, false + } + + if resp.Msg.TasksVersion > v { + p.tasksVersion.CompareAndSwap(v, resp.Msg.TasksVersion) + } + + if resp.Msg.Task == nil { + return nil, false + } + + // got a task, set `tasksVersion` to zero to focre query db in next request. + p.tasksVersion.CompareAndSwap(resp.Msg.TasksVersion, 0) + + return resp.Msg.Task, true +} diff --git a/internal/app/poll/poller_test.go b/internal/app/poll/poller_test.go new file mode 100644 index 0000000..04b1a84 --- /dev/null +++ b/internal/app/poll/poller_test.go @@ -0,0 +1,263 @@ +// Copyright The Forgejo Authors. +// SPDX-License-Identifier: MIT + +package poll + +import ( + "context" + "fmt" + "testing" + "time" + + "connectrpc.com/connect" + + "code.gitea.io/actions-proto-go/ping/v1/pingv1connect" + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect" + "gitea.com/gitea/act_runner/internal/pkg/config" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +type mockPoller struct { + poller +} + +func (o *mockPoller) Poll() { + o.poller.Poll() +} + +type mockClient struct { + pingv1connect.PingServiceClient + runnerv1connect.RunnerServiceClient + + sleep time.Duration + cancel bool + err error + noTask bool +} + +func (o mockClient) Address() string { + return "" +} + +func (o mockClient) Insecure() bool { + return true +} + +func (o *mockClient) FetchTask(ctx context.Context, req *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error) { + if o.sleep > 0 { + select { + case <-ctx.Done(): + log.Trace("fetch task done") + return nil, context.DeadlineExceeded + case <-time.After(o.sleep): + log.Trace("slept") + return nil, fmt.Errorf("unexpected") + } + } + if o.cancel { + return nil, context.Canceled + } + if o.err != nil { + return nil, o.err + } + task := &runnerv1.Task{} + if o.noTask { + task = nil + o.noTask = false + } + + return connect.NewResponse(&runnerv1.FetchTaskResponse{ + Task: task, + TasksVersion: int64(1), + }), nil +} + +type mockRunner struct { + cfg *config.Runner + log chan string + panics bool + err error +} + +func (o *mockRunner) Run(ctx context.Context, task *runnerv1.Task) error { + o.log <- "runner starts" + if o.panics { + log.Trace("panics") + o.log <- "runner panics" + o.panics = false + panic("whatever") + } + if o.err != nil { + log.Trace("error") + o.log <- "runner error" + err := o.err + o.err = nil + return err + } + for { + select { + case <-ctx.Done(): + log.Trace("shutdown") + o.log <- "runner shutdown" + return nil + case <-time.After(o.cfg.Timeout): + log.Trace("after") + o.log <- "runner timeout" + return nil 
+ } + } +} + +func setTrace(t *testing.T) { + t.Helper() + log.SetReportCaller(true) + log.SetLevel(log.TraceLevel) +} + +func TestPoller_New(t *testing.T) { + p := New(&config.Config{}, &mockClient{}, &mockRunner{}) + assert.NotNil(t, p) +} + +func TestPoller_Runner(t *testing.T) { + setTrace(t) + for _, testCase := range []struct { + name string + timeout time.Duration + noTask bool + panics bool + err error + expected string + contextTimeout time.Duration + }{ + { + name: "Simple", + timeout: 10 * time.Second, + expected: "runner shutdown", + }, + { + name: "Panics", + timeout: 10 * time.Second, + panics: true, + expected: "runner panics", + }, + { + name: "Error", + timeout: 10 * time.Second, + err: fmt.Errorf("ERROR"), + expected: "runner error", + }, + { + name: "PollTaskError", + timeout: 10 * time.Second, + noTask: true, + expected: "runner shutdown", + }, + { + name: "ShutdownTimeout", + timeout: 1 * time.Second, + contextTimeout: 1 * time.Minute, + expected: "runner timeout", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + runnerLog := make(chan string, 3) + configRunner := config.Runner{ + FetchInterval: 1, + Capacity: 1, + Timeout: testCase.timeout, + } + p := &mockPoller{} + p.init( + &config.Config{ + Runner: configRunner, + }, + &mockClient{ + noTask: testCase.noTask, + }, + &mockRunner{ + cfg: &configRunner, + log: runnerLog, + panics: testCase.panics, + err: testCase.err, + }) + go p.Poll() + assert.Equal(t, "runner starts", <-runnerLog) + var ctx context.Context + var cancel context.CancelFunc + if testCase.contextTimeout > 0 { + ctx, cancel = context.WithTimeout(context.Background(), testCase.contextTimeout) + defer cancel() + } else { + ctx, cancel = context.WithCancel(context.Background()) + cancel() + } + p.Shutdown(ctx) + <-p.done + assert.Equal(t, testCase.expected, <-runnerLog) + }) + } +} + +func TestPoller_Fetch(t *testing.T) { + setTrace(t) + for _, testCase := range []struct { + name string + noTask bool + sleep time.Duration + err error + cancel bool + success bool + }{ + { + name: "Success", + success: true, + }, + { + name: "Timeout", + sleep: 100 * time.Millisecond, + }, + { + name: "Canceled", + cancel: true, + }, + { + name: "NoTask", + noTask: true, + }, + { + name: "Error", + err: fmt.Errorf("random error"), + }, + } { + t.Run(testCase.name, func(t *testing.T) { + configRunner := config.Runner{ + FetchTimeout: 1 * time.Millisecond, + } + p := &mockPoller{} + p.init( + &config.Config{ + Runner: configRunner, + }, + &mockClient{ + sleep: testCase.sleep, + cancel: testCase.cancel, + noTask: testCase.noTask, + err: testCase.err, + }, + &mockRunner{}, + ) + task, ok := p.fetchTask(context.Background()) + if testCase.success { + assert.True(t, ok) + assert.NotNil(t, task) + } else { + assert.False(t, ok) + assert.Nil(t, task) + } + }) + } +} diff --git a/internal/app/run/runner.go b/internal/app/run/runner.go new file mode 100644 index 0000000..e8654b6 --- /dev/null +++ b/internal/app/run/runner.go @@ -0,0 +1,260 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package run + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + "sync" + "time" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "connectrpc.com/connect" + "github.com/docker/docker/api/types/container" + "github.com/nektos/act/pkg/artifactcache" + "github.com/nektos/act/pkg/common" + "github.com/nektos/act/pkg/model" + "github.com/nektos/act/pkg/runner" + log "github.com/sirupsen/logrus" + + "gitea.com/gitea/act_runner/internal/pkg/client" + "gitea.com/gitea/act_runner/internal/pkg/config" + "gitea.com/gitea/act_runner/internal/pkg/labels" + "gitea.com/gitea/act_runner/internal/pkg/report" + "gitea.com/gitea/act_runner/internal/pkg/ver" +) + +// Runner runs the pipeline. +type Runner struct { + name string + + cfg *config.Config + + client client.Client + labels labels.Labels + envs map[string]string + + runningTasks sync.Map +} + +type RunnerInterface interface { + Run(ctx context.Context, task *runnerv1.Task) error +} + +func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner { + ls := labels.Labels{} + for _, v := range reg.Labels { + if l, err := labels.Parse(v); err == nil { + ls = append(ls, l) + } + } + + if cfg.Runner.Envs == nil { + cfg.Runner.Envs = make(map[string]string, 10) + } + + cfg.Runner.Envs["GITHUB_SERVER_URL"] = reg.Address + + envs := make(map[string]string, len(cfg.Runner.Envs)) + for k, v := range cfg.Runner.Envs { + envs[k] = v + } + if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled { + if cfg.Cache.ExternalServer != "" { + envs["ACTIONS_CACHE_URL"] = cfg.Cache.ExternalServer + } else { + cacheHandler, err := artifactcache.StartHandler( + cfg.Cache.Dir, + cfg.Cache.Host, + cfg.Cache.Port, + log.StandardLogger().WithField("module", "cache_request"), + ) + if err != nil { + log.Errorf("cannot init cache server, it will be disabled: %v", err) + // go on + } else { + envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/" + } + } + } + + // set artifact gitea api + artifactGiteaAPI := strings.TrimSuffix(cli.Address(), "/") + "/api/actions_pipeline/" + envs["ACTIONS_RUNTIME_URL"] = artifactGiteaAPI + envs["ACTIONS_RESULTS_URL"] = strings.TrimSuffix(cli.Address(), "/") + + // Set specific environments to distinguish between Gitea and GitHub + envs["GITEA_ACTIONS"] = "true" + envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version() + + return &Runner{ + name: reg.Name, + cfg: cfg, + client: cli, + labels: ls, + envs: envs, + } +} + +func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error { + if _, ok := r.runningTasks.Load(task.Id); ok { + return fmt.Errorf("task %d is already running", task.Id) + } + r.runningTasks.Store(task.Id, struct{}{}) + defer r.runningTasks.Delete(task.Id) + + ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout) + defer cancel() + reporter := report.NewReporter(ctx, cancel, r.client, task, r.cfg.Runner.ReportInterval) + var runErr error + defer func() { + lastWords := "" + if runErr != nil { + lastWords = runErr.Error() + } + _ = reporter.Close(lastWords) + }() + reporter.RunDaemon() + runErr = r.run(ctx, task, reporter) + + return nil +} + +func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v", r) + } + }() + + reporter.Logf("%s(version:%s) received task %v of job %v, be triggered by event: %s", r.name, ver.Version(), task.Id, task.Context.Fields["job"].GetStringValue(), 
task.Context.Fields["event_name"].GetStringValue()) + + workflow, jobID, err := generateWorkflow(task) + if err != nil { + return err + } + + plan, err := model.CombineWorkflowPlanner(workflow).PlanJob(jobID) + if err != nil { + return err + } + job := workflow.GetJob(jobID) + reporter.ResetSteps(len(job.Steps)) + + taskContext := task.Context.Fields + + log.Infof("task %v repo is %v %v %v", task.Id, taskContext["repository"].GetStringValue(), + taskContext["gitea_default_actions_url"].GetStringValue(), + r.client.Address()) + + preset := &model.GithubContext{ + Event: taskContext["event"].GetStructValue().AsMap(), + RunID: taskContext["run_id"].GetStringValue(), + RunNumber: taskContext["run_number"].GetStringValue(), + Actor: taskContext["actor"].GetStringValue(), + Repository: taskContext["repository"].GetStringValue(), + EventName: taskContext["event_name"].GetStringValue(), + Sha: taskContext["sha"].GetStringValue(), + Ref: taskContext["ref"].GetStringValue(), + RefName: taskContext["ref_name"].GetStringValue(), + RefType: taskContext["ref_type"].GetStringValue(), + HeadRef: taskContext["head_ref"].GetStringValue(), + BaseRef: taskContext["base_ref"].GetStringValue(), + Token: taskContext["token"].GetStringValue(), + RepositoryOwner: taskContext["repository_owner"].GetStringValue(), + RetentionDays: taskContext["retention_days"].GetStringValue(), + } + if t := task.Secrets["GITEA_TOKEN"]; t != "" { + preset.Token = t + } else if t := task.Secrets["GITHUB_TOKEN"]; t != "" { + preset.Token = t + } + + giteaRuntimeToken := taskContext["gitea_runtime_token"].GetStringValue() + if giteaRuntimeToken == "" { + // use task token to action api token for previous Gitea Server Versions + giteaRuntimeToken = preset.Token + } + r.envs["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken + + eventJSON, err := json.Marshal(preset.Event) + if err != nil { + return err + } + + maxLifetime := 3 * time.Hour + if deadline, ok := ctx.Deadline(); ok { + maxLifetime = time.Until(deadline) + } + + var inputs map[string]string + if preset.EventName == "workflow_dispatch" { + if inputsRaw, ok := preset.Event["inputs"]; ok { + inputs, _ = inputsRaw.(map[string]string) + } + } + + runnerConfig := &runner.Config{ + // On Linux, Workdir will be like "/<parent_directory>/<owner>/<repo>" + // On Windows, Workdir will be like "\<parent_directory>\<owner>\<repo>" + Workdir: filepath.FromSlash(filepath.Clean(fmt.Sprintf("/%s/%s", r.cfg.Container.WorkdirParent, preset.Repository))), + BindWorkdir: false, + ActionCacheDir: filepath.FromSlash(r.cfg.Host.WorkdirParent), + + ReuseContainers: false, + ForcePull: r.cfg.Container.ForcePull, + ForceRebuild: false, + LogOutput: true, + JSONLogger: false, + Env: r.envs, + Secrets: task.Secrets, + GitHubInstance: strings.TrimSuffix(r.client.Address(), "/"), + AutoRemove: true, + NoSkipCheckout: true, + PresetGitHubContext: preset, + EventJSON: string(eventJSON), + ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id), + ContainerMaxLifetime: maxLifetime, + ContainerNetworkMode: container.NetworkMode(r.cfg.Container.Network), + ContainerNetworkEnableIPv6: r.cfg.Container.EnableIPv6, + ContainerOptions: r.cfg.Container.Options, + ContainerDaemonSocket: r.cfg.Container.DockerHost, + Privileged: r.cfg.Container.Privileged, + DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(), + PlatformPicker: r.labels.PickPlatform, + Vars: task.Vars, + ValidVolumes: r.cfg.Container.ValidVolumes, + InsecureSkipTLS: r.cfg.Runner.Insecure, + Inputs: inputs, + } + + rr, 
err := runner.New(runnerConfig) + if err != nil { + return err + } + executor := rr.NewPlanExecutor(plan) + + reporter.Logf("workflow prepared") + + // add logger recorders + ctx = common.WithLoggerHook(ctx, reporter) + + execErr := executor(ctx) + reporter.SetOutputs(job.Outputs) + return execErr +} + +func (r *Runner) Declare(ctx context.Context, labels []string) (*connect.Response[runnerv1.DeclareResponse], error) { + return r.client.Declare(ctx, connect.NewRequest(&runnerv1.DeclareRequest{ + Version: ver.Version(), + Labels: labels, + })) +} + +func (r *Runner) Update(ctx context.Context, labels labels.Labels) { + r.labels = labels +} diff --git a/internal/app/run/runner_test.go b/internal/app/run/runner_test.go new file mode 100644 index 0000000..0145c70 --- /dev/null +++ b/internal/app/run/runner_test.go @@ -0,0 +1,37 @@ +package run + +import ( + "context" + "testing" + + "gitea.com/gitea/act_runner/internal/pkg/labels" + "github.com/stretchr/testify/assert" +) + +func TestLabelUpdate(t *testing.T) { + ctx := context.Background() + ls := labels.Labels{} + + initialLabel, err := labels.Parse("testlabel:docker://alpine") + assert.NoError(t, err) + ls = append(ls, initialLabel) + + newLs := labels.Labels{} + + newLabel, err := labels.Parse("next label:host") + assert.NoError(t, err) + newLs = append(newLs, initialLabel) + newLs = append(newLs, newLabel) + + runner := Runner{ + labels: ls, + } + + assert.Contains(t, runner.labels, initialLabel) + assert.NotContains(t, runner.labels, newLabel) + + runner.Update(ctx, newLs) + + assert.Contains(t, runner.labels, initialLabel) + assert.Contains(t, runner.labels, newLabel) +} diff --git a/internal/app/run/workflow.go b/internal/app/run/workflow.go new file mode 100644 index 0000000..a6fbb71 --- /dev/null +++ b/internal/app/run/workflow.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package run + +import ( + "bytes" + "fmt" + "sort" + "strings" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "github.com/nektos/act/pkg/model" + "gopkg.in/yaml.v3" +) + +func generateWorkflow(task *runnerv1.Task) (*model.Workflow, string, error) { + workflow, err := model.ReadWorkflow(bytes.NewReader(task.WorkflowPayload)) + if err != nil { + return nil, "", err + } + + jobIDs := workflow.GetJobIDs() + if len(jobIDs) != 1 { + return nil, "", fmt.Errorf("multiple jobs found: %v", jobIDs) + } + jobID := jobIDs[0] + + needJobIDs := make([]string, 0, len(task.Needs)) + for id, need := range task.Needs { + needJobIDs = append(needJobIDs, id) + needJob := &model.Job{ + Outputs: need.Outputs, + Result: strings.ToLower(strings.TrimPrefix(need.Result.String(), "RESULT_")), + } + workflow.Jobs[id] = needJob + } + sort.Strings(needJobIDs) + + rawNeeds := yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0, len(needJobIDs)), + } + for _, id := range needJobIDs { + rawNeeds.Content = append(rawNeeds.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Value: id, + }) + } + + workflow.Jobs[jobID].RawNeeds = rawNeeds + + return workflow, jobID, nil +} diff --git a/internal/app/run/workflow_test.go b/internal/app/run/workflow_test.go new file mode 100644 index 0000000..c7598db --- /dev/null +++ b/internal/app/run/workflow_test.go @@ -0,0 +1,74 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package run + +import ( + "testing" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "github.com/nektos/act/pkg/model" + "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" +) + +func Test_generateWorkflow(t *testing.T) { + type args struct { + task *runnerv1.Task + } + tests := []struct { + name string + args args + assert func(t *testing.T, wf *model.Workflow) + want1 string + wantErr bool + }{ + { + name: "has needs", + args: args{ + task: &runnerv1.Task{ + WorkflowPayload: []byte(` +name: Build and deploy +on: push + +jobs: + job9: + needs: build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: ./deploy --build ${{ needs.job1.outputs.output1 }} + - run: ./deploy --build ${{ needs.job2.outputs.output2 }} +`), + Needs: map[string]*runnerv1.TaskNeed{ + "job1": { + Outputs: map[string]string{ + "output1": "output1 value", + }, + Result: runnerv1.Result_RESULT_SUCCESS, + }, + "job2": { + Outputs: map[string]string{ + "output2": "output2 value", + }, + Result: runnerv1.Result_RESULT_SUCCESS, + }, + }, + }, + }, + assert: func(t *testing.T, wf *model.Workflow) { + assert.DeepEqual(t, wf.GetJob("job9").Needs(), []string{"job1", "job2"}) + }, + want1: "job9", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := generateWorkflow(tt.args.task) + require.NoError(t, err) + tt.assert(t, got) + assert.Equal(t, got1, tt.want1) + }) + } +} diff --git a/internal/pkg/client/client.go b/internal/pkg/client/client.go new file mode 100644 index 0000000..57f91ad --- /dev/null +++ b/internal/pkg/client/client.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package client + +import ( + "code.gitea.io/actions-proto-go/ping/v1/pingv1connect" + "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect" +) + +// A Client manages communication with the runner. +// +//go:generate mockery --name Client +type Client interface { + pingv1connect.PingServiceClient + runnerv1connect.RunnerServiceClient + Address() string + Insecure() bool +} diff --git a/internal/pkg/client/header.go b/internal/pkg/client/header.go new file mode 100644 index 0000000..24844fa --- /dev/null +++ b/internal/pkg/client/header.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package client + +const ( + UUIDHeader = "x-runner-uuid" + TokenHeader = "x-runner-token" + // Deprecated: could be removed after Gitea 1.20 released + VersionHeader = "x-runner-version" +) diff --git a/internal/pkg/client/http.go b/internal/pkg/client/http.go new file mode 100644 index 0000000..d365a77 --- /dev/null +++ b/internal/pkg/client/http.go @@ -0,0 +1,82 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package client + +import ( + "context" + "crypto/tls" + "net/http" + "strings" + + "code.gitea.io/actions-proto-go/ping/v1/pingv1connect" + "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect" + "connectrpc.com/connect" +) + +func getHTTPClient(endpoint string, insecure bool) *http.Client { + if strings.HasPrefix(endpoint, "https://") && insecure { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + } + return http.DefaultClient +} + +// New returns a new runner client. 
+func New(endpoint string, insecure bool, uuid, token, version string, opts ...connect.ClientOption) *HTTPClient { + baseURL := strings.TrimRight(endpoint, "/") + "/api/actions" + + opts = append(opts, connect.WithInterceptors(connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { + return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { + if uuid != "" { + req.Header().Set(UUIDHeader, uuid) + } + if token != "" { + req.Header().Set(TokenHeader, token) + } + // TODO: version will be removed from request header after Gitea 1.20 released. + if version != "" { + req.Header().Set(VersionHeader, version) + } + return next(ctx, req) + } + }))) + + return &HTTPClient{ + PingServiceClient: pingv1connect.NewPingServiceClient( + getHTTPClient(endpoint, insecure), + baseURL, + opts..., + ), + RunnerServiceClient: runnerv1connect.NewRunnerServiceClient( + getHTTPClient(endpoint, insecure), + baseURL, + opts..., + ), + endpoint: endpoint, + insecure: insecure, + } +} + +func (c *HTTPClient) Address() string { + return c.endpoint +} + +func (c *HTTPClient) Insecure() bool { + return c.insecure +} + +var _ Client = (*HTTPClient)(nil) + +// An HTTPClient manages communication with the runner API. +type HTTPClient struct { + pingv1connect.PingServiceClient + runnerv1connect.RunnerServiceClient + endpoint string + insecure bool +} diff --git a/internal/pkg/client/mocks/Client.go b/internal/pkg/client/mocks/Client.go new file mode 100644 index 0000000..a8bfdb1 --- /dev/null +++ b/internal/pkg/client/mocks/Client.go @@ -0,0 +1,219 @@ +// Code generated by mockery v2.26.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + connect "connectrpc.com/connect" + + mock "github.com/stretchr/testify/mock" + + pingv1 "code.gitea.io/actions-proto-go/ping/v1" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *Client) Address() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Declare provides a mock function with given fields: _a0, _a1 +func (_m *Client) Declare(_a0 context.Context, _a1 *connect.Request[runnerv1.DeclareRequest]) (*connect.Response[runnerv1.DeclareResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[runnerv1.DeclareResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) (*connect.Response[runnerv1.DeclareResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) *connect.Response[runnerv1.DeclareResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[runnerv1.DeclareResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FetchTask provides a mock function with given fields: _a0, _a1 +func (_m *Client) FetchTask(_a0 context.Context, _a1 *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[runnerv1.FetchTaskResponse] + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) *connect.Response[runnerv1.FetchTaskResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[runnerv1.FetchTaskResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Insecure provides a mock function with given fields: +func (_m *Client) Insecure() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Ping provides a mock function with given fields: _a0, _a1 +func (_m *Client) Ping(_a0 context.Context, _a1 *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[pingv1.PingResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) *connect.Response[pingv1.PingResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[pingv1.PingResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[pingv1.PingRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Register provides a mock function with given fields: _a0, _a1 +func (_m *Client) Register(_a0 context.Context, _a1 *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[runnerv1.RegisterResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) *connect.Response[runnerv1.RegisterResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[runnerv1.RegisterResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateLog provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateLog(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[runnerv1.UpdateLogResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) *connect.Response[runnerv1.UpdateLogResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateLogResponse]) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateTask provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateTask(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error) { + ret := _m.Called(_a0, _a1) + + var r0 *connect.Response[runnerv1.UpdateTaskResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) *connect.Response[runnerv1.UpdateTaskResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateTaskResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t mockConstructorTestingTNewClient) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/pkg/config/config.example.yaml b/internal/pkg/config/config.example.yaml new file mode 100644 index 0000000..32dfb68 --- /dev/null +++ b/internal/pkg/config/config.example.yaml @@ -0,0 +1,100 @@ +# Example configuration file; it's safe to copy this as the default config file without any modification. + +# You don't have to copy this file to your instance, +# just run `./act_runner generate-config > config.yaml` to generate a config file. + +log: + # The level of logging, can be trace, debug, info, warn, error, fatal + level: info + +runner: + # Where to store the registration result. + file: .runner + # How many tasks to execute concurrently. + capacity: 1 + # Extra environment variables to run jobs. + envs: + A_TEST_ENV_NAME_1: a_test_env_value_1 + A_TEST_ENV_NAME_2: a_test_env_value_2 + # Extra environment variables to run jobs from a file. + # It will be ignored if it's empty or the file doesn't exist. + env_file: .env + # The timeout for a job to be finished. + # Please note that the Forgejo instance also has a timeout (3h by default) for the job. + # So the job could be stopped by the Forgejo instance if its timeout is shorter than this. + timeout: 3h + # The timeout for the runner to wait for running jobs to finish when + # shutting down because a TERM or INT signal has been received. Any + # running jobs that haven't finished after this timeout will be + # cancelled. + # If unset or zero the jobs will be cancelled immediately. + shutdown_timeout: 3h + # Whether to skip verifying the TLS certificate of the instance. + insecure: false + # The timeout for fetching the job from the Forgejo instance. + fetch_timeout: 5s + # The interval for fetching the job from the Forgejo instance. + fetch_interval: 2s + # The interval for reporting the job status and logs to the Forgejo instance. + report_interval: 1s + # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
+ # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"] + # If it's empty when registering, it will ask for inputting labels. + # If it's empty when execute `deamon`, will use labels in `.runner` file. + labels: [] + +cache: + # Enable cache server to use actions/cache. + enabled: true + # The directory to store the cache data. + # If it's empty, the cache data will be stored in $HOME/.cache/actcache. + dir: "" + # The host of the cache server. + # It's not for the address to listen, but the address to connect from job containers. + # So 0.0.0.0 is a bad choice, leave it empty to detect automatically. + host: "" + # The port of the cache server. + # 0 means to use a random available port. + port: 0 + # The external cache server URL. Valid only when enable is true. + # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself. + # The URL should generally end with "/". + external_server: "" + +container: + # Specifies the network to which the container will connect. + # Could be host, bridge or the name of a custom network. + # If it's empty, create a network automatically. + network: "" + # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly. + # Only takes effect if "network" is set to "". + enable_ipv6: false + # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker). + privileged: false + # And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway). + options: + # The parent directory of a job's working directory. + # If it's empty, /workspace will be used. + workdir_parent: + # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob + # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted. + # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to: + # valid_volumes: + # - data + # - /src/*.json + # If you want to allow any volume, please use the following configuration: + # valid_volumes: + # - '**' + valid_volumes: [] + # overrides the docker client host with the specified one. + # If it's empty, act_runner will find an available docker host automatically. + # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers. + # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work. + docker_host: "" + # Pull docker image(s) even if already present + force_pull: false + +host: + # The parent directory of a job's working directory. + # If it's empty, $HOME/.cache/act/ will be used. + workdir_parent: diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go new file mode 100644 index 0000000..a1536b3 --- /dev/null +++ b/internal/pkg/config/config.go @@ -0,0 +1,166 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package config + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/joho/godotenv" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// Log represents the configuration for logging. +type Log struct { + Level string `yaml:"level"` // Level indicates the logging level. 
+} + +// Runner represents the configuration for the runner. +type Runner struct { + File string `yaml:"file"` // File specifies the file path for the runner. + Capacity int `yaml:"capacity"` // Capacity specifies the capacity of the runner. + Envs map[string]string `yaml:"envs"` // Envs stores environment variables for the runner. + EnvFile string `yaml:"env_file"` // EnvFile specifies the path to the file containing environment variables for the runner. + Timeout time.Duration `yaml:"timeout"` // Timeout specifies the duration for runner timeout. + ShutdownTimeout time.Duration `yaml:"shutdown_timeout"` // ShutdownTimeout specifies the duration to wait for running jobs to complete during a shutdown of the runner. + Insecure bool `yaml:"insecure"` // Insecure indicates whether the runner operates in an insecure mode. + FetchTimeout time.Duration `yaml:"fetch_timeout"` // FetchTimeout specifies the timeout duration for fetching resources. + FetchInterval time.Duration `yaml:"fetch_interval"` // FetchInterval specifies the interval duration for fetching resources. + ReportInterval time.Duration `yaml:"report_interval"` // ReportInterval specifies the interval duration for reporting status and logs of a running job. + Labels []string `yaml:"labels"` // Labels specify the labels of the runner. Labels are declared on each startup +} + +// Cache represents the configuration for caching. +type Cache struct { + Enabled *bool `yaml:"enabled"` // Enabled indicates whether caching is enabled. It is a pointer to distinguish between false and not set. If not set, it will be true. + Dir string `yaml:"dir"` // Dir specifies the directory path for caching. + Host string `yaml:"host"` // Host specifies the caching host. + Port uint16 `yaml:"port"` // Port specifies the caching port. + ExternalServer string `yaml:"external_server"` // ExternalServer specifies the URL of external cache server +} + +// Container represents the configuration for the container. +type Container struct { + Network string `yaml:"network"` // Network specifies the network for the container. + NetworkMode string `yaml:"network_mode"` // Deprecated: use Network instead. Could be removed after Gitea 1.20 + EnableIPv6 bool `yaml:"enable_ipv6"` // EnableIPv6 indicates whether the network is created with IPv6 enabled. + Privileged bool `yaml:"privileged"` // Privileged indicates whether the container runs in privileged mode. + Options string `yaml:"options"` // Options specifies additional options for the container. + WorkdirParent string `yaml:"workdir_parent"` // WorkdirParent specifies the parent directory for the container's working directory. + ValidVolumes []string `yaml:"valid_volumes"` // ValidVolumes specifies the volumes (including bind mounts) can be mounted to containers. + DockerHost string `yaml:"docker_host"` // DockerHost specifies the Docker host. It overrides the value specified in environment variable DOCKER_HOST. + ForcePull bool `yaml:"force_pull"` // Pull docker image(s) even if already present +} + +// Host represents the configuration for the host. +type Host struct { + WorkdirParent string `yaml:"workdir_parent"` // WorkdirParent specifies the parent directory for the host's working directory. +} + +// Config represents the overall configuration. +type Config struct { + Log Log `yaml:"log"` // Log represents the configuration for logging. + Runner Runner `yaml:"runner"` // Runner represents the configuration for the runner. + Cache Cache `yaml:"cache"` // Cache represents the configuration for caching. 
+	Container Container `yaml:"container"` // Container represents the configuration for the container.
+	Host Host `yaml:"host"` // Host represents the configuration for the host.
+}
+
+// Tune the config settings according to the Forgejo instance that will be used.
+func (c *Config) Tune(instanceURL string) {
+	if instanceURL == "https://codeberg.org" {
+		if c.Runner.FetchInterval < 30*time.Second {
+			log.Info("The runner is configured to be used by a public instance; the fetch interval is set to 30 seconds.")
+			c.Runner.FetchInterval = 30 * time.Second
+		}
+	}
+}
+
+// LoadDefault returns the default configuration.
+// If file is not empty, it will be used to load the configuration.
+func LoadDefault(file string) (*Config, error) {
+	cfg := &Config{}
+	if file != "" {
+		content, err := os.ReadFile(file)
+		if err != nil {
+			return nil, fmt.Errorf("open config file %q: %w", file, err)
+		}
+		if err := yaml.Unmarshal(content, cfg); err != nil {
+			return nil, fmt.Errorf("parse config file %q: %w", file, err)
+		}
+	}
+	compatibleWithOldEnvs(file != "", cfg)
+
+	if cfg.Runner.EnvFile != "" {
+		if stat, err := os.Stat(cfg.Runner.EnvFile); err == nil && !stat.IsDir() {
+			envs, err := godotenv.Read(cfg.Runner.EnvFile)
+			if err != nil {
+				return nil, fmt.Errorf("read env file %q: %w", cfg.Runner.EnvFile, err)
+			}
+			if cfg.Runner.Envs == nil {
+				cfg.Runner.Envs = map[string]string{}
+			}
+			for k, v := range envs {
+				cfg.Runner.Envs[k] = v
+			}
+		}
+	}
+
+	if cfg.Log.Level == "" {
+		cfg.Log.Level = "info"
+	}
+	if cfg.Runner.File == "" {
+		cfg.Runner.File = ".runner"
+	}
+	if cfg.Runner.Capacity <= 0 {
+		cfg.Runner.Capacity = 1
+	}
+	if cfg.Runner.Timeout <= 0 {
+		cfg.Runner.Timeout = 3 * time.Hour
+	}
+	if cfg.Cache.Enabled == nil {
+		b := true
+		cfg.Cache.Enabled = &b
+	}
+	if *cfg.Cache.Enabled {
+		if cfg.Cache.Dir == "" {
+			home, _ := os.UserHomeDir()
+			cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache")
+		}
+	}
+	if cfg.Container.WorkdirParent == "" {
+		cfg.Container.WorkdirParent = "workspace"
+	}
+	if cfg.Host.WorkdirParent == "" {
+		home, _ := os.UserHomeDir()
+		cfg.Host.WorkdirParent = filepath.Join(home, ".cache", "act")
+	}
+	if cfg.Runner.FetchTimeout <= 0 {
+		cfg.Runner.FetchTimeout = 5 * time.Second
+	}
+	if cfg.Runner.FetchInterval <= 0 {
+		cfg.Runner.FetchInterval = 2 * time.Second
+	}
+	if cfg.Runner.ReportInterval <= 0 {
+		cfg.Runner.ReportInterval = time.Second
+	}
+
+	// Although `container.network_mode` is deprecated, we have to stay compatible with it for now.
+	if cfg.Container.NetworkMode != "" && cfg.Container.Network == "" {
+		log.Warn("You are using the deprecated configuration item `container.network_mode`; please use `container.network` instead.")
+		if cfg.Container.NetworkMode == "bridge" {
+			// Previously, if the value of `container.network_mode` was `bridge`, a new network was created for the job.
+			// But "bridge" is easily confused with the bridge network created by Docker by default.
+			// So we set the value of `container.network` to an empty string so that `act_runner` automatically creates a new network for the job.
+			cfg.Container.Network = ""
+		} else {
+			cfg.Container.Network = cfg.Container.NetworkMode
+		}
+	}
+
+	return cfg, nil
+}
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go
new file mode 100644
index 0000000..d2ddf2f
--- /dev/null
+++ b/internal/pkg/config/config_test.go
@@ -0,0 +1,37 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT + +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestConfigTune(t *testing.T) { + c := &Config{ + Runner: Runner{}, + } + + t.Run("Public instance tuning", func(t *testing.T) { + c.Runner.FetchInterval = 60 * time.Second + c.Tune("https://codeberg.org") + assert.EqualValues(t, 60*time.Second, c.Runner.FetchInterval) + + c.Runner.FetchInterval = 2 * time.Second + c.Tune("https://codeberg.org") + assert.EqualValues(t, 30*time.Second, c.Runner.FetchInterval) + }) + + t.Run("Non-public instance tuning", func(t *testing.T) { + c.Runner.FetchInterval = 60 * time.Second + c.Tune("https://example.com") + assert.EqualValues(t, 60*time.Second, c.Runner.FetchInterval) + + c.Runner.FetchInterval = 2 * time.Second + c.Tune("https://codeberg.com") + assert.EqualValues(t, 2*time.Second, c.Runner.FetchInterval) + }) +} diff --git a/internal/pkg/config/deprecated.go b/internal/pkg/config/deprecated.go new file mode 100644 index 0000000..b5051aa --- /dev/null +++ b/internal/pkg/config/deprecated.go @@ -0,0 +1,62 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package config + +import ( + "os" + "strconv" + "strings" + + log "github.com/sirupsen/logrus" +) + +// Deprecated: could be removed in the future. TODO: remove it when Gitea 1.20.0 is released. +// Be compatible with old envs. +func compatibleWithOldEnvs(fileUsed bool, cfg *Config) { + handleEnv := func(key string) (string, bool) { + if v, ok := os.LookupEnv(key); ok { + if fileUsed { + log.Warnf("env %s has been ignored because config file is used", key) + return "", false + } + log.Warnf("env %s will be deprecated, please use config file instead", key) + return v, true + } + return "", false + } + + if v, ok := handleEnv("GITEA_DEBUG"); ok { + if b, _ := strconv.ParseBool(v); b { + cfg.Log.Level = "debug" + } + } + if v, ok := handleEnv("GITEA_TRACE"); ok { + if b, _ := strconv.ParseBool(v); b { + cfg.Log.Level = "trace" + } + } + if v, ok := handleEnv("GITEA_RUNNER_CAPACITY"); ok { + if i, _ := strconv.Atoi(v); i > 0 { + cfg.Runner.Capacity = i + } + } + if v, ok := handleEnv("GITEA_RUNNER_FILE"); ok { + cfg.Runner.File = v + } + if v, ok := handleEnv("GITEA_RUNNER_ENVIRON"); ok { + splits := strings.Split(v, ",") + if cfg.Runner.Envs == nil { + cfg.Runner.Envs = map[string]string{} + } + for _, split := range splits { + kv := strings.SplitN(split, ":", 2) + if len(kv) == 2 && kv[0] != "" { + cfg.Runner.Envs[kv[0]] = kv[1] + } + } + } + if v, ok := handleEnv("GITEA_RUNNER_ENV_FILE"); ok { + cfg.Runner.EnvFile = v + } +} diff --git a/internal/pkg/config/embed.go b/internal/pkg/config/embed.go new file mode 100644 index 0000000..cf445cf --- /dev/null +++ b/internal/pkg/config/embed.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package config + +import _ "embed" + +//go:embed config.example.yaml +var Example []byte diff --git a/internal/pkg/config/registration.go b/internal/pkg/config/registration.go new file mode 100644 index 0000000..be66b4f --- /dev/null +++ b/internal/pkg/config/registration.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package config + +import ( + "encoding/json" + "os" +) + +const registrationWarning = "This file is automatically generated by act-runner. Do not edit it manually unless you know what you are doing. 
Removing this file will cause act runner to re-register as a new runner." + +// Registration is the registration information for a runner +type Registration struct { + Warning string `json:"WARNING"` // Warning message to display, it's always the registrationWarning constant + + ID int64 `json:"id"` + UUID string `json:"uuid"` + Name string `json:"name"` + Token string `json:"token"` + Address string `json:"address"` + Labels []string `json:"labels"` +} + +func LoadRegistration(file string) (*Registration, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var reg Registration + if err := json.NewDecoder(f).Decode(®); err != nil { + return nil, err + } + + reg.Warning = "" + + return ®, nil +} + +func SaveRegistration(file string, reg *Registration) error { + f, err := os.Create(file) + if err != nil { + return err + } + defer f.Close() + + reg.Warning = registrationWarning + + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + return enc.Encode(reg) +} diff --git a/internal/pkg/envcheck/doc.go b/internal/pkg/envcheck/doc.go new file mode 100644 index 0000000..8641a77 --- /dev/null +++ b/internal/pkg/envcheck/doc.go @@ -0,0 +1,5 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +// Package envcheck provides a simple way to check if the environment is ready to run jobs. +package envcheck diff --git a/internal/pkg/envcheck/docker.go b/internal/pkg/envcheck/docker.go new file mode 100644 index 0000000..f115bc7 --- /dev/null +++ b/internal/pkg/envcheck/docker.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package envcheck + +import ( + "context" + "fmt" + + "github.com/docker/docker/client" +) + +func CheckIfDockerRunning(ctx context.Context, configDockerHost string) error { + opts := []client.Opt{ + client.FromEnv, + } + + if configDockerHost != "" { + opts = append(opts, client.WithHost(configDockerHost)) + } + + cli, err := client.NewClientWithOpts(opts...) + if err != nil { + return err + } + defer cli.Close() + + _, err = cli.Ping(ctx) + if err != nil { + return fmt.Errorf("cannot ping the docker daemon. is it running? %w", err) + } + + return nil +} diff --git a/internal/pkg/labels/labels.go b/internal/pkg/labels/labels.go new file mode 100644 index 0000000..f448fdf --- /dev/null +++ b/internal/pkg/labels/labels.go @@ -0,0 +1,109 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package labels + +import ( + "fmt" + "strings" +) + +const ( + SchemeHost = "host" + SchemeDocker = "docker" + SchemeLXC = "lxc" +) + +type Label struct { + Name string + Schema string + Arg string +} + +func Parse(str string) (*Label, error) { + splits := strings.SplitN(str, ":", 3) + label := &Label{ + Name: splits[0], + Schema: "host", + Arg: "", + } + if len(splits) >= 2 { + label.Schema = splits[1] + } + if len(splits) >= 3 { + label.Arg = splits[2] + } + if label.Schema != SchemeHost && label.Schema != SchemeDocker && label.Schema != SchemeLXC { + return nil, fmt.Errorf("unsupported schema: %s", label.Schema) + } + return label, nil +} + +type Labels []*Label + +func (l Labels) RequireDocker() bool { + for _, label := range l { + if label.Schema == SchemeDocker { + return true + } + } + return false +} + +func (l Labels) PickPlatform(runsOn []string) string { + platforms := make(map[string]string, len(l)) + for _, label := range l { + switch label.Schema { + case SchemeDocker: + // "//" will be ignored + platforms[label.Name] = strings.TrimPrefix(label.Arg, "//") + case SchemeHost: + platforms[label.Name] = "-self-hosted" + case SchemeLXC: + platforms[label.Name] = "lxc:" + strings.TrimPrefix(label.Arg, "//") + default: + // It should not happen, because Parse has checked it. + continue + } + } + for _, v := range runsOn { + if v, ok := platforms[v]; ok { + return v + } + } + + // TODO: support multiple labels + // like: + // ["ubuntu-22.04"] => "ubuntu:22.04" + // ["with-gpu"] => "linux:with-gpu" + // ["ubuntu-22.04", "with-gpu"] => "ubuntu:22.04_with-gpu" + + // return default. + // So the runner receives a task with a label that the runner doesn't have, + // it happens when the user have edited the label of the runner in the web UI. + // TODO: it may be not correct, what if the runner is used as host mode only? + return "node:20-bullseye" +} + +func (l Labels) Names() []string { + names := make([]string, 0, len(l)) + for _, label := range l { + names = append(names, label.Name) + } + return names +} + +func (l Labels) ToStrings() []string { + ls := make([]string, 0, len(l)) + for _, label := range l { + lbl := label.Name + if label.Schema != "" { + lbl += ":" + label.Schema + if label.Arg != "" { + lbl += ":" + label.Arg + } + } + ls = append(ls, lbl) + } + return ls +} diff --git a/internal/pkg/labels/labels_test.go b/internal/pkg/labels/labels_test.go new file mode 100644 index 0000000..e46a27b --- /dev/null +++ b/internal/pkg/labels/labels_test.go @@ -0,0 +1,63 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package labels + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" +) + +func TestParse(t *testing.T) { + tests := []struct { + args string + want *Label + wantErr bool + }{ + { + args: "ubuntu:docker://node:18", + want: &Label{ + Name: "ubuntu", + Schema: "docker", + Arg: "//node:18", + }, + wantErr: false, + }, + { + args: "ubuntu:host", + want: &Label{ + Name: "ubuntu", + Schema: "host", + Arg: "", + }, + wantErr: false, + }, + { + args: "ubuntu", + want: &Label{ + Name: "ubuntu", + Schema: "host", + Arg: "", + }, + wantErr: false, + }, + { + args: "ubuntu:vm:ubuntu-18.04", + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.args, func(t *testing.T) { + got, err := Parse(tt.args) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.DeepEqual(t, got, tt.want) + }) + } +} diff --git a/internal/pkg/report/reporter.go b/internal/pkg/report/reporter.go new file mode 100644 index 0000000..cee5062 --- /dev/null +++ b/internal/pkg/report/reporter.go @@ -0,0 +1,437 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package report + +import ( + "context" + "fmt" + "regexp" + "strings" + "sync" + "time" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + "connectrpc.com/connect" + retry "github.com/avast/retry-go/v4" + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + + "gitea.com/gitea/act_runner/internal/pkg/client" +) + +type Reporter struct { + ctx context.Context + cancel context.CancelFunc + + closed bool + client client.Client + clientM sync.Mutex + + logOffset int + logRows []*runnerv1.LogRow + logReplacer *strings.Replacer + oldnew []string + reportInterval time.Duration + + state *runnerv1.TaskState + stateMu sync.RWMutex + outputs sync.Map + + debugOutputEnabled bool + stopCommandEndToken string +} + +func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter { + var oldnew []string + if v := task.Context.Fields["token"].GetStringValue(); v != "" { + oldnew = append(oldnew, v, "***") + } + if v := task.Context.Fields["gitea_runtime_token"].GetStringValue(); v != "" { + oldnew = append(oldnew, v, "***") + } + for _, v := range task.Secrets { + oldnew = append(oldnew, v, "***") + } + + rv := &Reporter{ + ctx: ctx, + cancel: cancel, + client: client, + oldnew: oldnew, + reportInterval: reportInterval, + logReplacer: strings.NewReplacer(oldnew...), + state: &runnerv1.TaskState{ + Id: task.Id, + }, + } + + if task.Secrets["ACTIONS_STEP_DEBUG"] == "true" { + rv.debugOutputEnabled = true + } + + return rv +} + +func (r *Reporter) ResetSteps(l int) { + r.stateMu.Lock() + defer r.stateMu.Unlock() + for i := 0; i < l; i++ { + r.state.Steps = append(r.state.Steps, &runnerv1.StepState{ + Id: int64(i), + }) + } +} + +func (r *Reporter) Levels() []log.Level { + return log.AllLevels +} + +func appendIfNotNil[T any](s []*T, v *T) []*T { + if v != nil { + return append(s, v) + } + return s +} + +func (r *Reporter) Fire(entry *log.Entry) error { + r.stateMu.Lock() + defer r.stateMu.Unlock() + + log.WithFields(entry.Data).Trace(entry.Message) + + timestamp := entry.Time + if r.state.StartedAt == nil { + r.state.StartedAt = timestamppb.New(timestamp) + } + + stage := entry.Data["stage"] + + if stage != "Main" { + if v, ok := entry.Data["jobResult"]; ok 
{ + if jobResult, ok := r.parseResult(v); ok { + r.state.Result = jobResult + r.state.StoppedAt = timestamppb.New(timestamp) + for _, s := range r.state.Steps { + if s.Result == runnerv1.Result_RESULT_UNSPECIFIED { + s.Result = runnerv1.Result_RESULT_CANCELLED + if jobResult == runnerv1.Result_RESULT_SKIPPED { + s.Result = runnerv1.Result_RESULT_SKIPPED + } + } + } + } + } + if !r.duringSteps() { + r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry)) + } + return nil + } + + var step *runnerv1.StepState + if v, ok := entry.Data["stepNumber"]; ok { + if v, ok := v.(int); ok && len(r.state.Steps) > v { + step = r.state.Steps[v] + } + } + if step == nil { + if !r.duringSteps() { + r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry)) + } + return nil + } + + if step.StartedAt == nil { + step.StartedAt = timestamppb.New(timestamp) + } + if v, ok := entry.Data["raw_output"]; ok { + if rawOutput, ok := v.(bool); ok && rawOutput { + if row := r.parseLogRow(entry); row != nil { + if step.LogLength == 0 { + step.LogIndex = int64(r.logOffset + len(r.logRows)) + } + step.LogLength++ + r.logRows = append(r.logRows, row) + } + } + } else if !r.duringSteps() { + r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry)) + } + if v, ok := entry.Data["stepResult"]; ok { + if stepResult, ok := r.parseResult(v); ok { + if step.LogLength == 0 { + step.LogIndex = int64(r.logOffset + len(r.logRows)) + } + step.Result = stepResult + step.StoppedAt = timestamppb.New(timestamp) + } + } + + return nil +} + +func (r *Reporter) RunDaemon() { + if r.closed { + return + } + if r.ctx.Err() != nil { + return + } + + _ = r.ReportLog(false) + _ = r.ReportState() + + time.AfterFunc(r.reportInterval, r.RunDaemon) +} + +func (r *Reporter) Logf(format string, a ...interface{}) { + r.stateMu.Lock() + defer r.stateMu.Unlock() + + r.logf(format, a...) 
+} + +func (r *Reporter) logf(format string, a ...interface{}) { + if !r.duringSteps() { + r.logRows = append(r.logRows, &runnerv1.LogRow{ + Time: timestamppb.Now(), + Content: fmt.Sprintf(format, a...), + }) + } +} + +func (r *Reporter) SetOutputs(outputs map[string]string) { + r.stateMu.Lock() + defer r.stateMu.Unlock() + + for k, v := range outputs { + if len(k) > 255 { + r.logf("ignore output because the key is too long: %q", k) + continue + } + if l := len(v); l > 1024*1024 { + log.Println("ignore output because the value is too long:", k, l) + r.logf("ignore output because the value %q is too long: %d", k, l) + } + if _, ok := r.outputs.Load(k); ok { + continue + } + r.outputs.Store(k, v) + } +} + +func (r *Reporter) Close(lastWords string) error { + r.closed = true + + r.stateMu.Lock() + if r.state.Result == runnerv1.Result_RESULT_UNSPECIFIED { + if lastWords == "" { + lastWords = "Early termination" + } + for _, v := range r.state.Steps { + if v.Result == runnerv1.Result_RESULT_UNSPECIFIED { + v.Result = runnerv1.Result_RESULT_CANCELLED + } + } + r.state.Result = runnerv1.Result_RESULT_FAILURE + r.logRows = append(r.logRows, &runnerv1.LogRow{ + Time: timestamppb.Now(), + Content: lastWords, + }) + r.state.StoppedAt = timestamppb.Now() + } else if lastWords != "" { + r.logRows = append(r.logRows, &runnerv1.LogRow{ + Time: timestamppb.Now(), + Content: lastWords, + }) + } + r.stateMu.Unlock() + + return retry.Do(func() error { + if err := r.ReportLog(true); err != nil { + return err + } + return r.ReportState() + }, retry.Context(r.ctx)) +} + +func (r *Reporter) ReportLog(noMore bool) error { + r.clientM.Lock() + defer r.clientM.Unlock() + + r.stateMu.RLock() + rows := r.logRows + r.stateMu.RUnlock() + + resp, err := r.client.UpdateLog(r.ctx, connect.NewRequest(&runnerv1.UpdateLogRequest{ + TaskId: r.state.Id, + Index: int64(r.logOffset), + Rows: rows, + NoMore: noMore, + })) + if err != nil { + return err + } + + ack := int(resp.Msg.AckIndex) + if ack < r.logOffset { + return fmt.Errorf("submitted logs are lost") + } + + r.stateMu.Lock() + r.logRows = r.logRows[ack-r.logOffset:] + r.logOffset = ack + r.stateMu.Unlock() + + if noMore && ack < r.logOffset+len(rows) { + return fmt.Errorf("not all logs are submitted") + } + + return nil +} + +func (r *Reporter) ReportState() error { + r.clientM.Lock() + defer r.clientM.Unlock() + + r.stateMu.RLock() + state := proto.Clone(r.state).(*runnerv1.TaskState) + r.stateMu.RUnlock() + + outputs := make(map[string]string) + r.outputs.Range(func(k, v interface{}) bool { + if val, ok := v.(string); ok { + outputs[k.(string)] = val + } + return true + }) + + resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{ + State: state, + Outputs: outputs, + })) + if err != nil { + return err + } + + for _, k := range resp.Msg.SentOutputs { + r.outputs.Store(k, struct{}{}) + } + + if resp.Msg.State != nil && resp.Msg.State.Result == runnerv1.Result_RESULT_CANCELLED { + r.cancel() + } + + var noSent []string + r.outputs.Range(func(k, v interface{}) bool { + if _, ok := v.(string); ok { + noSent = append(noSent, k.(string)) + } + return true + }) + if len(noSent) > 0 { + return fmt.Errorf("there are still outputs that have not been sent: %v", noSent) + } + + return nil +} + +func (r *Reporter) duringSteps() bool { + if steps := r.state.Steps; len(steps) == 0 { + return false + } else if first := steps[0]; first.Result == runnerv1.Result_RESULT_UNSPECIFIED && first.LogLength == 0 { + return false + } else if last := 
steps[len(steps)-1]; last.Result != runnerv1.Result_RESULT_UNSPECIFIED { + return false + } + return true +} + +var stringToResult = map[string]runnerv1.Result{ + "success": runnerv1.Result_RESULT_SUCCESS, + "failure": runnerv1.Result_RESULT_FAILURE, + "skipped": runnerv1.Result_RESULT_SKIPPED, + "cancelled": runnerv1.Result_RESULT_CANCELLED, +} + +func (r *Reporter) parseResult(result interface{}) (runnerv1.Result, bool) { + str := "" + if v, ok := result.(string); ok { // for jobResult + str = v + } else if v, ok := result.(fmt.Stringer); ok { // for stepResult + str = v.String() + } + + ret, ok := stringToResult[str] + return ret, ok +} + +var cmdRegex = regexp.MustCompile(`^::([^ :]+)( .*)?::(.*)$`) + +func (r *Reporter) handleCommand(originalContent, command, parameters, value string) *string { + if r.stopCommandEndToken != "" && command != r.stopCommandEndToken { + return &originalContent + } + + switch command { + case "add-mask": + r.addMask(value) + return nil + case "debug": + if r.debugOutputEnabled { + return &value + } + return nil + + case "notice": + // Not implemented yet, so just return the original content. + return &originalContent + case "warning": + // Not implemented yet, so just return the original content. + return &originalContent + case "error": + // Not implemented yet, so just return the original content. + return &originalContent + case "group": + // Rewriting into ##[] syntax which the frontend understands + content := "##[group]" + value + return &content + case "endgroup": + // Ditto + content := "##[endgroup]" + return &content + case "stop-commands": + r.stopCommandEndToken = value + return nil + case r.stopCommandEndToken: + r.stopCommandEndToken = "" + return nil + } + return &originalContent +} + +func (r *Reporter) parseLogRow(entry *log.Entry) *runnerv1.LogRow { + content := strings.TrimRightFunc(entry.Message, func(r rune) bool { return r == '\r' || r == '\n' }) + + matches := cmdRegex.FindStringSubmatch(content) + if matches != nil { + if output := r.handleCommand(content, matches[1], matches[2], matches[3]); output != nil { + content = *output + } else { + return nil + } + } + + content = r.logReplacer.Replace(content) + + return &runnerv1.LogRow{ + Time: timestamppb.New(entry.Time), + Content: strings.ToValidUTF8(content, "?"), + } +} + +func (r *Reporter) addMask(msg string) { + r.oldnew = append(r.oldnew, msg, "***") + r.logReplacer = strings.NewReplacer(r.oldnew...) +} diff --git a/internal/pkg/report/reporter_test.go b/internal/pkg/report/reporter_test.go new file mode 100644 index 0000000..524e972 --- /dev/null +++ b/internal/pkg/report/reporter_test.go @@ -0,0 +1,198 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package report + +import ( + "context" + "strings" + "testing" + "time" + + runnerv1 "code.gitea.io/actions-proto-go/runner/v1" + connect_go "connectrpc.com/connect" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" + + "gitea.com/gitea/act_runner/internal/pkg/client/mocks" +) + +func TestReporter_parseLogRow(t *testing.T) { + tests := []struct { + name string + debugOutputEnabled bool + args []string + want []string + }{ + { + "No command", false, + []string{"Hello, world!"}, + []string{"Hello, world!"}, + }, + { + "Add-mask", false, + []string{ + "foo mysecret bar", + "::add-mask::mysecret", + "foo mysecret bar", + }, + []string{ + "foo mysecret bar", + "<nil>", + "foo *** bar", + }, + }, + { + "Debug enabled", true, + []string{ + "::debug::GitHub Actions runtime token access controls", + }, + []string{ + "GitHub Actions runtime token access controls", + }, + }, + { + "Debug not enabled", false, + []string{ + "::debug::GitHub Actions runtime token access controls", + }, + []string{ + "<nil>", + }, + }, + { + "notice", false, + []string{ + "::notice file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + []string{ + "::notice file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + }, + { + "warning", false, + []string{ + "::warning file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + []string{ + "::warning file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + }, + { + "error", false, + []string{ + "::error file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + []string{ + "::error file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work", + }, + }, + { + "group", false, + []string{ + "::group::", + "::endgroup::", + }, + []string{ + "##[group]", + "##[endgroup]", + }, + }, + { + "stop-commands", false, + []string{ + "::add-mask::foo", + "::stop-commands::myverycoolstoptoken", + "::add-mask::bar", + "::debug::Stuff", + "myverycoolstoptoken", + "::add-mask::baz", + "::myverycoolstoptoken::", + "::add-mask::wibble", + "foo bar baz wibble", + }, + []string{ + "<nil>", + "<nil>", + "::add-mask::bar", + "::debug::Stuff", + "myverycoolstoptoken", + "::add-mask::baz", + "<nil>", + "<nil>", + "*** bar baz ***", + }, + }, + { + "unknown command", false, + []string{ + "::set-mask::foo", + }, + []string{ + "::set-mask::foo", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Reporter{ + logReplacer: strings.NewReplacer(), + debugOutputEnabled: tt.debugOutputEnabled, + } + for idx, arg := range tt.args { + rv := r.parseLogRow(&log.Entry{Message: arg}) + got := "<nil>" + + if rv != nil { + got = rv.Content + } + + assert.Equal(t, tt.want[idx], got) + } + }) + } +} + +func TestReporter_Fire(t *testing.T) { + t.Run("ignore command lines", func(t *testing.T) { + client := mocks.NewClient(t) + client.On("UpdateLog", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateLogRequest]) (*connect_go.Response[runnerv1.UpdateLogResponse], error) { + t.Logf("Received UpdateLog: %s", req.Msg.String()) + return connect_go.NewResponse(&runnerv1.UpdateLogResponse{ + AckIndex: req.Msg.Index + int64(len(req.Msg.Rows)), + }), nil + }) + 
client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) { + t.Logf("Received UpdateTask: %s", req.Msg.String()) + return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil + }) + ctx, cancel := context.WithCancel(context.Background()) + taskCtx, err := structpb.NewStruct(map[string]interface{}{}) + require.NoError(t, err) + reporter := NewReporter(ctx, cancel, client, &runnerv1.Task{ + Context: taskCtx, + }, time.Second) + defer func() { + assert.NoError(t, reporter.Close("")) + }() + reporter.ResetSteps(5) + + dataStep0 := map[string]interface{}{ + "stage": "Main", + "stepNumber": 0, + "raw_output": true, + } + + assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0})) + assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0})) + assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0})) + assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0})) + assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0})) + assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0})) + + assert.Equal(t, int64(3), reporter.state.Steps[0].LogLength) + }) +} diff --git a/internal/pkg/ver/version.go b/internal/pkg/ver/version.go new file mode 100644 index 0000000..3c07a18 --- /dev/null +++ b/internal/pkg/ver/version.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package ver + +// go build -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=1.2.3" +var version = "dev" + +func Version() string { + return version +} @@ -0,0 +1,19 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package main + +import ( + "context" + "os/signal" + "syscall" + + "gitea.com/gitea/act_runner/internal/app/cmd" +) + +func main() { + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + // run the command + cmd.Execute(ctx) +} diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..0dcdb34 --- /dev/null +++ b/renovate.json @@ -0,0 +1,4 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": ["local>forgejo/renovate-config"] +} diff --git a/scripts/rootless.sh b/scripts/rootless.sh new file mode 100755 index 0000000..310a03b --- /dev/null +++ b/scripts/rootless.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# wait for docker daemon +while ! nc -z localhost 2376 </dev/null; do + echo 'waiting for docker daemon...' + sleep 5 +done + +. /opt/act/run.sh diff --git a/scripts/run.sh b/scripts/run.sh new file mode 100755 index 0000000..89626b4 --- /dev/null +++ b/scripts/run.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +if [[ ! -d /data ]]; then + mkdir -p /data +fi + +cd /data + +CONFIG_ARG="" +if [[ ! -z "${CONFIG_FILE}" ]]; then + CONFIG_ARG="--config ${CONFIG_FILE}" +fi +EXTRA_ARGS="" +if [[ ! -z "${GITEA_RUNNER_LABELS}" ]]; then + EXTRA_ARGS="${EXTRA_ARGS} --labels ${GITEA_RUNNER_LABELS}" +fi + +# Use the same ENV variable names as https://github.com/vegardit/docker-gitea-act-runner + +if [[ ! 
-s .runner ]]; then
+  try=$((try + 1))
+  success=0
+
+  # The point of this loop is to make it simple, when running both forgejo-runner and gitea in docker,
+  # for the forgejo-runner to wait a moment for gitea to become available before erroring out. Within
+  # the context of a single docker-compose, something similar could be done via healthchecks, but
+  # this is more flexible.
+  while [[ $success -eq 0 ]] && [[ $try -lt ${GITEA_MAX_REG_ATTEMPTS:-10} ]]; do
+    forgejo-runner register \
+      --instance "${GITEA_INSTANCE_URL}" \
+      --token "${GITEA_RUNNER_REGISTRATION_TOKEN}" \
+      --name "${GITEA_RUNNER_NAME:-`hostname`}" \
+      ${CONFIG_ARG} ${EXTRA_ARGS} --no-interactive 2>&1 | tee /tmp/reg.log
+
+    cat /tmp/reg.log | grep 'Runner registered successfully' > /dev/null
+    if [[ $? -eq 0 ]]; then
+      echo "SUCCESS"
+      success=1
+    else
+      echo "Waiting to retry ..."
+      sleep 5
+      # Count the failed attempt so that GITEA_MAX_REG_ATTEMPTS actually limits the retries.
+      try=$((try + 1))
+    fi
+  done
+fi
+# Prevent the registration token from being read from the forgejo-runner process environment
+unset GITEA_RUNNER_REGISTRATION_TOKEN
+
+forgejo-runner daemon ${CONFIG_ARG}
diff --git a/scripts/supervisord.conf b/scripts/supervisord.conf
new file mode 100644
index 0000000..8c45f5b
--- /dev/null
+++ b/scripts/supervisord.conf
@@ -0,0 +1,13 @@
+[supervisord]
+nodaemon=true
+logfile=/dev/null
+logfile_maxbytes=0
+
+[program:dockerd]
+command=/usr/local/bin/dockerd-entrypoint.sh
+
+[program:act_runner]
+stdout_logfile=/dev/fd/1
+stdout_logfile_maxbytes=0
+redirect_stderr=true
+command=/opt/act/rootless.sh
diff --git a/scripts/systemd.md b/scripts/systemd.md
new file mode 100644
index 0000000..089dd61
--- /dev/null
+++ b/scripts/systemd.md
@@ -0,0 +1,67 @@
+# Forgejo Runner with systemd User Services
+
+It is possible to use systemd's user services together with
+[podman](https://podman.io/) to run `forgejo-runner` under a normal user
+account without any privileges and have it start automatically on boot.
+
+This was last tested on Fedora 39 on 2024-02-19, but should work elsewhere as
+well.
+
+Place the `forgejo-runner` binary in `/usr/local/bin/forgejo-runner` and make
+sure it can be executed (`chmod +x /usr/local/bin/forgejo-runner`).
+
+Install `podman`:
+
+```bash
+$ sudo dnf -y install podman
+```
+
+You *may* need to reboot your system after installing `podman`, as it modifies
+some system configuration that may only take effect after a reboot. Without
+rebooting, the runner errored out when trying to set firewall rules; a reboot
+fixed it.
+
+Start and enable the `podman` socket as a user service:
+
+```bash
+$ systemctl --user start podman.socket
+$ systemctl --user enable podman.socket
+```
+
+Make sure your processes are allowed to keep running after you log out:
+
+```bash
+$ loginctl enable-linger
+```
+
+Create the file `/etc/systemd/user/forgejo-runner.service` with the following
+content:
+
+```
+[Unit]
+Description=Forgejo Runner
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/forgejo-runner daemon
+Restart=on-failure
+
+[Install]
+WantedBy=default.target
+```
+
+Now activate it as a user service:
+
+```bash
+$ systemctl --user daemon-reload
+$ systemctl --user start forgejo-runner
+$ systemctl --user enable forgejo-runner
+```
+
+To view and follow the log of `forgejo-runner`:
+
+```bash
+$ journalctl -f -t forgejo-runner
+```
+
+If you reboot your system, everything should come back up automatically.
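+
+If the runner does not detect the podman socket on its own, you can point it
+there explicitly with the `container.docker_host` option described in
+`config.example.yaml`. The snippet below is only a sketch: the socket path, the
+user ID (`1000`), and the config file location are assumptions that may differ
+on your system. Rootless podman normally exposes its socket under
+`$XDG_RUNTIME_DIR/podman/podman.sock` once `podman.socket` is active.
+
+```yaml
+# Hypothetical location: ~/.config/forgejo-runner/config.yml
+container:
+  # Use the rootless podman socket instead of relying on auto-detection.
+  # Replace 1000 with your own user ID (see `id -u`).
+  docker_host: "unix:///run/user/1000/podman/podman.sock"
+```
+
+If you use such a file, pass it to the daemon in the unit file, for example
+`ExecStart=/usr/local/bin/forgejo-runner daemon --config /path/to/config.yml`.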