Diffstat
-rw-r--r--  routers/api/actions/actions.go               24
-rw-r--r--  routers/api/actions/artifact.pb.go         1058
-rw-r--r--  routers/api/actions/artifact.proto           73
-rw-r--r--  routers/api/actions/artifacts.go            507
-rw-r--r--  routers/api/actions/artifacts_chunks.go     301
-rw-r--r--  routers/api/actions/artifacts_utils.go       94
-rw-r--r--  routers/api/actions/artifactsv4.go          599
-rw-r--r--  routers/api/actions/ping/ping.go             38
-rw-r--r--  routers/api/actions/ping/ping_test.go        61
-rw-r--r--  routers/api/actions/runner/interceptor.go    80
-rw-r--r--  routers/api/actions/runner/runner.go        289
-rw-r--r--  routers/api/actions/runner/utils.go         189
12 files changed, 3313 insertions, 0 deletions
diff --git a/routers/api/actions/actions.go b/routers/api/actions/actions.go
new file mode 100644
index 0000000..a418b3a
--- /dev/null
+++ b/routers/api/actions/actions.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/actions/ping"
+ "code.gitea.io/gitea/routers/api/actions/runner"
+)
+
+func Routes(prefix string) *web.Route {
+ m := web.NewRoute()
+
+ path, handler := ping.NewPingServiceHandler()
+ m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)
+
+ path, handler = runner.NewRunnerServiceHandler()
+ m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)
+
+ return m
+}
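Routes registers one POST catch-all per RPC service and strips the mount
prefix itself, so the returned router is meant to be mounted where requests
still carry the full path. A minimal wiring sketch (illustrative only: the
prefix, port, and server setup are assumptions; web.Route implements
http.Handler in Gitea):

    package main

    import (
        "net/http"

        actions_router "code.gitea.io/gitea/routers/api/actions"
    )

    func main() {
        prefix := "/api/actions"
        mux := http.NewServeMux()
        // Routes calls http.StripPrefix(prefix, ...) internally, so the
        // sub-router is mounted on the unstripped path here.
        mux.Handle(prefix+"/", actions_router.Routes(prefix))
        _ = http.ListenAndServe(":3000", mux)
    }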
diff --git a/routers/api/actions/artifact.pb.go b/routers/api/actions/artifact.pb.go
new file mode 100644
index 0000000..590eda9
--- /dev/null
+++ b/routers/api/actions/artifact.pb.go
@@ -0,0 +1,1058 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
+// source: artifact.proto
+
+package actions
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CreateArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
+ Version int32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *CreateArtifactRequest) Reset() {
+ *x = CreateArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateArtifactRequest) ProtoMessage() {}
+
+func (x *CreateArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateArtifactRequest.ProtoReflect.Descriptor instead.
+func (*CreateArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetExpiresAt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpiresAt
+ }
+ return nil
+}
+
+func (x *CreateArtifactRequest) GetVersion() int32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+type CreateArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ SignedUploadUrl string `protobuf:"bytes,2,opt,name=signed_upload_url,json=signedUploadUrl,proto3" json:"signed_upload_url,omitempty"`
+}
+
+func (x *CreateArtifactResponse) Reset() {
+ *x = CreateArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateArtifactResponse) ProtoMessage() {}
+
+func (x *CreateArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateArtifactResponse.ProtoReflect.Descriptor instead.
+func (*CreateArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CreateArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *CreateArtifactResponse) GetSignedUploadUrl() string {
+ if x != nil {
+ return x.SignedUploadUrl
+ }
+ return ""
+}
+
+type FinalizeArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ Hash *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (x *FinalizeArtifactRequest) Reset() {
+ *x = FinalizeArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeArtifactRequest) ProtoMessage() {}
+
+func (x *FinalizeArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeArtifactRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FinalizeArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *FinalizeArtifactRequest) GetHash() *wrapperspb.StringValue {
+ if x != nil {
+ return x.Hash
+ }
+ return nil
+}
+
+type FinalizeArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+}
+
+func (x *FinalizeArtifactResponse) Reset() {
+ *x = FinalizeArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeArtifactResponse) ProtoMessage() {}
+
+func (x *FinalizeArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeArtifactResponse.ProtoReflect.Descriptor instead.
+func (*FinalizeArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *FinalizeArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *FinalizeArtifactResponse) GetArtifactId() int64 {
+ if x != nil {
+ return x.ArtifactId
+ }
+ return 0
+}
+
+type ListArtifactsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ NameFilter *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=name_filter,json=nameFilter,proto3" json:"name_filter,omitempty"`
+ IdFilter *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=id_filter,json=idFilter,proto3" json:"id_filter,omitempty"`
+}
+
+func (x *ListArtifactsRequest) Reset() {
+ *x = ListArtifactsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsRequest) ProtoMessage() {}
+
+func (x *ListArtifactsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsRequest.ProtoReflect.Descriptor instead.
+func (*ListArtifactsRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListArtifactsRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsRequest) GetNameFilter() *wrapperspb.StringValue {
+ if x != nil {
+ return x.NameFilter
+ }
+ return nil
+}
+
+func (x *ListArtifactsRequest) GetIdFilter() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.IdFilter
+ }
+ return nil
+}
+
+type ListArtifactsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Artifacts []*ListArtifactsResponse_MonolithArtifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"`
+}
+
+func (x *ListArtifactsResponse) Reset() {
+ *x = ListArtifactsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsResponse) ProtoMessage() {}
+
+func (x *ListArtifactsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsResponse.ProtoReflect.Descriptor instead.
+func (*ListArtifactsResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListArtifactsResponse) GetArtifacts() []*ListArtifactsResponse_MonolithArtifact {
+ if x != nil {
+ return x.Artifacts
+ }
+ return nil
+}
+
+type ListArtifactsResponse_MonolithArtifact struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ DatabaseId int64 `protobuf:"varint,3,opt,name=database_id,json=databaseId,proto3" json:"database_id,omitempty"`
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"`
+ CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) Reset() {
+ *x = ListArtifactsResponse_MonolithArtifact{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsResponse_MonolithArtifact) ProtoMessage() {}
+
+func (x *ListArtifactsResponse_MonolithArtifact) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsResponse_MonolithArtifact.ProtoReflect.Descriptor instead.
+func (*ListArtifactsResponse_MonolithArtifact) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetDatabaseId() int64 {
+ if x != nil {
+ return x.DatabaseId
+ }
+ return 0
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetCreatedAt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return nil
+}
+
+type GetSignedArtifactURLRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetSignedArtifactURLRequest) Reset() {
+ *x = GetSignedArtifactURLRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSignedArtifactURLRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSignedArtifactURLRequest) ProtoMessage() {}
+
+func (x *GetSignedArtifactURLRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSignedArtifactURLRequest.ProtoReflect.Descriptor instead.
+func (*GetSignedArtifactURLRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetSignedArtifactURLRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *GetSignedArtifactURLRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *GetSignedArtifactURLRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type GetSignedArtifactURLResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SignedUrl string `protobuf:"bytes,1,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"`
+}
+
+func (x *GetSignedArtifactURLResponse) Reset() {
+ *x = GetSignedArtifactURLResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSignedArtifactURLResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSignedArtifactURLResponse) ProtoMessage() {}
+
+func (x *GetSignedArtifactURLResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSignedArtifactURLResponse.ProtoReflect.Descriptor instead.
+func (*GetSignedArtifactURLResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GetSignedArtifactURLResponse) GetSignedUrl() string {
+ if x != nil {
+ return x.SignedUrl
+ }
+ return ""
+}
+
+type DeleteArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteArtifactRequest) Reset() {
+ *x = DeleteArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteArtifactRequest) ProtoMessage() {}
+
+func (x *DeleteArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteArtifactRequest.ProtoReflect.Descriptor instead.
+func (*DeleteArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *DeleteArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *DeleteArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *DeleteArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type DeleteArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+}
+
+func (x *DeleteArtifactResponse) Reset() {
+ *x = DeleteArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteArtifactResponse) ProtoMessage() {}
+
+func (x *DeleteArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteArtifactResponse.ProtoReflect.Descriptor instead.
+func (*DeleteArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *DeleteArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *DeleteArtifactResponse) GetArtifactId() int64 {
+ if x != nil {
+ return x.ArtifactId
+ }
+ return 0
+}
+
+var File_artifact_proto protoreflect.FileDescriptor
+
+var file_artifact_proto_rawDesc = []byte{
+ 0x0a, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xf5, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49,
+ 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f,
+ 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18,
+ 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02,
+ 0x6f, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73,
+ 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xe8,
+ 0x01, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49,
+ 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f,
+ 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x4b, 0x0a, 0x18, 0x46, 0x69, 0x6e,
+ 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63,
+ 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69,
+ 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x09, 0x69, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x08, 0x69, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x7c, 0x0a,
+ 0x15, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61,
+ 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72,
+ 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f,
+ 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
+ 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x26,
+ 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72,
+ 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
+ 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a,
+ 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75,
+ 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52,
+ 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04,
+ 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22,
+ 0xa6, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74,
+ 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53,
+ 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e,
+ 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75,
+ 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b,
+ 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x49, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_artifact_proto_rawDescOnce sync.Once
+ file_artifact_proto_rawDescData = file_artifact_proto_rawDesc
+)
+
+func file_artifact_proto_rawDescGZIP() []byte {
+ file_artifact_proto_rawDescOnce.Do(func() {
+ file_artifact_proto_rawDescData = protoimpl.X.CompressGZIP(file_artifact_proto_rawDescData)
+ })
+ return file_artifact_proto_rawDescData
+}
+
+var (
+ file_artifact_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
+ file_artifact_proto_goTypes = []interface{}{
+ (*CreateArtifactRequest)(nil), // 0: github.actions.results.api.v1.CreateArtifactRequest
+ (*CreateArtifactResponse)(nil), // 1: github.actions.results.api.v1.CreateArtifactResponse
+ (*FinalizeArtifactRequest)(nil), // 2: github.actions.results.api.v1.FinalizeArtifactRequest
+ (*FinalizeArtifactResponse)(nil), // 3: github.actions.results.api.v1.FinalizeArtifactResponse
+ (*ListArtifactsRequest)(nil), // 4: github.actions.results.api.v1.ListArtifactsRequest
+ (*ListArtifactsResponse)(nil), // 5: github.actions.results.api.v1.ListArtifactsResponse
+ (*ListArtifactsResponse_MonolithArtifact)(nil), // 6: github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact
+ (*GetSignedArtifactURLRequest)(nil), // 7: github.actions.results.api.v1.GetSignedArtifactURLRequest
+ (*GetSignedArtifactURLResponse)(nil), // 8: github.actions.results.api.v1.GetSignedArtifactURLResponse
+ (*DeleteArtifactRequest)(nil), // 9: github.actions.results.api.v1.DeleteArtifactRequest
+ (*DeleteArtifactResponse)(nil), // 10: github.actions.results.api.v1.DeleteArtifactResponse
+ (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+ (*wrapperspb.StringValue)(nil), // 12: google.protobuf.StringValue
+ (*wrapperspb.Int64Value)(nil), // 13: google.protobuf.Int64Value
+ }
+)
+
+var file_artifact_proto_depIdxs = []int32{
+ 11, // 0: github.actions.results.api.v1.CreateArtifactRequest.expires_at:type_name -> google.protobuf.Timestamp
+ 12, // 1: github.actions.results.api.v1.FinalizeArtifactRequest.hash:type_name -> google.protobuf.StringValue
+ 12, // 2: github.actions.results.api.v1.ListArtifactsRequest.name_filter:type_name -> google.protobuf.StringValue
+ 13, // 3: github.actions.results.api.v1.ListArtifactsRequest.id_filter:type_name -> google.protobuf.Int64Value
+ 6, // 4: github.actions.results.api.v1.ListArtifactsResponse.artifacts:type_name -> github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact
+ 11, // 5: github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact.created_at:type_name -> google.protobuf.Timestamp
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_artifact_proto_init() }
+func file_artifact_proto_init() {
+ if File_artifact_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_artifact_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsResponse_MonolithArtifact); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSignedArtifactURLRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSignedArtifactURLResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_artifact_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 11,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_artifact_proto_goTypes,
+ DependencyIndexes: file_artifact_proto_depIdxs,
+ MessageInfos: file_artifact_proto_msgTypes,
+ }.Build()
+ File_artifact_proto = out.File
+ file_artifact_proto_rawDesc = nil
+ file_artifact_proto_goTypes = nil
+ file_artifact_proto_depIdxs = nil
+}
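The generated getters above are nil-safe, and optional scalars use the
well-known wrapper types, so a nil Hash (or NameFilter/IdFilter) simply means
"not provided". A small construction sketch using the generated types; all
field values are placeholders:

    package main

    import (
        "fmt"

        actions "code.gitea.io/gitea/routers/api/actions"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        req := &actions.FinalizeArtifactRequest{
            WorkflowRunBackendId:    "5", // placeholder backend IDs
            WorkflowJobRunBackendId: "12",
            Name:                    "my-artifact",
            Size:                    1024,
            // optional field: wrap the digest in a StringValue
            Hash: wrapperspb.String("sha256:placeholder"),
        }
        // getters are nil-safe even when Hash is unset
        fmt.Println(req.GetName(), req.GetHash().GetValue())
    }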
diff --git a/routers/api/actions/artifact.proto b/routers/api/actions/artifact.proto
new file mode 100644
index 0000000..c68e5d0
--- /dev/null
+++ b/routers/api/actions/artifact.proto
@@ -0,0 +1,73 @@
+syntax = "proto3";
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+package github.actions.results.api.v1;
+
+message CreateArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+ google.protobuf.Timestamp expires_at = 4;
+ int32 version = 5;
+}
+
+message CreateArtifactResponse {
+ bool ok = 1;
+ string signed_upload_url = 2;
+}
+
+message FinalizeArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+ int64 size = 4;
+ google.protobuf.StringValue hash = 5;
+}
+
+message FinalizeArtifactResponse {
+ bool ok = 1;
+ int64 artifact_id = 2;
+}
+
+message ListArtifactsRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ google.protobuf.StringValue name_filter = 3;
+ google.protobuf.Int64Value id_filter = 4;
+}
+
+message ListArtifactsResponse {
+ repeated ListArtifactsResponse_MonolithArtifact artifacts = 1;
+}
+
+message ListArtifactsResponse_MonolithArtifact {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ int64 database_id = 3;
+ string name = 4;
+ int64 size = 5;
+ google.protobuf.Timestamp created_at = 6;
+}
+
+message GetSignedArtifactURLRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+}
+
+message GetSignedArtifactURLResponse {
+ string signed_url = 1;
+}
+
+message DeleteArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+}
+
+message DeleteArtifactResponse {
+ bool ok = 1;
+ int64 artifact_id = 2;
+}
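Per the header of artifact.pb.go above, the bindings were generated with
protoc-gen-go v1.32.0 and protoc v4.25.2. Since the proto file declares no
go_package option, regeneration presumably needs the Go import path mapped on
the command line, roughly like this (exact flags are an assumption):

    protoc --go_out=. --go_opt=paths=source_relative \
        --go_opt=Martifact.proto=code.gitea.io/gitea/routers/api/actions \
        artifact.proto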
diff --git a/routers/api/actions/artifacts.go b/routers/api/actions/artifacts.go
new file mode 100644
index 0000000..bc29e44
--- /dev/null
+++ b/routers/api/actions/artifacts.go
@@ -0,0 +1,507 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+// A brief description of the GitHub Actions Artifacts API
+//
+// 1. Upload artifact
+// 1.1. Post upload url
+// Post: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
+// Request:
+// {
+// "Type": "actions_storage",
+// "Name": "artifact"
+// }
+// Response:
+// {
+// "fileContainerResourceUrl":"/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload"
+// }
+// this acquires an upload URL for the artifact upload
+// 1.2. Upload artifact
+// PUT: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
+// it uploads the chunk with these headers:
+// x-tfs-filelength: 1024 // total file length
+// content-length: 1024 // chunk length
+// x-actions-results-md5: md5sum // md5sum of chunk
+// content-range: bytes 0-1023/1024 // chunk range
+// all chunks are saved to one storage directory after the md5sum check
+// 1.3. Confirm upload
+// PATCH: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
+// it confirms the upload, merges all chunks into one file, and saves that file to storage
+//
+// 2. Download artifact
+// 2.1 list artifacts
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
+// Response:
+// {
+// "count": 1,
+// "value": [
+// {
+// "name": "artifact",
+// "fileContainerResourceUrl": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path"
+// }
+// ]
+// }
+// 2.2 download artifact
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path?api-version=6.0-preview
+// Response:
+// {
+// "value": [
+// {
+// "contentLocation": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download",
+// "path": "artifact/filename",
+// "itemType": "file"
+// }
+// ]
+// }
+// 2.3 download artifact file
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download?itemPath=artifact%2Ffilename
+// Response:
+// download file
+//
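+// Example (illustrative values): uploading a 1 KiB file "filename" in
+// artifact "artifact" for run 1 as a single chunk:
+// PUT: /api/actions_pipeline/_apis/pipelines/workflows/1/artifacts/{artifact_hash}/upload?itemPath=artifact%2Ffilename
+// x-tfs-filelength: 1024
+// content-length: 1024
+// x-actions-results-md5: <md5 of this chunk>
+// content-range: bytes 0-1023/1024
+// then confirm the upload (see confirmUploadArtifact below):
+// PATCH: /api/actions_pipeline/_apis/pipelines/workflows/1/artifacts?artifactName=artifact
+//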
+
+import (
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ web_types "code.gitea.io/gitea/modules/web/types"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+)
+
+const artifactRouteBase = "/_apis/pipelines/workflows/{run_id}/artifacts"
+
+type artifactContextKeyType struct{}
+
+var artifactContextKey = artifactContextKeyType{}
+
+type ArtifactContext struct {
+ *context.Base
+
+ ActionTask *actions.ActionTask
+}
+
+func init() {
+ web.RegisterResponseStatusProvider[*ArtifactContext](func(req *http.Request) web_types.ResponseStatusProvider {
+ return req.Context().Value(artifactContextKey).(*ArtifactContext)
+ })
+}
+
+func ArtifactsRoutes(prefix string) *web.Route {
+ m := web.NewRoute()
+ m.Use(ArtifactContexter())
+
+ r := artifactRoutes{
+ prefix: prefix,
+ fs: storage.ActionsArtifacts,
+ }
+
+ m.Group(artifactRouteBase, func() {
+ // list artifacts, acquire an upload URL, and confirm an upload
+ m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.confirmUploadArtifact)
+ // handle container artifact chunk upload
+ m.Put("/{artifact_hash}/upload", r.uploadArtifact)
+ // handle artifacts download
+ m.Get("/{artifact_hash}/download_url", r.getDownloadArtifactURL)
+ m.Get("/{artifact_id}/download", r.downloadArtifact)
+ })
+
+ return m
+}
+
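+// ArtifactContexter builds the middleware that authenticates the calling
+// runner task: first via the JWT issued to newer act_runner versions, then
+// falling back to the task token used by older runners. The resolved running
+// task is attached to the request context.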
+func ArtifactContexter() func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ base, baseCleanUp := context.NewBaseContext(resp, req)
+ defer baseCleanUp()
+
+ ctx := &ArtifactContext{Base: base}
+ ctx.AppendContextValue(artifactContextKey, ctx)
+
+ // action tasks call the server API with a Bearer ACTIONS_RUNTIME_TOKEN header,
+ // so the ACTIONS_RUNTIME_TOKEN must be verified
+ authHeader := req.Header.Get("Authorization")
+ if len(authHeader) == 0 || !strings.HasPrefix(authHeader, "Bearer ") {
+ ctx.Error(http.StatusUnauthorized, "Bad authorization header")
+ return
+ }
+
+ // Newer act_runner versions use a JWT to authenticate
+ tID, err := actions_service.ParseAuthorizationToken(req)
+
+ var task *actions.ActionTask
+ if err == nil {
+ task, err = actions.GetTaskByID(req.Context(), tID)
+ if err != nil {
+ log.Error("Error runner api getting task by ID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+ return
+ }
+ if task.Status != actions.StatusRunning {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return
+ }
+ } else {
+ // Older act_runner versions use the GITEA_TOKEN to authenticate
+ authToken := strings.TrimPrefix(authHeader, "Bearer ")
+
+ task, err = actions.GetRunningTaskByToken(req.Context(), authToken)
+ if err != nil {
+ log.Error("Error runner api getting task: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task")
+ return
+ }
+ }
+
+ if err := task.LoadJob(req.Context()); err != nil {
+ log.Error("Error runner api getting job: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+ return
+ }
+
+ ctx.ActionTask = task
+ next.ServeHTTP(ctx.Resp, ctx.Req)
+ })
+ }
+}
+
+type artifactRoutes struct {
+ prefix string
+ fs storage.ObjectStorage
+}
+
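+// buildArtifactURL composes an absolute artifact route URL. For example
+// (values assumed for illustration), with AppURL "https://gitea.example.com/"
+// and prefix "/api/actions_pipeline", buildArtifactURL(5, "ab12", "upload")
+// returns:
+// https://gitea.example.com/api/actions_pipeline/_apis/pipelines/workflows/5/artifacts/ab12/upload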
+func (ar artifactRoutes) buildArtifactURL(runID int64, artifactHash, suffix string) string {
+ uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(ar.prefix, "/") +
+ strings.ReplaceAll(artifactRouteBase, "{run_id}", strconv.FormatInt(runID, 10)) +
+ "/" + artifactHash + "/" + suffix
+ return uploadURL
+}
+
+type getUploadArtifactRequest struct {
+ Type string
+ Name string
+ RetentionDays int64
+}
+
+type getUploadArtifactResponse struct {
+ FileContainerResourceURL string `json:"fileContainerResourceUrl"`
+}
+
+// getUploadArtifactURL generates a URL for uploading an artifact
+func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ var req getUploadArtifactRequest
+ if err := json.NewDecoder(ctx.Req.Body).Decode(&req); err != nil {
+ log.Error("Error decode request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error decode request body")
+ return
+ }
+
+ // set retention days
+ retentionQuery := ""
+ if req.RetentionDays > 0 {
+ retentionQuery = fmt.Sprintf("?retentionDays=%d", req.RetentionDays)
+ }
+
+ // use md5(artifact_name) to create upload url
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(req.Name)))
+ resp := getUploadArtifactResponse{
+ FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "upload"+retentionQuery),
+ }
+ log.Debug("[artifact] get upload url: %s", resp.FileContainerResourceURL)
+ ctx.JSON(http.StatusOK, resp)
+}
+
+func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
+ task, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+ artifactName, artifactPath, ok := parseArtifactItemPath(ctx)
+ if !ok {
+ return
+ }
+
+ // check the owner's quota
+ ok, err := quota_model.EvaluateForUser(ctx, ctx.ActionTask.OwnerID, quota_model.LimitSubjectSizeAssetsArtifacts)
+ if err != nil {
+ log.Error("quota_model.EvaluateForUser: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error checking quota")
+ return
+ }
+ if !ok {
+ ctx.Error(http.StatusRequestEntityTooLarge, "Quota exceeded")
+ return
+ }
+
+ // get upload file size
+ fileRealTotalSize, contentLength := getUploadFileSize(ctx)
+
+ // get artifact retention days
+ expiredDays := setting.Actions.ArtifactRetentionDays
+ if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
+ var err error
+ expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
+ if err != nil {
+ log.Error("Error parse retention days: %v", err)
+ ctx.Error(http.StatusBadRequest, "Error parse retention days")
+ return
+ }
+ }
+ log.Debug("[artifact] upload chunk, name: %s, path: %s, size: %d, retention days: %d",
+ artifactName, artifactPath, fileRealTotalSize, expiredDays)
+
+ // create or get artifact with name and path
+ artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath, expiredDays)
+ if err != nil {
+ log.Error("Error create or get artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+ return
+ }
+
+ // save the chunk to storage; on success, return the chunks' total size
+ // if the artifact is not gzipped during upload, chunksTotalSize == fileRealTotalSize
+ // if the artifact is gzipped during upload, chunksTotalSize < fileRealTotalSize
+ chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
+ if err != nil {
+ log.Error("Error save upload chunk: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error save upload chunk")
+ return
+ }
+
+ // update the artifact sizes if they are zero or do not match, overwriting the stored values
+ if artifact.FileSize == 0 ||
+ artifact.FileCompressedSize == 0 ||
+ artifact.FileSize != fileRealTotalSize ||
+ artifact.FileCompressedSize != chunksTotalSize {
+ artifact.FileSize = fileRealTotalSize
+ artifact.FileCompressedSize = chunksTotalSize
+ artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error update artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error update artifact")
+ return
+ }
+ log.Debug("[artifact] update artifact size, artifact_id: %d, size: %d, compressed size: %d",
+ artifact.ID, artifact.FileSize, artifact.FileCompressedSize)
+ }
+
+ ctx.JSON(http.StatusOK, map[string]string{
+ "message": "success",
+ })
+}
+
+// confirmUploadArtifact confirms that an artifact upload is complete.
+// If all chunks have been uploaded, they are merged into a single file.
+func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+ artifactName := ctx.Req.URL.Query().Get("artifactName")
+ if artifactName == "" {
+ log.Warn("Error artifact name is empty")
+ ctx.Error(http.StatusBadRequest, "Error artifact name is empty")
+ return
+ }
+ if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ ctx.JSON(http.StatusOK, map[string]string{
+ "message": "success",
+ })
+}
+
+type (
+ listArtifactsResponse struct {
+ Count int64 `json:"count"`
+ Value []listArtifactsResponseItem `json:"value"`
+ }
+ listArtifactsResponseItem struct {
+ Name string `json:"name"`
+ FileContainerResourceURL string `json:"fileContainerResourceUrl"`
+ }
+)
+
+func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] handleListArtifacts, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ var (
+ items []listArtifactsResponseItem
+ values = make(map[string]bool)
+ )
+
+ for _, art := range artifacts {
+ if values[art.ArtifactName] {
+ continue
+ }
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(art.ArtifactName)))
+ item := listArtifactsResponseItem{
+ Name: art.ArtifactName,
+ FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "download_url"),
+ }
+ items = append(items, item)
+ values[art.ArtifactName] = true
+
+ log.Debug("[artifact] handleListArtifacts, name: %s, url: %s", item.Name, item.FileContainerResourceURL)
+ }
+
+ respData := listArtifactsResponse{
+ Count: int64(len(items)),
+ Value: items,
+ }
+ ctx.JSON(http.StatusOK, respData)
+}
+
+type (
+ downloadArtifactResponse struct {
+ Value []downloadArtifactResponseItem `json:"value"`
+ }
+ downloadArtifactResponseItem struct {
+ Path string `json:"path"`
+ ItemType string `json:"itemType"`
+ ContentLocation string `json:"contentLocation"`
+ }
+)
+
+// getDownloadArtifactURL generates a download URL for each artifact file
+func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
+ if !validateArtifactHash(ctx, itemPath) {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
+ RunID: runID,
+ ArtifactName: itemPath,
+ })
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ if itemPath != artifacts[0].ArtifactName {
+ log.Error("Error mismatch artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
+ ctx.Error(http.StatusBadRequest, "Error mismatch artifact name")
+ return
+ }
+
+ var items []downloadArtifactResponseItem
+ for _, artifact := range artifacts {
+ var downloadURL string
+ if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
+ u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName)
+ if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
+ log.Error("Error getting serve direct url: %v", err)
+ }
+ if u != nil {
+ downloadURL = u.String()
+ }
+ }
+ if downloadURL == "" {
+ downloadURL = ar.buildArtifactURL(runID, strconv.FormatInt(artifact.ID, 10), "download")
+ }
+ item := downloadArtifactResponseItem{
+ Path: util.PathJoinRel(itemPath, artifact.ArtifactPath),
+ ItemType: "file",
+ ContentLocation: downloadURL,
+ }
+ log.Debug("[artifact] getDownloadArtifactURL, path: %s, url: %s", item.Path, item.ContentLocation)
+ items = append(items, item)
+ }
+ respData := downloadArtifactResponse{
+ Value: items,
+ }
+ ctx.JSON(http.StatusOK, respData)
+}
+
+// downloadArtifact downloads artifact content
+func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ artifactID := ctx.ParamsInt64("artifact_id")
+ artifact, exist, err := db.GetByID[actions.ActionArtifact](ctx, artifactID)
+ if err != nil {
+ log.Error("Error getting artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if !exist {
+ log.Error("artifact with ID %d does not exist", artifactID)
+ ctx.Error(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
+ return
+ }
+ if artifact.RunID != runID {
+ log.Error("Error mismatch runID and artifactID, task: %v, artifact: %v", runID, artifactID)
+ ctx.Error(http.StatusBadRequest)
+ return
+ }
+
+ fd, err := ar.fs.Open(artifact.StoragePath)
+ if err != nil {
+ log.Error("Error opening file: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ defer fd.Close()
+
+ // if artifact is compressed, set content-encoding header to gzip
+ if artifact.ContentEncoding == "gzip" {
+ ctx.Resp.Header().Set("Content-Encoding", "gzip")
+ }
+ log.Debug("[artifact] downloadArtifact, name: %s, path: %s, storage: %s, size: %d", artifact.ArtifactName, artifact.ArtifactPath, artifact.StoragePath, artifact.FileSize)
+ ctx.ServeContent(fd, &context.ServeHeaderOptions{
+ Filename: artifact.ArtifactName,
+ LastModified: artifact.CreatedUnix.AsLocalTime(),
+ })
+}
diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go
new file mode 100644
index 0000000..cdb5658
--- /dev/null
+++ b/routers/api/actions/artifacts_chunks.go
@@ -0,0 +1,301 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+func saveUploadChunkBase(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ contentSize, runID, start, end, length int64, checkMd5 bool,
+) (int64, error) {
+ // build chunk store path
+ storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
+ var r io.Reader = ctx.Req.Body
+ var hasher hash.Hash
+ if checkMd5 {
+ // io.TeeReader computes the md5 while the body is streamed to storage,
+ // so the whole body never has to be buffered just for the checksum.
+ // If the hash does not match afterwards, the saved chunk is deleted.
+ hasher = md5.New()
+ r = io.TeeReader(r, hasher)
+ }
+ // save chunk to storage
+ writtenSize, err := st.Save(storagePath, r, contentSize)
+ if err != nil {
+ return -1, fmt.Errorf("save chunk to storage error: %v", err)
+ }
+ var checkErr error
+ if checkMd5 {
+ // check md5
+ reqMd5String := ctx.Req.Header.Get(artifactXActionsResultsMD5Header)
+ chunkMd5String := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
+ log.Info("[artifact] check chunk md5, sum: %s, header: %s", chunkMd5String, reqMd5String)
+ // if the md5 does not match, the chunk is deleted below
+ if reqMd5String != chunkMd5String {
+ checkErr = fmt.Errorf("md5 does not match")
+ }
+ }
+ if writtenSize != contentSize {
+ checkErr = errors.Join(checkErr, fmt.Errorf("content size does not match body size"))
+ }
+ if checkErr != nil {
+ if err := st.Delete(storagePath); err != nil {
+ log.Error("Error deleting chunk: %s, %v", storagePath, err)
+ }
+ return -1, checkErr
+ }
+ log.Info("[artifact] save chunk %s, size: %d, artifact id: %d, start: %d, end: %d",
+ storagePath, contentSize, artifact.ID, start, end)
+ // return chunk total size
+ return length, nil
+}
+
+func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ contentSize, runID int64,
+) (int64, error) {
+ // parse content-range header, format: bytes 0-1023/146515
+ contentRange := ctx.Req.Header.Get("Content-Range")
+ start, end, length := int64(0), int64(0), int64(0)
+ if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+ log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
+ return -1, fmt.Errorf("parse content range error: %v", err)
+ }
+ return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, length, true)
+}
+
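+// appendUploadChunk saves a chunk that continues the previous upload: the
+// caller passes the current artifact size as start, so the chunk covers
+// [start, start+contentSize-1]; no md5 check is performed for appends.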
+func appendUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ start, contentSize, runID int64,
+) (int64, error) {
+ end := start + contentSize - 1
+ return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, contentSize, false)
+}
+
+type chunkFileItem struct {
+ RunID int64
+ ArtifactID int64
+ Start int64
+ End int64
+ Path string
+}
+
+func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
+ storageDir := fmt.Sprintf("tmp%d", runID)
+ var chunks []*chunkFileItem
+ if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+ baseName := filepath.Base(fpath)
+ // when chunks are read back from storage, the path only contains the
+ // storage dir and the basename, regardless of the subdirectory setting
+ // in the storage config
+ item := chunkFileItem{Path: storageDir + "/" + baseName}
+ if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
+ return fmt.Errorf("parse content range error: %v", err)
+ }
+ chunks = append(chunks, &item)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ // group the chunks by artifact id
+ chunksMap := make(map[int64][]*chunkFileItem)
+ for _, c := range chunks {
+ chunksMap[c.ArtifactID] = append(chunksMap[c.ArtifactID], c)
+ }
+ return chunksMap, nil
+}
+
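+// listChunksByRunIDV4 resolves v4 block uploads for a single artifact: it
+// matches the stored "block-..." objects against the client-provided
+// BlockList order and computes each chunk's absolute byte range.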
+func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
+ storageDir := fmt.Sprintf("tmpv4%d", runID)
+ var chunks []*chunkFileItem
+ chunkMap := map[string]*chunkFileItem{}
+ dummy := &chunkFileItem{}
+ for _, name := range blist.Latest {
+ chunkMap[name] = dummy
+ }
+ if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+ baseName := filepath.Base(fpath)
+ if !strings.HasPrefix(baseName, "block-") {
+ return nil
+ }
+ // when chunks are read back from storage, the path only contains the
+ // storage dir and the basename, regardless of the subdirectory setting
+ // in the storage config
+ item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
+ var size int64
+ var b64chunkName string
+ if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
+ return fmt.Errorf("parse content range error: %v", err)
+ }
+ rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
+ if err != nil {
+ return fmt.Errorf("failed to parse chunkName: %v", err)
+ }
+ chunkName := string(rchunkName)
+ item.End = item.Start + size - 1
+ if _, ok := chunkMap[chunkName]; ok {
+ chunkMap[chunkName] = &item
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ for i, name := range blist.Latest {
+ chunk, ok := chunkMap[name]
+ if !ok || chunk.Path == "" {
+ return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
+ }
+ chunks = append(chunks, chunk)
+ if i > 0 {
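+ // End was recorded as size-1 while listing; once the previous chunk's
+ // absolute end is known, shift this chunk so End becomes Start+size-1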
+ chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
+ chunk.End += chunk.Start
+ }
+ }
+ return chunks, nil
+}
+
+func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
+ // read all db artifacts by name
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
+ RunID: runID,
+ ArtifactName: artifactName,
+ })
+ if err != nil {
+ return err
+ }
+ // read all uploading chunks from storage
+ chunksMap, err := listChunksByRunID(st, runID)
+ if err != nil {
+ return err
+ }
+ // range db artifacts to merge chunks
+ for _, art := range artifacts {
+ chunks, ok := chunksMap[art.ID]
+ if !ok {
+ log.Debug("artifact %d chunks not found", art.ID)
+ continue
+ }
+ if err := mergeChunksForArtifact(ctx, chunks, st, art, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st storage.ObjectStorage, artifact *actions.ActionArtifact, checksum string) error {
+ sort.Slice(chunks, func(i, j int) bool {
+ return chunks[i].Start < chunks[j].Start
+ })
+ allChunks := make([]*chunkFileItem, 0)
+ startAt := int64(-1)
+ // check that the chunks are uploaded completely and in order, dropping
+ // repeated or out-of-order chunks:
+ // startAt == -1 means the first chunk is expected next;
+ // a chunk is in order when its Start equals the previous chunk's End + 1;
+ // any chunk whose Start is not startAt + 1 is skipped.
+ for _, c := range chunks {
+ if c.Start == (startAt + 1) {
+ allChunks = append(allChunks, c)
+ startAt = c.End
+ }
+ }
+ // if the last chunk's End + 1 does not equal the compressed file size,
+ // the chunks have not been uploaded completely
+ if startAt+1 != artifact.FileCompressedSize {
+ log.Debug("[artifact] chunks are not uploaded completely, artifact_id: %d", artifact.ID)
+ return nil
+ }
+ // use multiReader
+ readers := make([]io.Reader, 0, len(allChunks))
+ closeReaders := func() {
+ for _, r := range readers {
+ _ = r.(io.Closer).Close() // it guarantees to be io.Closer by the following loop's Open function
+ }
+ readers = nil
+ }
+ defer closeReaders()
+ for _, c := range allChunks {
+ var readCloser io.ReadCloser
+ var err error
+ if readCloser, err = st.Open(c.Path); err != nil {
+ return fmt.Errorf("open chunk error: %v, %s", err, c.Path)
+ }
+ readers = append(readers, readCloser)
+ }
+ mergedReader := io.MultiReader(readers...)
+ shaPrefix := "sha256:"
+ var hash hash.Hash
+ if strings.HasPrefix(checksum, shaPrefix) {
+ hash = sha256.New()
+ }
+ if hash != nil {
+ mergedReader = io.TeeReader(mergedReader, hash)
+ }
+
+ // if the artifact is gzip-compressed, use .gz as the extension;
+ // the download-artifact action uses the content-encoding header to
+ // decide whether it should decompress the file
+ extension := "chunk"
+ if artifact.ContentEncoding == "gzip" {
+ extension = "chunk.gz"
+ }
+
+ // save merged file
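+ // shard artifacts into 255 buckets by run and artifact id,
+ // e.g. "21/4/1706000000000000000.chunk.gz"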
+ storagePath := fmt.Sprintf("%d/%d/%d.%s", artifact.RunID%255, artifact.ID%255, time.Now().UnixNano(), extension)
+ written, err := st.Save(storagePath, mergedReader, artifact.FileCompressedSize)
+ if err != nil {
+ return fmt.Errorf("save merged file error: %v", err)
+ }
+ if written != artifact.FileCompressedSize {
+ return fmt.Errorf("merged file size is not equal to chunk length")
+ }
+
+ defer func() {
+ closeReaders() // close before delete
+ // drop chunks
+ for _, c := range chunks {
+ if err := st.Delete(c.Path); err != nil {
+ log.Warn("Error deleting chunk: %s, %v", c.Path, err)
+ }
+ }
+ }()
+
+ if hash != nil {
+ rawChecksum := hash.Sum(nil)
+ actualChecksum := hex.EncodeToString(rawChecksum)
+ if !strings.HasSuffix(checksum, actualChecksum) {
+ return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
+ }
+ }
+
+ // save storage path to artifact
+ log.Debug("[artifact] merge chunks to artifact: %d, %s, old:%s", artifact.ID, storagePath, artifact.StoragePath)
+ // if artifact is already uploaded, delete the old file
+ if artifact.StoragePath != "" {
+ if err := st.Delete(artifact.StoragePath); err != nil {
+ log.Warn("Error deleting old artifact: %s, %v", artifact.StoragePath, err)
+ }
+ }
+
+ artifact.StoragePath = storagePath
+ artifact.Status = int64(actions.ArtifactStatusUploadConfirmed)
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ return fmt.Errorf("update artifact error: %v", err)
+ }
+
+ return nil
+}
diff --git a/routers/api/actions/artifacts_utils.go b/routers/api/actions/artifacts_utils.go
new file mode 100644
index 0000000..db602f1
--- /dev/null
+++ b/routers/api/actions/artifacts_utils.go
@@ -0,0 +1,94 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "crypto/md5"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+const (
+ artifactXTfsFileLengthHeader = "x-tfs-filelength"
+ artifactXActionsResultsMD5Header = "x-actions-results-md5"
+)
+
+// The rules are from https://github.com/actions/toolkit/blob/main/packages/artifact/src/internal/path-and-artifact-name-validation.ts#L32
+var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<", ">", "|", "*", "?", "\r", "\n"}, "")
+
+func validateArtifactName(ctx *ArtifactContext, artifactName string) bool {
+ if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
+ log.Error("Error checking artifact name contains invalid character")
+ ctx.Error(http.StatusBadRequest, "Error checking artifact name contains invalid character")
+ return false
+ }
+ return true
+}
+
+func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
+ task := ctx.ActionTask
+ runID := ctx.ParamsInt64("run_id")
+ if task.Job.RunID != runID {
+ log.Error("Error runID not match")
+ ctx.Error(http.StatusBadRequest, "run-id does not match")
+ return nil, 0, false
+ }
+ return task, runID, true
+}
+
+func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) { //nolint:unparam
+ task := ctx.ActionTask
+ runID, err := strconv.ParseInt(rawRunID, 10, 64)
+ if err != nil || task.Job.RunID != runID {
+ log.Error("Error runID not match")
+ ctx.Error(http.StatusBadRequest, "run-id does not match")
+ return nil, 0, false
+ }
+ return task, runID, true
+}
+
+func validateArtifactHash(ctx *ArtifactContext, artifactName string) bool {
+ paramHash := ctx.Params("artifact_hash")
+ // the hash in the url is the md5 of the artifact name
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(artifactName)))
+ if paramHash == artifactHash {
+ return true
+ }
+ log.Warn("Invalid artifact hash: %s", paramHash)
+ ctx.Error(http.StatusBadRequest, "Invalid artifact hash")
+ return false
+}
+
+func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
+ // itemPath is generated by the upload-artifact action and is formatted
+ // as {artifact_name}/{artifact_path_in_runner};
+ // for act_runner in host mode on Windows, itemPath may be joined with
+ // the Windows path separator '\', which PathJoinRelX normalizes
+ itemPath := util.PathJoinRelX(ctx.Req.URL.Query().Get("itemPath"))
+ artifactName := strings.Split(itemPath, "/")[0]
+ artifactPath := strings.TrimPrefix(itemPath, artifactName+"/")
+ if !validateArtifactHash(ctx, artifactName) {
+ return "", "", false
+ }
+ if !validateArtifactName(ctx, artifactName) {
+ return "", "", false
+ }
+ return artifactName, artifactPath, true
+}
+
+// getUploadFileSize returns the size of the file to be uploaded and the
+// request's content length. The raw size is the size of the file as reported
+// by the X-TFS-FileLength header; for compressed uploads it differs from the
+// content length.
+func getUploadFileSize(ctx *ArtifactContext) (int64, int64) {
+ contentLength := ctx.Req.ContentLength
+ xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
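+ // e.g. a gzip upload may send Content-Length: 2097152 together with
+ // x-tfs-filelength: 10485760 (the raw size before compression)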
+ if xTfsLength > 0 {
+ return xTfsLength, contentLength
+ }
+ return contentLength, contentLength
+}
diff --git a/routers/api/actions/artifactsv4.go b/routers/api/actions/artifactsv4.go
new file mode 100644
index 0000000..677e89d
--- /dev/null
+++ b/routers/api/actions/artifactsv4.go
@@ -0,0 +1,599 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+// GitHub Actions Artifacts V4 API Simple Description
+//
+// 1. Upload artifact
+// 1.1. CreateArtifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact
+// Request:
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test",
+// "version": 4
+// }
+// Response:
+// {
+// "ok": true,
+// "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75"
+// }
+// 1.2. Upload Zip Content to Blobstorage (unauthenticated request)
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
+// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
+// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
+// Files of about 800MB are uploaded in parallel and/or out of order; this file is needed to ensure the correct order
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
+// Request
+// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+// <BlockList>
+// <Latest>blockId1</Latest>
+// <Latest>blockId2</Latest>
+// </BlockList>
+// 1.5. FinalizeArtifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test",
+// "size": "2097",
+// "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4"
+// }
+// Response
+// {
+// "ok": true,
+// "artifactId": "4"
+// }
+// 2. Download artifact
+// 2.1. ListArtifacts and optionally filter by artifact exact name or id
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name_filter": "test"
+// }
+// Response
+// {
+// "artifacts": [
+// {
+// "workflowRunBackendId": "21",
+// "workflowJobRunBackendId": "49",
+// "databaseId": "4",
+// "name": "test",
+// "size": "2093",
+// "createdAt": "2024-01-23T00:13:28Z"
+// }
+// ]
+// }
+// 2.2. GetSignedArtifactURL get the URL to download the artifact zip file of a specific artifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test"
+// }
+// Response
+// {
+// "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76"
+// }
+// 2.3. Download Zip from Blobstorage (unauthenticated request)
+// GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76
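+//
+// The comp=block / comp=appendBlock / comp=blockList upload phases mirror the
+// Azure Blob Storage block API that the official actions client library speaks.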
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/common"
+ "code.gitea.io/gitea/services/context"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ ArtifactV4RouteBase = "/twirp/github.actions.results.api.v1.ArtifactService"
+ ArtifactV4ContentEncoding = "application/zip"
+)
+
+type artifactV4Routes struct {
+ prefix string
+ fs storage.ObjectStorage
+}
+
+func ArtifactV4Contexter() func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ base, baseCleanUp := context.NewBaseContext(resp, req)
+ defer baseCleanUp()
+
+ ctx := &ArtifactContext{Base: base}
+ ctx.AppendContextValue(artifactContextKey, ctx)
+
+ next.ServeHTTP(ctx.Resp, ctx.Req)
+ })
+ }
+}
+
+func ArtifactsV4Routes(prefix string) *web.Route {
+ m := web.NewRoute()
+
+ r := artifactV4Routes{
+ prefix: prefix,
+ fs: storage.ActionsArtifacts,
+ }
+
+ m.Group("", func() {
+ m.Post("CreateArtifact", r.createArtifact)
+ m.Post("FinalizeArtifact", r.finalizeArtifact)
+ m.Post("ListArtifacts", r.listArtifacts)
+ m.Post("GetSignedArtifactURL", r.getSignedArtifactURL)
+ m.Post("DeleteArtifact", r.deleteArtifact)
+ }, ArtifactContexter())
+ m.Group("", func() {
+ m.Put("UploadArtifact", r.uploadArtifact)
+ m.Get("DownloadArtifact", r.downloadArtifact)
+ }, ArtifactV4Contexter())
+
+ return m
+}
+
+func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
+ mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
+ mac.Write([]byte(endp))
+ mac.Write([]byte(expires))
+ mac.Write([]byte(artifactName))
+ mac.Write([]byte(fmt.Sprint(taskID)))
+ mac.Write([]byte(fmt.Sprint(artifactID)))
+ return mac.Sum(nil)
+}
+
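+// buildArtifactURL creates a pre-signed URL of the form
+// {AppURL}{prefix}/{endp}?sig=...&expires=...&artifactName=...&taskID=...&artifactID=...
+// that is valid for 60 minutes (see the signedUploadUrl example in the header comment).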
+func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID, artifactID int64) string {
+ expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
+ uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
+ "/" + endp +
+ "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) +
+ "&expires=" + url.QueryEscape(expires) +
+ "&artifactName=" + url.QueryEscape(artifactName) +
+ "&taskID=" + fmt.Sprint(taskID) +
+ "&artifactID=" + fmt.Sprint(artifactID)
+ return uploadURL
+}
+
+func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
+ rawTaskID := ctx.Req.URL.Query().Get("taskID")
+ rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+ sig := ctx.Req.URL.Query().Get("sig")
+ expires := ctx.Req.URL.Query().Get("expires")
+ artifactName := ctx.Req.URL.Query().Get("artifactName")
+ dsig, _ := base64.URLEncoding.DecodeString(sig)
+ taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
+ artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+
+ expectedSig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
+ if !hmac.Equal(dsig, expectedSig) {
+ log.Error("Error unauthorized")
+ ctx.Error(http.StatusUnauthorized, "Error unauthorized")
+ return nil, "", false
+ }
+ t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
+ if err != nil || t.Before(time.Now()) {
+ log.Error("Error link expired")
+ ctx.Error(http.StatusUnauthorized, "Error link expired")
+ return nil, "", false
+ }
+ task, err := actions.GetTaskByID(ctx, taskID)
+ if err != nil {
+ log.Error("Error runner api getting task by ID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+ return nil, "", false
+ }
+ if task.Status != actions.StatusRunning {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return nil, "", false
+ }
+ if err := task.LoadJob(ctx); err != nil {
+ log.Error("Error runner api getting job: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+ return nil, "", false
+ }
+ return task, artifactName, true
+}
+
+func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID int64, name string) (*actions.ActionArtifact, error) {
+ var art actions.ActionArtifact
+ has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ? AND content_encoding = ?", runID, name, name+".zip", ArtifactV4ContentEncoding).Get(&art)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, util.ErrNotExist
+ }
+ return &art, nil
+}
+
+func (r *artifactV4Routes) parseProtobufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool {
+ body, err := io.ReadAll(ctx.Req.Body)
+ if err != nil {
+ log.Error("Error reading request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error reading request body")
+ return false
+ }
+ err = protojson.Unmarshal(body, req)
+ if err != nil {
+ log.Error("Error decoding request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error decoding request body")
+ return false
+ }
+ return true
+}
+
+func (r *artifactV4Routes) sendProtobufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) {
+ resp, err := protojson.Marshal(req)
+ if err != nil {
+ log.Error("Error encoding response body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error encoding response body")
+ return
+ }
+ ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
+ ctx.Resp.WriteHeader(http.StatusOK)
+ _, _ = ctx.Resp.Write(resp)
+}
+
+func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
+ var req CreateArtifactRequest
+
+ if ok := r.parseProtobufBody(ctx, &req); !ok {
+ return
+ }
+ _, _, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifactName := req.Name
+
+ retentionDays := setting.Actions.ArtifactRetentionDays
+ if req.ExpiresAt != nil {
+ retentionDays = int64(time.Until(req.ExpiresAt.AsTime()).Hours() / 24)
+ }
+ // create or get the artifact with the given name and path
+ artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", retentionDays)
+ if err != nil {
+ log.Error("Error create or get artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+ return
+ }
+ artifact.ContentEncoding = ArtifactV4ContentEncoding
+ artifact.FileSize = 0
+ artifact.FileCompressedSize = 0
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error UpdateArtifactByID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+ return
+ }
+
+ respData := CreateArtifactResponse{
+ Ok: true,
+ SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
+ }
+ r.sendProtobufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
+ task, artifactName, ok := r.verifySignature(ctx, "UploadArtifact")
+ if !ok {
+ return
+ }
+
+ // check the owner's quota
+ ok, err := quota_model.EvaluateForUser(ctx, task.OwnerID, quota_model.LimitSubjectSizeAssetsArtifacts)
+ if err != nil {
+ log.Error("quota_model.EvaluateForUser: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error checking quota")
+ return
+ }
+ if !ok {
+ ctx.Error(http.StatusRequestEntityTooLarge, "Quota exceeded")
+ return
+ }
+
+ comp := ctx.Req.URL.Query().Get("comp")
+ switch comp {
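+ // without a blockid, block/appendBlock is the sequential append protocol;
+ // with a blockid, blocks may arrive in parallel and are put in order later
+ // by the block list request (see listChunksByRunIDV4)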
+ case "block", "appendBlock":
+ blockid := ctx.Req.URL.Query().Get("blockid")
+ if blockid == "" {
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ _, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
+ if err != nil {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return
+ }
+ artifact.FileCompressedSize += ctx.Req.ContentLength
+ artifact.FileSize += ctx.Req.ContentLength
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error UpdateArtifactByID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+ return
+ }
+ } else {
+ _, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
+ if err != nil {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return
+ }
+ }
+ ctx.JSON(http.StatusCreated, "appended")
+ case "blocklist":
+ rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+ artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+ _, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
+ if err != nil {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return
+ }
+ ctx.JSON(http.StatusCreated, "created")
+ }
+}
+
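+// BlockList mirrors the block list XML payload sent by the client,
+// listing the uploaded block ids in their final order.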
+type BlockList struct {
+ Latest []string `xml:"Latest"`
+}
+
+type Latest struct {
+ Value string `xml:",chardata"`
+}
+
+func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
+ blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
+ s, err := r.fs.Open(blockListName)
+ if err != nil {
+ return nil, err
+ }
+
+ xdec := xml.NewDecoder(s)
+ blockList := &BlockList{}
+ err = xdec.Decode(blockList)
+
+ delerr := r.fs.Delete(blockListName)
+ if delerr != nil {
+ log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
+ }
+ return blockList, err
+}
+
+func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
+ var req FinalizeArtifactRequest
+
+ if ok := r.parseProtobufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ var chunks []*chunkFileItem
+ blockList, err := r.readBlockList(runID, artifact.ID)
+ if err != nil {
+ log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
+ chunkMap, err := listChunksByRunID(r.fs, runID)
+ if err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ chunks, ok = chunkMap[artifact.ID]
+ if !ok {
+ log.Error("Error merge chunks")
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ } else {
+ chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
+ if err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
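+ // with a block list, the total size is only known after the blocks are ordered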
+ artifact.FileSize = chunks[len(chunks)-1].End + 1
+ artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
+ }
+
+ checksum := ""
+ if req.Hash != nil {
+ checksum = req.Hash.Value
+ }
+ if err := mergeChunksForArtifact(ctx, chunks, r.fs, artifact, checksum); err != nil {
+ log.Warn("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+
+ respData := FinalizeArtifactResponse{
+ Ok: true,
+ ArtifactId: artifact.ID,
+ }
+ r.sendProtobufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
+ var req ListArtifactsRequest
+
+ if ok := r.parseProtobufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] handleListArtifacts, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ list := []*ListArtifactsResponse_MonolithArtifact{}
+
+ table := map[string]*ListArtifactsResponse_MonolithArtifact{}
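+ // a name maps to nil when it is filtered out or does not belong to a
+ // single v4 monolith artifact (e.g. duplicate names from v1-v3 uploads)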
+ for _, artifact := range artifacts {
+ if _, ok := table[artifact.ArtifactName]; ok ||
+ (req.IdFilter != nil && artifact.ID != req.IdFilter.Value) ||
+ (req.NameFilter != nil && artifact.ArtifactName != req.NameFilter.Value) ||
+ artifact.ArtifactName+".zip" != artifact.ArtifactPath ||
+ artifact.ContentEncoding != ArtifactV4ContentEncoding {
+ table[artifact.ArtifactName] = nil
+ continue
+ }
+
+ table[artifact.ArtifactName] = &ListArtifactsResponse_MonolithArtifact{
+ Name: artifact.ArtifactName,
+ CreatedAt: timestamppb.New(artifact.CreatedUnix.AsTime()),
+ DatabaseId: artifact.ID,
+ WorkflowRunBackendId: req.WorkflowRunBackendId,
+ WorkflowJobRunBackendId: req.WorkflowJobRunBackendId,
+ Size: artifact.FileSize,
+ }
+ }
+ for _, artifact := range table {
+ if artifact != nil {
+ list = append(list, artifact)
+ }
+ }
+
+ respData := ListArtifactsResponse{
+ Artifacts: list,
+ }
+ r.sendProtobufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
+ var req GetSignedArtifactURLRequest
+
+ if ok := r.parseProtobufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifactName := req.Name
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ respData := GetSignedArtifactURLResponse{}
+
+ if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
+ u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath)
+ if u != nil && err == nil {
+ respData.SignedUrl = u.String()
+ }
+ }
+ if respData.SignedUrl == "" {
+ respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
+ }
+ r.sendProtobufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) {
+ task, artifactName, ok := r.verifySignature(ctx, "DownloadArtifact")
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ file, err := r.fs.Open(artifact.StoragePath)
+ if err != nil {
+ log.Error("Error artifact could not be opened: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+
+ common.ServeContentByReadSeeker(ctx.Base, artifactName, util.ToPointer(artifact.UpdatedUnix.AsTime()), file)
+}
+
+func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
+ var req DeleteArtifactRequest
+
+ if ok := r.parseProtobufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ err = actions.SetArtifactNeedDelete(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error deleting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+
+ respData := DeleteArtifactResponse{
+ Ok: true,
+ ArtifactId: artifact.ID,
+ }
+ r.sendProtobufBody(ctx, &respData)
+}
diff --git a/routers/api/actions/ping/ping.go b/routers/api/actions/ping/ping.go
new file mode 100644
index 0000000..13985c9
--- /dev/null
+++ b/routers/api/actions/ping/ping.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package ping
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/modules/log"
+
+ pingv1 "code.gitea.io/actions-proto-go/ping/v1"
+ "code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
+ "connectrpc.com/connect"
+)
+
+func NewPingServiceHandler() (string, http.Handler) {
+ return pingv1connect.NewPingServiceHandler(&Service{})
+}
+
+var _ pingv1connect.PingServiceHandler = (*Service)(nil)
+
+type Service struct {
+ pingv1connect.UnimplementedPingServiceHandler
+}
+
+func (s *Service) Ping(
+ ctx context.Context,
+ req *connect.Request[pingv1.PingRequest],
+) (*connect.Response[pingv1.PingResponse], error) {
+ log.Trace("Content-Type: %s", req.Header().Get("Content-Type"))
+ log.Trace("User-Agent: %s", req.Header().Get("User-Agent"))
+ res := connect.NewResponse(&pingv1.PingResponse{
+ Data: fmt.Sprintf("Hello, %s!", req.Msg.Data),
+ })
+ return res, nil
+}
diff --git a/routers/api/actions/ping/ping_test.go b/routers/api/actions/ping/ping_test.go
new file mode 100644
index 0000000..098b003
--- /dev/null
+++ b/routers/api/actions/ping/ping_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package ping
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ pingv1 "code.gitea.io/actions-proto-go/ping/v1"
+ "code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
+ "connectrpc.com/connect"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestService(t *testing.T) {
+ mux := http.NewServeMux()
+ mux.Handle(pingv1connect.NewPingServiceHandler(
+ &Service{},
+ ))
+ MainServiceTest(t, mux)
+}
+
+func MainServiceTest(t *testing.T, h http.Handler) {
+ t.Parallel()
+ server := httptest.NewUnstartedServer(h)
+ server.EnableHTTP2 = true
+ server.StartTLS()
+ defer server.Close()
+
+ connectClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ )
+
+ grpcClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ connect.WithGRPC(),
+ )
+
+ grpcWebClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ connect.WithGRPCWeb(),
+ )
+
+ clients := []pingv1connect.PingServiceClient{connectClient, grpcClient, grpcWebClient}
+ t.Run("ping request", func(t *testing.T) {
+ for _, client := range clients {
+ result, err := client.Ping(context.Background(), connect.NewRequest(&pingv1.PingRequest{
+ Data: "foobar",
+ }))
+ require.NoError(t, err)
+ assert.Equal(t, "Hello, foobar!", result.Msg.Data)
+ }
+ })
+}
diff --git a/routers/api/actions/runner/interceptor.go b/routers/api/actions/runner/interceptor.go
new file mode 100644
index 0000000..521ba91
--- /dev/null
+++ b/routers/api/actions/runner/interceptor.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "crypto/subtle"
+ "errors"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "connectrpc.com/connect"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ uuidHeaderKey = "x-runner-uuid"
+ tokenHeaderKey = "x-runner-token"
+)
+
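+// withRunner authenticates every RPC except Register using the runner's UUID
+// and token headers, refreshes the runner's last_online/last_active
+// timestamps, and stores the runner in the request context for the handlers.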
+var withRunner = connect.WithInterceptors(connect.UnaryInterceptorFunc(func(unaryFunc connect.UnaryFunc) connect.UnaryFunc {
+ return func(ctx context.Context, request connect.AnyRequest) (connect.AnyResponse, error) {
+ methodName := getMethodName(request)
+ if methodName == "Register" {
+ return unaryFunc(ctx, request)
+ }
+ uuid := request.Header().Get(uuidHeaderKey)
+ token := request.Header().Get(tokenHeaderKey)
+
+ runner, err := actions_model.GetRunnerByUUID(ctx, uuid)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ return nil, status.Error(codes.Unauthenticated, "unregistered runner")
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ if subtle.ConstantTimeCompare([]byte(runner.TokenHash), []byte(auth_model.HashToken(token, runner.TokenSalt))) != 1 {
+ return nil, status.Error(codes.Unauthenticated, "unregistered runner")
+ }
+
+ cols := []string{"last_online"}
+ runner.LastOnline = timeutil.TimeStampNow()
+ if methodName == "UpdateTask" || methodName == "UpdateLog" {
+ runner.LastActive = timeutil.TimeStampNow()
+ cols = append(cols, "last_active")
+ }
+ if err := actions_model.UpdateRunner(ctx, runner, cols...); err != nil {
+ log.Error("can't update runner status: %v", err)
+ }
+
+ ctx = context.WithValue(ctx, runnerCtxKey{}, runner)
+ return unaryFunc(ctx, request)
+ }
+}))
+
+func getMethodName(req connect.AnyRequest) string {
+ splits := strings.Split(req.Spec().Procedure, "/")
+ if len(splits) > 0 {
+ return splits[len(splits)-1]
+ }
+ return ""
+}
+
+type runnerCtxKey struct{}
+
+func GetRunner(ctx context.Context) *actions_model.ActionRunner {
+ if v := ctx.Value(runnerCtxKey{}); v != nil {
+ if r, ok := v.(*actions_model.ActionRunner); ok {
+ return r
+ }
+ }
+ return nil
+}
diff --git a/routers/api/actions/runner/runner.go b/routers/api/actions/runner/runner.go
new file mode 100644
index 0000000..017bdf6
--- /dev/null
+++ b/routers/api/actions/runner/runner.go
@@ -0,0 +1,289 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+ actions_service "code.gitea.io/gitea/services/actions"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
+ "connectrpc.com/connect"
+ gouuid "github.com/google/uuid"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func NewRunnerServiceHandler() (string, http.Handler) {
+ return runnerv1connect.NewRunnerServiceHandler(
+ &Service{},
+ connect.WithCompressMinBytes(1024),
+ withRunner,
+ )
+}
+
+var _ runnerv1connect.RunnerServiceClient = (*Service)(nil)
+
+type Service struct {
+ runnerv1connect.UnimplementedRunnerServiceHandler
+}
+
+// Register registers a new runner.
+func (s *Service) Register(
+ ctx context.Context,
+ req *connect.Request[runnerv1.RegisterRequest],
+) (*connect.Response[runnerv1.RegisterResponse], error) {
+ if req.Msg.Token == "" || req.Msg.Name == "" {
+ return nil, errors.New("missing runner token, name")
+ }
+
+ runnerToken, err := actions_model.GetRunnerToken(ctx, req.Msg.Token)
+ if err != nil {
+ return nil, errors.New("runner registration token not found")
+ }
+
+ if !runnerToken.IsActive {
+ return nil, errors.New("runner registration token has been invalidated, please use the latest one")
+ }
+
+ if runnerToken.OwnerID > 0 {
+ if _, err := user_model.GetUserByID(ctx, runnerToken.OwnerID); err != nil {
+ return nil, errors.New("owner of the token not found")
+ }
+ }
+
+ if runnerToken.RepoID > 0 {
+ if _, err := repo_model.GetRepositoryByID(ctx, runnerToken.RepoID); err != nil {
+ return nil, errors.New("repository of the token not found")
+ }
+ }
+
+ labels := req.Msg.Labels
+
+ // build the new runner
+ name, _ := util.SplitStringAtByteN(req.Msg.Name, 255)
+ runner := &actions_model.ActionRunner{
+ UUID: gouuid.New().String(),
+ Name: name,
+ OwnerID: runnerToken.OwnerID,
+ RepoID: runnerToken.RepoID,
+ Version: req.Msg.Version,
+ AgentLabels: labels,
+ }
+ if err := runner.GenerateToken(); err != nil {
+ return nil, errors.New("can't generate token")
+ }
+
+ // create new runner
+ if err := actions_model.CreateRunner(ctx, runner); err != nil {
+ return nil, errors.New("can't create new runner")
+ }
+
+ // update token status
+ runnerToken.IsActive = true
+ if err := actions_model.UpdateRunnerToken(ctx, runnerToken, "is_active"); err != nil {
+ return nil, errors.New("can't update runner token status")
+ }
+
+ res := connect.NewResponse(&runnerv1.RegisterResponse{
+ Runner: &runnerv1.Runner{
+ Id: runner.ID,
+ Uuid: runner.UUID,
+ Token: runner.Token,
+ Name: runner.Name,
+ Version: runner.Version,
+ Labels: runner.AgentLabels,
+ },
+ })
+
+ return res, nil
+}
+
+func (s *Service) Declare(
+ ctx context.Context,
+ req *connect.Request[runnerv1.DeclareRequest],
+) (*connect.Response[runnerv1.DeclareResponse], error) {
+ runner := GetRunner(ctx)
+ runner.AgentLabels = req.Msg.Labels
+ runner.Version = req.Msg.Version
+ if err := actions_model.UpdateRunner(ctx, runner, "agent_labels", "version"); err != nil {
+ return nil, status.Errorf(codes.Internal, "update runner: %v", err)
+ }
+
+ return connect.NewResponse(&runnerv1.DeclareResponse{
+ Runner: &runnerv1.Runner{
+ Id: runner.ID,
+ Uuid: runner.UUID,
+ Token: runner.Token,
+ Name: runner.Name,
+ Version: runner.Version,
+ Labels: runner.AgentLabels,
+ },
+ }), nil
+}
+
+// FetchTask assigns a task to the runner
+func (s *Service) FetchTask(
+ ctx context.Context,
+ req *connect.Request[runnerv1.FetchTaskRequest],
+) (*connect.Response[runnerv1.FetchTaskResponse], error) {
+ runner := GetRunner(ctx)
+
+ var task *runnerv1.Task
+ tasksVersion := req.Msg.TasksVersion // task version from runner
+ latestVersion, err := actions_model.GetTasksVersionByScope(ctx, runner.OwnerID, runner.RepoID)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "query tasks version failed: %v", err)
+ } else if latestVersion == 0 {
+ if err := actions_model.IncreaseTaskVersion(ctx, runner.OwnerID, runner.RepoID); err != nil {
+ return nil, status.Errorf(codes.Internal, "fail to increase task version: %v", err)
+ }
+ // if we didn't increase the value of `latestVersion` here,
+ // the FetchTask response would return a tasksVersion of zero,
+ // and the runner would treat it as an old version of Gitea.
+ latestVersion++
+ }
+
+ if tasksVersion != latestVersion {
+ // if the task version in the request differs from the version in the db,
+ // some tasks may not have been assigned yet,
+ // so try to pick a task for the runner that sent the request.
+ if t, ok, err := pickTask(ctx, runner); err != nil {
+ log.Error("pick task failed: %v", err)
+ return nil, status.Errorf(codes.Internal, "pick task: %v", err)
+ } else if ok {
+ task = t
+ }
+ }
+ res := connect.NewResponse(&runnerv1.FetchTaskResponse{
+ Task: task,
+ TasksVersion: latestVersion,
+ })
+ return res, nil
+}
+
+// UpdateTask updates the task status.
+func (s *Service) UpdateTask(
+ ctx context.Context,
+ req *connect.Request[runnerv1.UpdateTaskRequest],
+) (*connect.Response[runnerv1.UpdateTaskResponse], error) {
+ task, err := actions_model.UpdateTaskByState(ctx, req.Msg.State)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "update task: %v", err)
+ }
+
+ for k, v := range req.Msg.Outputs {
+ if len(k) > 255 {
+ log.Warn("Ignore the output of task %d because the key is too long: %q", task.ID, k)
+ continue
+ }
+ // The value can be a maximum of 1 MB
+ if l := len(v); l > 1024*1024 {
+ log.Warn("Ignore the output %q of task %d because the value is too long: %v", k, task.ID, l)
+ continue
+ }
+ // There's another limitation on GitHub that the total of all outputs in a workflow run can be a maximum of 50 MB.
+ // We don't check the total size here because it's not easy to do, and it isn't really worth it.
+ // See https://docs.github.com/en/actions/using-jobs/defining-outputs-for-jobs
+
+ if err := actions_model.InsertTaskOutputIfNotExist(ctx, task.ID, k, v); err != nil {
+ log.Warn("Failed to insert the output %q of task %d: %v", k, task.ID, err)
+ // It's ok not to return errors, the runner will resend the outputs.
+ }
+ }
+ sentOutputs, err := actions_model.FindTaskOutputKeyByTaskID(ctx, task.ID)
+ if err != nil {
+ log.Warn("Failed to find the sent outputs of task %d: %v", task.ID, err)
+ // It's ok not to return an error; this case is handled when the runner resends the sent outputs.
+ }
+
+ if err := task.LoadJob(ctx); err != nil {
+ return nil, status.Errorf(codes.Internal, "load job: %v", err)
+ }
+ if err := task.Job.LoadRun(ctx); err != nil {
+ return nil, status.Errorf(codes.Internal, "load run: %v", err)
+ }
+
+ // don't create commit status for cron job
+ if task.Job.Run.ScheduleID == 0 {
+ actions_service.CreateCommitStatus(ctx, task.Job)
+ }
+
+ if req.Msg.State.Result != runnerv1.Result_RESULT_UNSPECIFIED {
+ if err := actions_service.EmitJobsIfReady(task.Job.RunID); err != nil {
+ log.Error("Emit ready jobs of run %d: %v", task.Job.RunID, err)
+ }
+ }
+
+ return connect.NewResponse(&runnerv1.UpdateTaskResponse{
+ State: &runnerv1.TaskState{
+ Id: req.Msg.State.Id,
+ Result: task.Status.AsResult(),
+ },
+ SentOutputs: sentOutputs,
+ }), nil
+}
+
+// UpdateLog uploads log of the task.
+func (s *Service) UpdateLog(
+ ctx context.Context,
+ req *connect.Request[runnerv1.UpdateLogRequest],
+) (*connect.Response[runnerv1.UpdateLogResponse], error) {
+ res := connect.NewResponse(&runnerv1.UpdateLogResponse{})
+
+ task, err := actions_model.GetTaskByID(ctx, req.Msg.TaskId)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "get task: %v", err)
+ }
+ ack := task.LogLength
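+ // rows up to the acked length are already persisted; reject batches that
+ // start beyond the ack (a gap) or end at or before it (nothing new)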
+
+ if len(req.Msg.Rows) == 0 || req.Msg.Index > ack || int64(len(req.Msg.Rows))+req.Msg.Index <= ack {
+ res.Msg.AckIndex = ack
+ return res, nil
+ }
+
+ if task.LogInStorage {
+ return nil, status.Errorf(codes.AlreadyExists, "log file has been archived")
+ }
+
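+ // drop the overlap with already-written rows and append only the new tail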
+ rows := req.Msg.Rows[ack-req.Msg.Index:]
+ ns, err := actions.WriteLogs(ctx, task.LogFilename, task.LogSize, rows)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "write logs: %v", err)
+ }
+ task.LogLength += int64(len(rows))
+ for _, n := range ns {
+ task.LogIndexes = append(task.LogIndexes, task.LogSize)
+ task.LogSize += int64(n)
+ }
+
+ res.Msg.AckIndex = task.LogLength
+
+ var remove func()
+ if req.Msg.NoMore {
+ task.LogInStorage = true
+ remove, err = actions.TransferLogs(ctx, task.LogFilename)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "transfer logs: %v", err)
+ }
+ }
+
+ if err := actions_model.UpdateTask(ctx, task, "log_indexes", "log_length", "log_size", "log_in_storage"); err != nil {
+ return nil, status.Errorf(codes.Internal, "update task: %v", err)
+ }
+ if remove != nil {
+ remove()
+ }
+
+ return res, nil
+}
diff --git a/routers/api/actions/runner/utils.go b/routers/api/actions/runner/utils.go
new file mode 100644
index 0000000..ff6ec5b
--- /dev/null
+++ b/routers/api/actions/runner/utils.go
@@ -0,0 +1,189 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ secret_model "code.gitea.io/gitea/models/secret"
+ actions_module "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/actions"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ "google.golang.org/protobuf/types/known/structpb"
+)
+
+func pickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv1.Task, bool, error) {
+ t, ok, err := actions_model.CreateTaskForRunner(ctx, runner)
+ if err != nil {
+ return nil, false, fmt.Errorf("CreateTaskForRunner: %w", err)
+ }
+ if !ok {
+ return nil, false, nil
+ }
+
+ secrets, err := secret_model.GetSecretsOfTask(ctx, t)
+ if err != nil {
+ return nil, false, fmt.Errorf("GetSecretsOfTask: %w", err)
+ }
+
+ vars, err := actions_model.GetVariablesOfRun(ctx, t.Job.Run)
+ if err != nil {
+ return nil, false, fmt.Errorf("GetVariablesOfRun: %w", err)
+ }
+
+ actions.CreateCommitStatus(ctx, t.Job)
+
+ task := &runnerv1.Task{
+ Id: t.ID,
+ WorkflowPayload: t.Job.WorkflowPayload,
+ Context: generateTaskContext(t),
+ Secrets: secrets,
+ Vars: vars,
+ }
+
+ if needs, err := findTaskNeeds(ctx, t); err != nil {
+ log.Error("Cannot find needs for task %v: %v", t.ID, err)
+ // Go on with empty needs.
+ // If we returned an error, the task would be "wild": assigned to the runner
+ // in the db, but never actually received by it.
+ // In contrast, missing needs is less serious: the task will simply fail
+ // and the runner will report the error in its logs.
+ } else {
+ task.Needs = needs
+ }
+
+ return task, true, nil
+}
+
+func generateTaskContext(t *actions_model.ActionTask) *structpb.Struct {
+ event := map[string]any{}
+ _ = json.Unmarshal([]byte(t.Job.Run.EventPayload), &event)
+
+ // TriggerEvent is added in https://github.com/go-gitea/gitea/pull/25229
+ // This fallback is for the old ActionRun that doesn't have the TriggerEvent field
+ // and should be removed in 1.22
+ eventName := t.Job.Run.TriggerEvent
+ if eventName == "" {
+ eventName = t.Job.Run.Event.Event()
+ }
+
+ baseRef := ""
+ headRef := ""
+ ref := t.Job.Run.Ref
+ sha := t.Job.Run.CommitSHA
+ if pullPayload, err := t.Job.Run.GetPullRequestEventPayload(); err == nil && pullPayload.PullRequest != nil && pullPayload.PullRequest.Base != nil && pullPayload.PullRequest.Head != nil {
+ baseRef = pullPayload.PullRequest.Base.Ref
+ headRef = pullPayload.PullRequest.Head.Ref
+
+ // if the TriggerEvent is pull_request_target, ref and sha need to be set according to the base of the pull request.
+ // In GitHub's documentation, ref should be the branch or tag that triggered the workflow, but when the TriggerEvent
+ // is pull_request_target, the ref will be the base branch.
+ if t.Job.Run.TriggerEvent == actions_module.GithubEventPullRequestTarget {
+ ref = git.BranchPrefix + pullPayload.PullRequest.Base.Name
+ sha = pullPayload.PullRequest.Base.Sha
+ }
+ }
+
+ refName := git.RefName(ref)
+
+ giteaRuntimeToken, err := actions.CreateAuthorizationToken(t.ID, t.Job.RunID, t.JobID)
+ if err != nil {
+ log.Error("actions.CreateAuthorizationToken failed: %v", err)
+ }
+
+ taskContext, err := structpb.NewStruct(map[string]any{
+ // standard contexts, see https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
+ "action": "", // string, The name of the action currently running, or the id of a step. GitHub removes special characters, and uses the name __run when the current step runs a script without an id. If you use the same action more than once in the same job, the name will include a suffix with the sequence number with underscore before it. For example, the first script you run will have the name __run, and the second script will be named __run_2. Similarly, the second invocation of actions/checkout will be actionscheckout2.
+ "action_path": "", // string, The path where an action is located. This property is only supported in composite actions. You can use this path to access files located in the same repository as the action.
+ "action_ref": "", // string, For a step executing an action, this is the ref of the action being executed. For example, v2.
+ "action_repository": "", // string, For a step executing an action, this is the owner and repository name of the action. For example, actions/checkout.
+ "action_status": "", // string, For a composite action, the current result of the composite action.
+ "actor": t.Job.Run.TriggerUser.Name, // string, The username of the user that triggered the initial workflow run. If the workflow run is a re-run, this value may differ from github.triggering_actor. Any workflow re-runs will use the privileges of github.actor, even if the actor initiating the re-run (github.triggering_actor) has different privileges.
+ "api_url": setting.AppURL + "api/v1", // string, The URL of the GitHub REST API.
+ "base_ref": baseRef, // string, The base_ref or target branch of the pull request in a workflow run. This property is only available when the event that triggers a workflow run is either pull_request or pull_request_target.
+ "env": "", // string, Path on the runner to the file that sets environment variables from workflow commands. This file is unique to the current step and is a different file for each step in a job. For more information, see "Workflow commands for GitHub Actions."
+ "event": event, // object, The full event webhook payload. You can access individual properties of the event using this context. This object is identical to the webhook payload of the event that triggered the workflow run, and is different for each event. The webhooks for each GitHub Actions event are linked in "Events that trigger workflows." For example, for a workflow run triggered by the push event, this object contains the contents of the push webhook payload.
+ "event_name": eventName, // string, The name of the event that triggered the workflow run.
+ "event_path": "", // string, The path to the file on the runner that contains the full event webhook payload.
+ "graphql_url": "", // string, The URL of the GitHub GraphQL API.
+ "head_ref": headRef, // string, The head_ref or source branch of the pull request in a workflow run. This property is only available when the event that triggers a workflow run is either pull_request or pull_request_target.
+ "job": fmt.Sprint(t.JobID), // string, The job_id of the current job.
+ "ref": ref, // string, The fully-formed ref of the branch or tag that triggered the workflow run. For workflows triggered by push, this is the branch or tag ref that was pushed. For workflows triggered by pull_request, this is the pull request merge branch. For workflows triggered by release, this is the release tag created. For other triggers, this is the branch or tag ref that triggered the workflow run. This is only set if a branch or tag is available for the event type. The ref given is fully-formed, meaning that for branches the format is refs/heads/<branch_name>, for pull requests it is refs/pull/<pr_number>/merge, and for tags it is refs/tags/<tag_name>. For example, refs/heads/feature-branch-1.
+ "ref_name": refName.ShortName(), // string, The short ref name of the branch or tag that triggered the workflow run. This value matches the branch or tag name shown on GitHub. For example, feature-branch-1.
+ "ref_protected": false, // boolean, true if branch protections are configured for the ref that triggered the workflow run.
+ "ref_type": refName.RefType(), // string, The type of ref that triggered the workflow run. Valid values are branch or tag.
+ "path": "", // string, Path on the runner to the file that sets system PATH variables from workflow commands. This file is unique to the current step and is a different file for each step in a job. For more information, see "Workflow commands for GitHub Actions."
+ "repository": t.Job.Run.Repo.OwnerName + "/" + t.Job.Run.Repo.Name, // string, The owner and repository name. For example, Codertocat/Hello-World.
+ "repository_owner": t.Job.Run.Repo.OwnerName, // string, The repository owner's name. For example, Codertocat.
+ "repositoryUrl": t.Job.Run.Repo.HTMLURL(), // string, The Git URL to the repository. For example, git://github.com/codertocat/hello-world.git.
+ "retention_days": "", // string, The number of days that workflow run logs and artifacts are kept.
+ "run_id": fmt.Sprint(t.Job.RunID), // string, A unique number for each workflow run within a repository. This number does not change if you re-run the workflow run.
+ "run_number": fmt.Sprint(t.Job.Run.Index), // string, A unique number for each run of a particular workflow in a repository. This number begins at 1 for the workflow's first run, and increments with each new run. This number does not change if you re-run the workflow run.
+ "run_attempt": fmt.Sprint(t.Job.Attempt), // string, A unique number for each attempt of a particular workflow run in a repository. This number begins at 1 for the workflow run's first attempt, and increments with each re-run.
+ "secret_source": "Actions", // string, The source of a secret used in a workflow. Possible values are None, Actions, Dependabot, or Codespaces.
+ "server_url": setting.AppURL, // string, The URL of the GitHub server. For example: https://github.com.
+ "sha": sha, // string, The commit SHA that triggered the workflow. The value of this commit SHA depends on the event that triggered the workflow. For more information, see "Events that trigger workflows." For example, ffac537e6cbbf934b08745a378932722df287a53.
+ "token": t.Token, // string, A token to authenticate on behalf of the GitHub App installed on your repository. This is functionally equivalent to the GITHUB_TOKEN secret. For more information, see "Automatic token authentication."
+ "triggering_actor": "", // string, The username of the user that initiated the workflow run. If the workflow run is a re-run, this value may differ from github.actor. Any workflow re-runs will use the privileges of github.actor, even if the actor initiating the re-run (github.triggering_actor) has different privileges.
+ "workflow": t.Job.Run.WorkflowID, // string, The name of the workflow. If the workflow file doesn't specify a name, the value of this property is the full path of the workflow file in the repository.
+ "workspace": "", // string, The default working directory on the runner for steps, and the default location of your repository when using the checkout action.
+
+ // additional contexts
+ "gitea_default_actions_url": setting.Actions.DefaultActionsURL.URL(),
+ "gitea_runtime_token": giteaRuntimeToken,
+ })
+ if err != nil {
+ log.Error("structpb.NewStruct failed: %v", err)
+ }
+
+ return taskContext
+}
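+
+// Illustrative sketch (not part of the original change): a caller would
+// typically attach the generated context to the task sent to the runner,
+// roughly like this (the field names are assumptions for the example):
+//
+//	task := &runnerv1.Task{
+//		Id:              t.ID,
+//		WorkflowPayload: t.Job.WorkflowPayload,
+//		Context:         generateTaskContext(t),
+//	}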
+
+func findTaskNeeds(ctx context.Context, task *actions_model.ActionTask) (map[string]*runnerv1.TaskNeed, error) {
+ if err := task.LoadAttributes(ctx); err != nil {
+ return nil, fmt.Errorf("LoadAttributes: %w", err)
+ }
+ if len(task.Job.Needs) == 0 {
+ return nil, nil
+ }
+ needs := container.SetOf(task.Job.Needs...)
+
+ jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: task.Job.RunID})
+ if err != nil {
+ return nil, fmt.Errorf("FindRunJobs: %w", err)
+ }
+
+ ret := make(map[string]*runnerv1.TaskNeed, len(needs))
+ for _, job := range jobs {
+ if !needs.Contains(job.JobID) {
+ continue
+ }
+ if job.TaskID == 0 || !job.Status.IsDone() {
+ // this shouldn't happen; if it does, the job has probably been rerun
+ continue
+ }
+ outputs := make(map[string]string)
+ got, err := actions_model.FindTaskOutputByTaskID(ctx, job.TaskID)
+ if err != nil {
+ return nil, fmt.Errorf("FindTaskOutputByTaskID: %w", err)
+ }
+ for _, v := range got {
+ outputs[v.OutputKey] = v.OutputValue
+ }
+ ret[job.JobID] = &runnerv1.TaskNeed{
+ Outputs: outputs,
+ Result: runnerv1.Result(job.Status),
+ }
+ }
+
+ return ret, nil
+}
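+
+// Illustrative sketch (not part of the original change): the returned map is
+// keyed by job ID, so a dependency's outputs can be looked up directly. The
+// job and output names below are made up for the example:
+//
+//	needs, err := findTaskNeeds(ctx, task)
+//	if err != nil {
+//		return err
+//	}
+//	if build, ok := needs["build"]; ok {
+//		name := build.Outputs["artifact_name"]
+//		_ = name // surfaces as needs.build.outputs.artifact_name in workflow expressions
+//	}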