author     Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer  Daniel Baumann <daniel@debian.org>  2024-12-12 23:57:56 +0100
commit     e68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree       97775d6c13b0f416af55314eb6a89ef792474615 /routers/api
parent     Initial commit. (diff)
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'routers/api')
-rw-r--r--  routers/api/actions/actions.go  24
-rw-r--r--  routers/api/actions/artifact.pb.go  1058
-rw-r--r--  routers/api/actions/artifact.proto  73
-rw-r--r--  routers/api/actions/artifacts.go  507
-rw-r--r--  routers/api/actions/artifacts_chunks.go  301
-rw-r--r--  routers/api/actions/artifacts_utils.go  94
-rw-r--r--  routers/api/actions/artifactsv4.go  599
-rw-r--r--  routers/api/actions/ping/ping.go  38
-rw-r--r--  routers/api/actions/ping/ping_test.go  61
-rw-r--r--  routers/api/actions/runner/interceptor.go  80
-rw-r--r--  routers/api/actions/runner/runner.go  289
-rw-r--r--  routers/api/actions/runner/utils.go  189
-rw-r--r--  routers/api/forgejo/v1/api.go  20
-rw-r--r--  routers/api/forgejo/v1/forgejo.go  23
-rw-r--r--  routers/api/forgejo/v1/generated.go  167
-rw-r--r--  routers/api/forgejo/v1/root.go  14
-rw-r--r--  routers/api/packages/README.md  50
-rw-r--r--  routers/api/packages/alpine/alpine.go  287
-rw-r--r--  routers/api/packages/api.go  916
-rw-r--r--  routers/api/packages/arch/arch.go  274
-rw-r--r--  routers/api/packages/cargo/cargo.go  311
-rw-r--r--  routers/api/packages/chef/auth.go  274
-rw-r--r--  routers/api/packages/chef/chef.go  403
-rw-r--r--  routers/api/packages/composer/api.go  117
-rw-r--r--  routers/api/packages/composer/composer.go  261
-rw-r--r--  routers/api/packages/conan/auth.go  48
-rw-r--r--  routers/api/packages/conan/conan.go  807
-rw-r--r--  routers/api/packages/conan/search.go  163
-rw-r--r--  routers/api/packages/conda/conda.go  303
-rw-r--r--  routers/api/packages/container/auth.go  49
-rw-r--r--  routers/api/packages/container/blob.go  202
-rw-r--r--  routers/api/packages/container/container.go  785
-rw-r--r--  routers/api/packages/container/errors.go  52
-rw-r--r--  routers/api/packages/container/manifest.go  483
-rw-r--r--  routers/api/packages/cran/cran.go  264
-rw-r--r--  routers/api/packages/debian/debian.go  309
-rw-r--r--  routers/api/packages/generic/generic.go  212
-rw-r--r--  routers/api/packages/generic/generic_test.go  65
-rw-r--r--  routers/api/packages/goproxy/goproxy.go  224
-rw-r--r--  routers/api/packages/helm/helm.go  217
-rw-r--r--  routers/api/packages/helper/helper.go  63
-rw-r--r--  routers/api/packages/maven/api.go  50
-rw-r--r--  routers/api/packages/maven/maven.go  433
-rw-r--r--  routers/api/packages/npm/api.go  114
-rw-r--r--  routers/api/packages/npm/npm.go  462
-rw-r--r--  routers/api/packages/nuget/api_v2.go  402
-rw-r--r--  routers/api/packages/nuget/api_v3.go  255
-rw-r--r--  routers/api/packages/nuget/auth.go  47
-rw-r--r--  routers/api/packages/nuget/links.go  52
-rw-r--r--  routers/api/packages/nuget/nuget.go  710
-rw-r--r--  routers/api/packages/pub/pub.go  284
-rw-r--r--  routers/api/packages/pypi/pypi.go  194
-rw-r--r--  routers/api/packages/pypi/pypi_test.go  38
-rw-r--r--  routers/api/packages/rpm/rpm.go  318
-rw-r--r--  routers/api/packages/rubygems/rubygems.go  451
-rw-r--r--  routers/api/packages/swift/swift.go  465
-rw-r--r--  routers/api/packages/vagrant/vagrant.go  242
-rw-r--r--  routers/api/shared/middleware.go  152
-rw-r--r--  routers/api/v1/activitypub/actor.go  83
-rw-r--r--  routers/api/v1/activitypub/person.go  106
-rw-r--r--  routers/api/v1/activitypub/repository.go  80
-rw-r--r--  routers/api/v1/activitypub/repository_test.go  27
-rw-r--r--  routers/api/v1/activitypub/reqsignature.go  99
-rw-r--r--  routers/api/v1/activitypub/response.go  35
-rw-r--r--  routers/api/v1/admin/adopt.go  180
-rw-r--r--  routers/api/v1/admin/cron.go  86
-rw-r--r--  routers/api/v1/admin/email.go  87
-rw-r--r--  routers/api/v1/admin/hooks.go  176
-rw-r--r--  routers/api/v1/admin/org.go  123
-rw-r--r--  routers/api/v1/admin/quota.go  53
-rw-r--r--  routers/api/v1/admin/quota_group.go  436
-rw-r--r--  routers/api/v1/admin/quota_rule.go  219
-rw-r--r--  routers/api/v1/admin/repo.go  49
-rw-r--r--  routers/api/v1/admin/runners.go  26
-rw-r--r--  routers/api/v1/admin/user.go  509
-rw-r--r--  routers/api/v1/api.go  1659
-rw-r--r--  routers/api/v1/misc/gitignore.go  56
-rw-r--r--  routers/api/v1/misc/label_templates.go  60
-rw-r--r--  routers/api/v1/misc/licenses.go  76
-rw-r--r--  routers/api/v1/misc/markup.go  110
-rw-r--r--  routers/api/v1/misc/markup_test.go  184
-rw-r--r--  routers/api/v1/misc/nodeinfo.go  80
-rw-r--r--  routers/api/v1/misc/signing.go  63
-rw-r--r--  routers/api/v1/misc/version.go  25
-rw-r--r--  routers/api/v1/notify/notifications.go  77
-rw-r--r--  routers/api/v1/notify/repo.go  227
-rw-r--r--  routers/api/v1/notify/threads.go  118
-rw-r--r--  routers/api/v1/notify/user.go  175
-rw-r--r--  routers/api/v1/org/action.go  473
-rw-r--r--  routers/api/v1/org/avatar.go  80
-rw-r--r--  routers/api/v1/org/hook.go  189
-rw-r--r--  routers/api/v1/org/label.go  258
-rw-r--r--  routers/api/v1/org/member.go  325
-rw-r--r--  routers/api/v1/org/org.go  559
-rw-r--r--  routers/api/v1/org/quota.go  155
-rw-r--r--  routers/api/v1/org/team.go  891
-rw-r--r--  routers/api/v1/packages/package.go  215
-rw-r--r--  routers/api/v1/repo/action.go  653
-rw-r--r--  routers/api/v1/repo/avatar.go  88
-rw-r--r--  routers/api/v1/repo/blob.go  55
-rw-r--r--  routers/api/v1/repo/branch.go  1019
-rw-r--r--  routers/api/v1/repo/collaborators.go  370
-rw-r--r--  routers/api/v1/repo/commits.go  376
-rw-r--r--  routers/api/v1/repo/compare.go  99
-rw-r--r--  routers/api/v1/repo/file.go  1014
-rw-r--r--  routers/api/v1/repo/flags.go  245
-rw-r--r--  routers/api/v1/repo/fork.go  167
-rw-r--r--  routers/api/v1/repo/git_hook.go  196
-rw-r--r--  routers/api/v1/repo/git_ref.go  107
-rw-r--r--  routers/api/v1/repo/hook.go  308
-rw-r--r--  routers/api/v1/repo/hook_test.go  33
-rw-r--r--  routers/api/v1/repo/issue.go  1041
-rw-r--r--  routers/api/v1/repo/issue_attachment.go  411
-rw-r--r--  routers/api/v1/repo/issue_comment.go  691
-rw-r--r--  routers/api/v1/repo/issue_comment_attachment.go  400
-rw-r--r--  routers/api/v1/repo/issue_dependency.go  613
-rw-r--r--  routers/api/v1/repo/issue_label.go  385
-rw-r--r--  routers/api/v1/repo/issue_pin.go  309
-rw-r--r--  routers/api/v1/repo/issue_reaction.go  424
-rw-r--r--  routers/api/v1/repo/issue_stopwatch.go  245
-rw-r--r--  routers/api/v1/repo/issue_subscription.go  294
-rw-r--r--  routers/api/v1/repo/issue_tracked_time.go  637
-rw-r--r--  routers/api/v1/repo/key.go  292
-rw-r--r--  routers/api/v1/repo/label.go  285
-rw-r--r--  routers/api/v1/repo/language.go  81
-rw-r--r--  routers/api/v1/repo/main_test.go  21
-rw-r--r--  routers/api/v1/repo/migrate.go  281
-rw-r--r--  routers/api/v1/repo/milestone.go  309
-rw-r--r--  routers/api/v1/repo/mirror.go  449
-rw-r--r--  routers/api/v1/repo/notes.go  104
-rw-r--r--  routers/api/v1/repo/patch.go  114
-rw-r--r--  routers/api/v1/repo/pull.go  1648
-rw-r--r--  routers/api/v1/repo/pull_review.go  1107
-rw-r--r--  routers/api/v1/repo/release.go  424
-rw-r--r--  routers/api/v1/repo/release_attachment.go  467
-rw-r--r--  routers/api/v1/repo/release_tags.go  125
-rw-r--r--  routers/api/v1/repo/repo.go  1338
-rw-r--r--  routers/api/v1/repo/repo_test.go  86
-rw-r--r--  routers/api/v1/repo/star.go  60
-rw-r--r--  routers/api/v1/repo/status.go  282
-rw-r--r--  routers/api/v1/repo/subscriber.go  60
-rw-r--r--  routers/api/v1/repo/tag.go  668
-rw-r--r--  routers/api/v1/repo/teams.go  235
-rw-r--r--  routers/api/v1/repo/topic.go  305
-rw-r--r--  routers/api/v1/repo/transfer.go  254
-rw-r--r--  routers/api/v1/repo/tree.go  70
-rw-r--r--  routers/api/v1/repo/wiki.go  536
-rw-r--r--  routers/api/v1/settings/settings.go  86
-rw-r--r--  routers/api/v1/shared/quota.go  102
-rw-r--r--  routers/api/v1/shared/runners.go  32
-rw-r--r--  routers/api/v1/swagger/action.go  34
-rw-r--r--  routers/api/v1/swagger/activity.go  15
-rw-r--r--  routers/api/v1/swagger/activitypub.go  15
-rw-r--r--  routers/api/v1/swagger/app.go  22
-rw-r--r--  routers/api/v1/swagger/cron.go  15
-rw-r--r--  routers/api/v1/swagger/issue.go  127
-rw-r--r--  routers/api/v1/swagger/key.go  50
-rw-r--r--  routers/api/v1/swagger/misc.go  71
-rw-r--r--  routers/api/v1/swagger/nodeinfo.go  15
-rw-r--r--  routers/api/v1/swagger/notify.go  29
-rw-r--r--  routers/api/v1/swagger/options.go  234
-rw-r--r--  routers/api/v1/swagger/org.go  43
-rw-r--r--  routers/api/v1/swagger/package.go  29
-rw-r--r--  routers/api/v1/swagger/quota.go  64
-rw-r--r--  routers/api/v1/swagger/repo.go  450
-rw-r--r--  routers/api/v1/swagger/settings.go  34
-rw-r--r--  routers/api/v1/swagger/user.go  50
-rw-r--r--  routers/api/v1/user/action.go  381
-rw-r--r--  routers/api/v1/user/app.go  434
-rw-r--r--  routers/api/v1/user/avatar.go  73
-rw-r--r--  routers/api/v1/user/email.go  144
-rw-r--r--  routers/api/v1/user/follower.go  281
-rw-r--r--  routers/api/v1/user/gpg_key.go  333
-rw-r--r--  routers/api/v1/user/helper.go  35
-rw-r--r--  routers/api/v1/user/hook.go  179
-rw-r--r--  routers/api/v1/user/key.go  317
-rw-r--r--  routers/api/v1/user/quota.go  128
-rw-r--r--  routers/api/v1/user/repo.go  190
-rw-r--r--  routers/api/v1/user/runners.go  30
-rw-r--r--  routers/api/v1/user/settings.go  75
-rw-r--r--  routers/api/v1/user/star.go  213
-rw-r--r--  routers/api/v1/user/watch.go  215
-rw-r--r--  routers/api/v1/utils/block.go  65
-rw-r--r--  routers/api/v1/utils/git.go  99
-rw-r--r--  routers/api/v1/utils/hook.go  419
-rw-r--r--  routers/api/v1/utils/page.go  18
187 files changed, 49023 insertions, 0 deletions
diff --git a/routers/api/actions/actions.go b/routers/api/actions/actions.go
new file mode 100644
index 0000000..a418b3a
--- /dev/null
+++ b/routers/api/actions/actions.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/actions/ping"
+ "code.gitea.io/gitea/routers/api/actions/runner"
+)
+
+func Routes(prefix string) *web.Route {
+ m := web.NewRoute()
+
+ path, handler := ping.NewPingServiceHandler()
+ m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)
+
+ path, handler = runner.NewRunnerServiceHandler()
+ m.Post(path+"*", http.StripPrefix(prefix, handler).ServeHTTP)
+
+ return m
+}
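
Each constructor above returns the service's URL path prefix together with its http.Handler, and the route then registers a wildcard under that path with the mount prefix stripped. A minimal stdlib-only sketch of the same (path, handler) pattern, using a hypothetical example service rather than the actual ping/runner wiring (editorial illustration, not part of the commit):

package main

import "net/http"

// newExampleServiceHandler mimics the constructors above: it returns the
// service's base path and the http.Handler that serves everything under it.
// The service name is hypothetical.
func newExampleServiceHandler() (string, http.Handler) {
	return "/example.v1.ExampleService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
}

func main() {
	path, handler := newExampleServiceHandler()
	mux := http.NewServeMux()
	mux.Handle(path, handler) // a trailing-slash pattern matches the whole subtree
	_ = http.ListenAndServe("127.0.0.1:8080", mux)
}
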
diff --git a/routers/api/actions/artifact.pb.go b/routers/api/actions/artifact.pb.go
new file mode 100644
index 0000000..590eda9
--- /dev/null
+++ b/routers/api/actions/artifact.pb.go
@@ -0,0 +1,1058 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
+// source: artifact.proto
+
+package actions
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CreateArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
+ Version int32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *CreateArtifactRequest) Reset() {
+ *x = CreateArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateArtifactRequest) ProtoMessage() {}
+
+func (x *CreateArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateArtifactRequest.ProtoReflect.Descriptor instead.
+func (*CreateArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateArtifactRequest) GetExpiresAt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpiresAt
+ }
+ return nil
+}
+
+func (x *CreateArtifactRequest) GetVersion() int32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+type CreateArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ SignedUploadUrl string `protobuf:"bytes,2,opt,name=signed_upload_url,json=signedUploadUrl,proto3" json:"signed_upload_url,omitempty"`
+}
+
+func (x *CreateArtifactResponse) Reset() {
+ *x = CreateArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateArtifactResponse) ProtoMessage() {}
+
+func (x *CreateArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateArtifactResponse.ProtoReflect.Descriptor instead.
+func (*CreateArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CreateArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *CreateArtifactResponse) GetSignedUploadUrl() string {
+ if x != nil {
+ return x.SignedUploadUrl
+ }
+ return ""
+}
+
+type FinalizeArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"`
+ Hash *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (x *FinalizeArtifactRequest) Reset() {
+ *x = FinalizeArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeArtifactRequest) ProtoMessage() {}
+
+func (x *FinalizeArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeArtifactRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FinalizeArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *FinalizeArtifactRequest) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *FinalizeArtifactRequest) GetHash() *wrapperspb.StringValue {
+ if x != nil {
+ return x.Hash
+ }
+ return nil
+}
+
+type FinalizeArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+}
+
+func (x *FinalizeArtifactResponse) Reset() {
+ *x = FinalizeArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeArtifactResponse) ProtoMessage() {}
+
+func (x *FinalizeArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeArtifactResponse.ProtoReflect.Descriptor instead.
+func (*FinalizeArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *FinalizeArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *FinalizeArtifactResponse) GetArtifactId() int64 {
+ if x != nil {
+ return x.ArtifactId
+ }
+ return 0
+}
+
+type ListArtifactsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ NameFilter *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=name_filter,json=nameFilter,proto3" json:"name_filter,omitempty"`
+ IdFilter *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=id_filter,json=idFilter,proto3" json:"id_filter,omitempty"`
+}
+
+func (x *ListArtifactsRequest) Reset() {
+ *x = ListArtifactsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsRequest) ProtoMessage() {}
+
+func (x *ListArtifactsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsRequest.ProtoReflect.Descriptor instead.
+func (*ListArtifactsRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListArtifactsRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsRequest) GetNameFilter() *wrapperspb.StringValue {
+ if x != nil {
+ return x.NameFilter
+ }
+ return nil
+}
+
+func (x *ListArtifactsRequest) GetIdFilter() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.IdFilter
+ }
+ return nil
+}
+
+type ListArtifactsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Artifacts []*ListArtifactsResponse_MonolithArtifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"`
+}
+
+func (x *ListArtifactsResponse) Reset() {
+ *x = ListArtifactsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsResponse) ProtoMessage() {}
+
+func (x *ListArtifactsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsResponse.ProtoReflect.Descriptor instead.
+func (*ListArtifactsResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListArtifactsResponse) GetArtifacts() []*ListArtifactsResponse_MonolithArtifact {
+ if x != nil {
+ return x.Artifacts
+ }
+ return nil
+}
+
+type ListArtifactsResponse_MonolithArtifact struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ DatabaseId int64 `protobuf:"varint,3,opt,name=database_id,json=databaseId,proto3" json:"database_id,omitempty"`
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"`
+ CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) Reset() {
+ *x = ListArtifactsResponse_MonolithArtifact{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListArtifactsResponse_MonolithArtifact) ProtoMessage() {}
+
+func (x *ListArtifactsResponse_MonolithArtifact) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListArtifactsResponse_MonolithArtifact.ProtoReflect.Descriptor instead.
+func (*ListArtifactsResponse_MonolithArtifact) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetDatabaseId() int64 {
+ if x != nil {
+ return x.DatabaseId
+ }
+ return 0
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetSize() int64 {
+ if x != nil {
+ return x.Size
+ }
+ return 0
+}
+
+func (x *ListArtifactsResponse_MonolithArtifact) GetCreatedAt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return nil
+}
+
+type GetSignedArtifactURLRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetSignedArtifactURLRequest) Reset() {
+ *x = GetSignedArtifactURLRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSignedArtifactURLRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSignedArtifactURLRequest) ProtoMessage() {}
+
+func (x *GetSignedArtifactURLRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSignedArtifactURLRequest.ProtoReflect.Descriptor instead.
+func (*GetSignedArtifactURLRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetSignedArtifactURLRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *GetSignedArtifactURLRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *GetSignedArtifactURLRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type GetSignedArtifactURLResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SignedUrl string `protobuf:"bytes,1,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"`
+}
+
+func (x *GetSignedArtifactURLResponse) Reset() {
+ *x = GetSignedArtifactURLResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSignedArtifactURLResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSignedArtifactURLResponse) ProtoMessage() {}
+
+func (x *GetSignedArtifactURLResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSignedArtifactURLResponse.ProtoReflect.Descriptor instead.
+func (*GetSignedArtifactURLResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GetSignedArtifactURLResponse) GetSignedUrl() string {
+ if x != nil {
+ return x.SignedUrl
+ }
+ return ""
+}
+
+type DeleteArtifactRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"`
+ WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"`
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteArtifactRequest) Reset() {
+ *x = DeleteArtifactRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteArtifactRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteArtifactRequest) ProtoMessage() {}
+
+func (x *DeleteArtifactRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteArtifactRequest.ProtoReflect.Descriptor instead.
+func (*DeleteArtifactRequest) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *DeleteArtifactRequest) GetWorkflowRunBackendId() string {
+ if x != nil {
+ return x.WorkflowRunBackendId
+ }
+ return ""
+}
+
+func (x *DeleteArtifactRequest) GetWorkflowJobRunBackendId() string {
+ if x != nil {
+ return x.WorkflowJobRunBackendId
+ }
+ return ""
+}
+
+func (x *DeleteArtifactRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type DeleteArtifactResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+ ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+}
+
+func (x *DeleteArtifactResponse) Reset() {
+ *x = DeleteArtifactResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_artifact_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteArtifactResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteArtifactResponse) ProtoMessage() {}
+
+func (x *DeleteArtifactResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_artifact_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteArtifactResponse.ProtoReflect.Descriptor instead.
+func (*DeleteArtifactResponse) Descriptor() ([]byte, []int) {
+ return file_artifact_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *DeleteArtifactResponse) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+func (x *DeleteArtifactResponse) GetArtifactId() int64 {
+ if x != nil {
+ return x.ArtifactId
+ }
+ return 0
+}
+
+var File_artifact_proto protoreflect.FileDescriptor
+
+var file_artifact_proto_rawDesc = []byte{
+ 0x0a, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xf5, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49,
+ 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f,
+ 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18,
+ 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02,
+ 0x6f, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73,
+ 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xe8,
+ 0x01, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49,
+ 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f,
+ 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x4b, 0x0a, 0x18, 0x46, 0x69, 0x6e,
+ 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63,
+ 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69,
+ 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x09, 0x69, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x08, 0x69, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x7c, 0x0a,
+ 0x15, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61,
+ 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72,
+ 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f,
+ 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
+ 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x26,
+ 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72,
+ 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
+ 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a,
+ 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75,
+ 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52,
+ 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04,
+ 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22,
+ 0xa6, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74,
+ 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f,
+ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65,
+ 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53,
+ 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e,
+ 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75,
+ 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b,
+ 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66,
+ 0x61, 0x63, 0x74, 0x49, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_artifact_proto_rawDescOnce sync.Once
+ file_artifact_proto_rawDescData = file_artifact_proto_rawDesc
+)
+
+func file_artifact_proto_rawDescGZIP() []byte {
+ file_artifact_proto_rawDescOnce.Do(func() {
+ file_artifact_proto_rawDescData = protoimpl.X.CompressGZIP(file_artifact_proto_rawDescData)
+ })
+ return file_artifact_proto_rawDescData
+}
+
+var (
+ file_artifact_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
+ file_artifact_proto_goTypes = []interface{}{
+ (*CreateArtifactRequest)(nil), // 0: github.actions.results.api.v1.CreateArtifactRequest
+ (*CreateArtifactResponse)(nil), // 1: github.actions.results.api.v1.CreateArtifactResponse
+ (*FinalizeArtifactRequest)(nil), // 2: github.actions.results.api.v1.FinalizeArtifactRequest
+ (*FinalizeArtifactResponse)(nil), // 3: github.actions.results.api.v1.FinalizeArtifactResponse
+ (*ListArtifactsRequest)(nil), // 4: github.actions.results.api.v1.ListArtifactsRequest
+ (*ListArtifactsResponse)(nil), // 5: github.actions.results.api.v1.ListArtifactsResponse
+ (*ListArtifactsResponse_MonolithArtifact)(nil), // 6: github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact
+ (*GetSignedArtifactURLRequest)(nil), // 7: github.actions.results.api.v1.GetSignedArtifactURLRequest
+ (*GetSignedArtifactURLResponse)(nil), // 8: github.actions.results.api.v1.GetSignedArtifactURLResponse
+ (*DeleteArtifactRequest)(nil), // 9: github.actions.results.api.v1.DeleteArtifactRequest
+ (*DeleteArtifactResponse)(nil), // 10: github.actions.results.api.v1.DeleteArtifactResponse
+ (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+ (*wrapperspb.StringValue)(nil), // 12: google.protobuf.StringValue
+ (*wrapperspb.Int64Value)(nil), // 13: google.protobuf.Int64Value
+ }
+)
+
+var file_artifact_proto_depIdxs = []int32{
+ 11, // 0: github.actions.results.api.v1.CreateArtifactRequest.expires_at:type_name -> google.protobuf.Timestamp
+ 12, // 1: github.actions.results.api.v1.FinalizeArtifactRequest.hash:type_name -> google.protobuf.StringValue
+ 12, // 2: github.actions.results.api.v1.ListArtifactsRequest.name_filter:type_name -> google.protobuf.StringValue
+ 13, // 3: github.actions.results.api.v1.ListArtifactsRequest.id_filter:type_name -> google.protobuf.Int64Value
+ 6, // 4: github.actions.results.api.v1.ListArtifactsResponse.artifacts:type_name -> github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact
+ 11, // 5: github.actions.results.api.v1.ListArtifactsResponse_MonolithArtifact.created_at:type_name -> google.protobuf.Timestamp
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_artifact_proto_init() }
+func file_artifact_proto_init() {
+ if File_artifact_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_artifact_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CreateArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListArtifactsResponse_MonolithArtifact); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSignedArtifactURLRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSignedArtifactURLResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteArtifactRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_artifact_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteArtifactResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_artifact_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 11,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_artifact_proto_goTypes,
+ DependencyIndexes: file_artifact_proto_depIdxs,
+ MessageInfos: file_artifact_proto_msgTypes,
+ }.Build()
+ File_artifact_proto = out.File
+ file_artifact_proto_rawDesc = nil
+ file_artifact_proto_goTypes = nil
+ file_artifact_proto_depIdxs = nil
+}
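
The generated message types above expose nil-safe getters and marshal with the standard protobuf runtime. A minimal sketch of building and round-tripping a CreateArtifactRequest, assuming the code.gitea.io/gitea/routers/api/actions import path declared in this commit (editorial illustration, not part of the commit):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"

	actions "code.gitea.io/gitea/routers/api/actions"
)

func main() {
	req := &actions.CreateArtifactRequest{
		WorkflowRunBackendId:    "run-1",
		WorkflowJobRunBackendId: "job-1",
		Name:                    "my-artifact",
		ExpiresAt:               timestamppb.New(time.Now().Add(90 * 24 * time.Hour)),
		Version:                 4,
	}

	// Round-trip over the protobuf wire format.
	raw, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	decoded := &actions.CreateArtifactRequest{}
	if err := proto.Unmarshal(raw, decoded); err != nil {
		panic(err)
	}

	// Getters are nil-receiver safe: (*actions.CreateArtifactRequest)(nil).GetName() == "".
	fmt.Println(decoded.GetName(), decoded.GetVersion())
}
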
diff --git a/routers/api/actions/artifact.proto b/routers/api/actions/artifact.proto
new file mode 100644
index 0000000..c68e5d0
--- /dev/null
+++ b/routers/api/actions/artifact.proto
@@ -0,0 +1,73 @@
+syntax = "proto3";
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+package github.actions.results.api.v1;
+
+message CreateArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+ google.protobuf.Timestamp expires_at = 4;
+ int32 version = 5;
+}
+
+message CreateArtifactResponse {
+ bool ok = 1;
+ string signed_upload_url = 2;
+}
+
+message FinalizeArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+ int64 size = 4;
+ google.protobuf.StringValue hash = 5;
+}
+
+message FinalizeArtifactResponse {
+ bool ok = 1;
+ int64 artifact_id = 2;
+}
+
+message ListArtifactsRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ google.protobuf.StringValue name_filter = 3;
+ google.protobuf.Int64Value id_filter = 4;
+}
+
+message ListArtifactsResponse {
+ repeated ListArtifactsResponse_MonolithArtifact artifacts = 1;
+}
+
+message ListArtifactsResponse_MonolithArtifact {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ int64 database_id = 3;
+ string name = 4;
+ int64 size = 5;
+ google.protobuf.Timestamp created_at = 6;
+}
+
+message GetSignedArtifactURLRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+}
+
+message GetSignedArtifactURLResponse {
+ string signed_url = 1;
+}
+
+message DeleteArtifactRequest {
+ string workflow_run_backend_id = 1;
+ string workflow_job_run_backend_id = 2;
+ string name = 3;
+}
+
+message DeleteArtifactResponse {
+ bool ok = 1;
+ int64 artifact_id = 2;
+}
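
The google.protobuf.StringValue and Int64Value wrapper types let a client distinguish an unset filter from an empty string or zero ID. A short sketch of both cases with ListArtifactsRequest, under the same import-path assumption as above (editorial illustration, not part of the commit):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"

	actions "code.gitea.io/gitea/routers/api/actions"
)

func main() {
	// No filters set: the wrapper fields stay nil, meaning "list everything".
	all := &actions.ListArtifactsRequest{
		WorkflowRunBackendId:    "run-1",
		WorkflowJobRunBackendId: "job-1",
	}
	fmt.Println(all.GetNameFilter() == nil, all.GetIdFilter() == nil) // true true

	// Filters set: wrap the values explicitly.
	filtered := &actions.ListArtifactsRequest{
		WorkflowRunBackendId:    "run-1",
		WorkflowJobRunBackendId: "job-1",
		NameFilter:              wrapperspb.String("my-artifact"),
		IdFilter:                wrapperspb.Int64(42),
	}
	fmt.Println(filtered.GetNameFilter().GetValue(), filtered.GetIdFilter().GetValue())
}
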
diff --git a/routers/api/actions/artifacts.go b/routers/api/actions/artifacts.go
new file mode 100644
index 0000000..bc29e44
--- /dev/null
+++ b/routers/api/actions/artifacts.go
@@ -0,0 +1,507 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+// GitHub Actions Artifacts API Simple Description
+//
+// 1. Upload artifact
+// 1.1. Post upload url
+// Post: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
+// Request:
+// {
+// "Type": "actions_storage",
+// "Name": "artifact"
+// }
+// Response:
+// {
+// "fileContainerResourceUrl":"/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload"
+// }
+// it acquires an upload URL for the artifact upload
+// 1.2. Upload artifact
+// PUT: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
+// it uploads a chunk with the following headers:
+// x-tfs-filelength: 1024 // total file length
+// content-length: 1024 // chunk length
+// x-actions-results-md5: md5sum // md5sum of chunk
+// content-range: bytes 0-1023/1024 // chunk range
+// all chunks are saved to one storage directory after the md5sum check
+// 1.3. Confirm upload
+// PATCH: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
+// it confirms the upload, merges all chunks into one file, and saves this file to storage
+//
+// 2. Download artifact
+// 2.1 list artifacts
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
+// Response:
+// {
+// "count": 1,
+// "value": [
+// {
+// "name": "artifact",
+// "fileContainerResourceUrl": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path"
+// }
+// ]
+// }
+// 2.2 download artifact
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path?api-version=6.0-preview
+// Response:
+// {
+// "value": [
+// {
+// "contentLocation": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download",
+// "path": "artifact/filename",
+// "itemType": "file"
+// }
+// ]
+// }
+// 2.3 download artifact file
+// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download?itemPath=artifact%2Ffilename
+// Response:
+// download file
+//
+
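
To make the upload flow described in the comment above concrete, here is a hedged client-side sketch of steps 1.1 and 1.2 for a single-chunk file, written against the URLs and headers listed there. The Bearer token is the runner's ACTIONS_RUNTIME_TOKEN mentioned further below, and the base64 encoding of the MD5 header is an assumption about what the runner toolkit sends (editorial illustration, not part of the upstream file):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// uploadSingleChunk walks through steps 1.1 and 1.2 above for one small file.
// baseURL is the per-run artifacts endpoint, e.g.
// {server}/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts.
func uploadSingleChunk(client *http.Client, baseURL, token, artifactName, fileName string, data []byte) error {
	// 1.1 Post upload url: ask the server for the fileContainerResourceUrl.
	// The server returns an absolute URL (built from its AppURL setting; see buildArtifactURL below).
	reqBody, _ := json.Marshal(map[string]string{"Type": "actions_storage", "Name": artifactName})
	post, _ := http.NewRequest(http.MethodPost, baseURL+"?api-version=6.0-preview", bytes.NewReader(reqBody))
	post.Header.Set("Authorization", "Bearer "+token)
	post.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(post)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var created struct {
		FileContainerResourceURL string `json:"fileContainerResourceUrl"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		return err
	}

	// 1.2 Upload the (only) chunk with the headers listed above.
	sum := md5.Sum(data)
	itemPath := url.QueryEscape(artifactName + "/" + fileName)
	put, _ := http.NewRequest(http.MethodPut, created.FileContainerResourceURL+"?itemPath="+itemPath, bytes.NewReader(data))
	put.Header.Set("Authorization", "Bearer "+token)
	put.Header.Set("x-tfs-filelength", fmt.Sprint(len(data)))                             // total file length
	put.Header.Set("x-actions-results-md5", base64.StdEncoding.EncodeToString(sum[:]))    // assumed base64-encoded chunk md5
	put.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", len(data)-1, len(data))) // chunk range
	if _, err := client.Do(put); err != nil {
		return err
	}

	// 1.3 (not shown) a PATCH request then confirms the upload so the server
	// merges the stored chunks into the final artifact file.
	return nil
}

func main() {} // placeholder so the sketch compiles standalone
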
+import (
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ web_types "code.gitea.io/gitea/modules/web/types"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+)
+
+const artifactRouteBase = "/_apis/pipelines/workflows/{run_id}/artifacts"
+
+type artifactContextKeyType struct{}
+
+var artifactContextKey = artifactContextKeyType{}
+
+type ArtifactContext struct {
+ *context.Base
+
+ ActionTask *actions.ActionTask
+}
+
+func init() {
+ web.RegisterResponseStatusProvider[*ArtifactContext](func(req *http.Request) web_types.ResponseStatusProvider {
+ return req.Context().Value(artifactContextKey).(*ArtifactContext)
+ })
+}
+
+func ArtifactsRoutes(prefix string) *web.Route {
+ m := web.NewRoute()
+ m.Use(ArtifactContexter())
+
+ r := artifactRoutes{
+ prefix: prefix,
+ fs: storage.ActionsArtifacts,
+ }
+
+ m.Group(artifactRouteBase, func() {
+ // list artifacts, request an upload URL, and confirm an upload
+ m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.comfirmUploadArtifact)
+ // handle artifact chunk upload
+ m.Put("/{artifact_hash}/upload", r.uploadArtifact)
+ // handle artifacts download
+ m.Get("/{artifact_hash}/download_url", r.getDownloadArtifactURL)
+ m.Get("/{artifact_id}/download", r.downloadArtifact)
+ })
+
+ return m
+}
+
+func ArtifactContexter() func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ base, baseCleanUp := context.NewBaseContext(resp, req)
+ defer baseCleanUp()
+
+ ctx := &ArtifactContext{Base: base}
+ ctx.AppendContextValue(artifactContextKey, ctx)
+
+ // The action task calls the server API with a Bearer ACTIONS_RUNTIME_TOKEN;
+ // we must verify that token here.
+ authHeader := req.Header.Get("Authorization")
+ if len(authHeader) == 0 || !strings.HasPrefix(authHeader, "Bearer ") {
+ ctx.Error(http.StatusUnauthorized, "Bad authorization header")
+ return
+ }
+
+ // New act_runner uses jwt to authenticate
+ tID, err := actions_service.ParseAuthorizationToken(req)
+
+ var task *actions.ActionTask
+ if err == nil {
+ task, err = actions.GetTaskByID(req.Context(), tID)
+ if err != nil {
+ log.Error("Error runner api getting task by ID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+ return
+ }
+ if task.Status != actions.StatusRunning {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return
+ }
+ } else {
+ // Old act_runner uses GITEA_TOKEN to authenticate
+ authToken := strings.TrimPrefix(authHeader, "Bearer ")
+
+ task, err = actions.GetRunningTaskByToken(req.Context(), authToken)
+ if err != nil {
+ log.Error("Error runner api getting task: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task")
+ return
+ }
+ }
+
+ if err := task.LoadJob(req.Context()); err != nil {
+ log.Error("Error runner api getting job: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+ return
+ }
+
+ ctx.ActionTask = task
+ next.ServeHTTP(ctx.Resp, ctx.Req)
+ })
+ }
+}
+
+type artifactRoutes struct {
+ prefix string
+ fs storage.ObjectStorage
+}
+
+func (ar artifactRoutes) buildArtifactURL(runID int64, artifactHash, suffix string) string {
+ uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(ar.prefix, "/") +
+ strings.ReplaceAll(artifactRouteBase, "{run_id}", strconv.FormatInt(runID, 10)) +
+ "/" + artifactHash + "/" + suffix
+ return uploadURL
+}
+
+type getUploadArtifactRequest struct {
+ Type string
+ Name string
+ RetentionDays int64
+}
+
+type getUploadArtifactResponse struct {
+ FileContainerResourceURL string `json:"fileContainerResourceUrl"`
+}
+
+// getUploadArtifactURL generates a URL for uploading an artifact
+func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ var req getUploadArtifactRequest
+ if err := json.NewDecoder(ctx.Req.Body).Decode(&req); err != nil {
+ log.Error("Error decode request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error decode request body")
+ return
+ }
+
+ // set retention days
+ retentionQuery := ""
+ if req.RetentionDays > 0 {
+ retentionQuery = fmt.Sprintf("?retentionDays=%d", req.RetentionDays)
+ }
+
+ // use md5(artifact_name) to create upload url
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(req.Name)))
+ resp := getUploadArtifactResponse{
+ FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "upload"+retentionQuery),
+ }
+ log.Debug("[artifact] get upload url: %s", resp.FileContainerResourceURL)
+ ctx.JSON(http.StatusOK, resp)
+}
+
+func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
+ task, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+ artifactName, artifactPath, ok := parseArtifactItemPath(ctx)
+ if !ok {
+ return
+ }
+
+ // check the owner's quota
+ ok, err := quota_model.EvaluateForUser(ctx, ctx.ActionTask.OwnerID, quota_model.LimitSubjectSizeAssetsArtifacts)
+ if err != nil {
+ log.Error("quota_model.EvaluateForUser: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error checking quota")
+ return
+ }
+ if !ok {
+ ctx.Error(http.StatusRequestEntityTooLarge, "Quota exceeded")
+ return
+ }
+
+ // get upload file size
+ fileRealTotalSize, contentLength := getUploadFileSize(ctx)
+
+ // get artifact retention days
+ expiredDays := setting.Actions.ArtifactRetentionDays
+ if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
+ var err error
+ expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
+ if err != nil {
+ log.Error("Error parse retention days: %v", err)
+ ctx.Error(http.StatusBadRequest, "Error parse retention days")
+ return
+ }
+ }
+ log.Debug("[artifact] upload chunk, name: %s, path: %s, size: %d, retention days: %d",
+ artifactName, artifactPath, fileRealTotalSize, expiredDays)
+
+ // create or get artifact with name and path
+ artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath, expiredDays)
+ if err != nil {
+ log.Error("Error create or get artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+ return
+ }
+
+	// save chunk to storage; on success, return the chunks' total size
+ // if artifact is not gzip when uploading, chunksTotalSize == fileRealTotalSize
+ // if artifact is gzip when uploading, chunksTotalSize < fileRealTotalSize
+ chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
+ if err != nil {
+ log.Error("Error save upload chunk: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error save upload chunk")
+ return
+ }
+
+	// update the artifact size if it is zero or does not match, overwriting the stored values
+ if artifact.FileSize == 0 ||
+ artifact.FileCompressedSize == 0 ||
+ artifact.FileSize != fileRealTotalSize ||
+ artifact.FileCompressedSize != chunksTotalSize {
+ artifact.FileSize = fileRealTotalSize
+ artifact.FileCompressedSize = chunksTotalSize
+ artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error update artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error update artifact")
+ return
+ }
+ log.Debug("[artifact] update artifact size, artifact_id: %d, size: %d, compressed size: %d",
+ artifact.ID, artifact.FileSize, artifact.FileCompressedSize)
+ }
+
+ ctx.JSON(http.StatusOK, map[string]string{
+ "message": "success",
+ })
+}
+
+// confirmUploadArtifact confirms that the artifact upload is finished.
+// if all chunks have been uploaded, they are merged into one file.
+func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+ artifactName := ctx.Req.URL.Query().Get("artifactName")
+ if artifactName == "" {
+ log.Warn("Error artifact name is empty")
+ ctx.Error(http.StatusBadRequest, "Error artifact name is empty")
+ return
+ }
+ if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ ctx.JSON(http.StatusOK, map[string]string{
+ "message": "success",
+ })
+}
+
+type (
+ listArtifactsResponse struct {
+ Count int64 `json:"count"`
+ Value []listArtifactsResponseItem `json:"value"`
+ }
+ listArtifactsResponseItem struct {
+ Name string `json:"name"`
+ FileContainerResourceURL string `json:"fileContainerResourceUrl"`
+ }
+)
+
+func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] handleListArtifacts, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ var (
+ items []listArtifactsResponseItem
+ values = make(map[string]bool)
+ )
+
+ for _, art := range artifacts {
+ if values[art.ArtifactName] {
+ continue
+ }
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(art.ArtifactName)))
+ item := listArtifactsResponseItem{
+ Name: art.ArtifactName,
+ FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "download_url"),
+ }
+ items = append(items, item)
+ values[art.ArtifactName] = true
+
+ log.Debug("[artifact] handleListArtifacts, name: %s, url: %s", item.Name, item.FileContainerResourceURL)
+ }
+
+ respData := listArtifactsResponse{
+ Count: int64(len(items)),
+ Value: items,
+ }
+ ctx.JSON(http.StatusOK, respData)
+}
+
+type (
+ downloadArtifactResponse struct {
+ Value []downloadArtifactResponseItem `json:"value"`
+ }
+ downloadArtifactResponseItem struct {
+ Path string `json:"path"`
+ ItemType string `json:"itemType"`
+ ContentLocation string `json:"contentLocation"`
+ }
+)
+
+// getDownloadArtifactURL generates download url for each artifact
+func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
+ if !validateArtifactHash(ctx, itemPath) {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
+ RunID: runID,
+ ArtifactName: itemPath,
+ })
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ if itemPath != artifacts[0].ArtifactName {
+ log.Error("Error mismatch artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
+ ctx.Error(http.StatusBadRequest, "Error mismatch artifact name")
+ return
+ }
+
+ var items []downloadArtifactResponseItem
+ for _, artifact := range artifacts {
+ var downloadURL string
+ if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
+ u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName)
+ if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
+ log.Error("Error getting serve direct url: %v", err)
+ }
+ if u != nil {
+ downloadURL = u.String()
+ }
+ }
+ if downloadURL == "" {
+ downloadURL = ar.buildArtifactURL(runID, strconv.FormatInt(artifact.ID, 10), "download")
+ }
+ item := downloadArtifactResponseItem{
+ Path: util.PathJoinRel(itemPath, artifact.ArtifactPath),
+ ItemType: "file",
+ ContentLocation: downloadURL,
+ }
+ log.Debug("[artifact] getDownloadArtifactURL, path: %s, url: %s", item.Path, item.ContentLocation)
+ items = append(items, item)
+ }
+ respData := downloadArtifactResponse{
+ Value: items,
+ }
+ ctx.JSON(http.StatusOK, respData)
+}
+
+// downloadArtifact downloads artifact content
+func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
+ _, runID, ok := validateRunID(ctx)
+ if !ok {
+ return
+ }
+
+ artifactID := ctx.ParamsInt64("artifact_id")
+ artifact, exist, err := db.GetByID[actions.ActionArtifact](ctx, artifactID)
+ if err != nil {
+ log.Error("Error getting artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if !exist {
+ log.Error("artifact with ID %d does not exist", artifactID)
+ ctx.Error(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
+ return
+ }
+ if artifact.RunID != runID {
+ log.Error("Error mismatch runID and artifactID, task: %v, artifact: %v", runID, artifactID)
+ ctx.Error(http.StatusBadRequest)
+ return
+ }
+
+ fd, err := ar.fs.Open(artifact.StoragePath)
+ if err != nil {
+ log.Error("Error opening file: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ defer fd.Close()
+
+ // if artifact is compressed, set content-encoding header to gzip
+ if artifact.ContentEncoding == "gzip" {
+ ctx.Resp.Header().Set("Content-Encoding", "gzip")
+ }
+ log.Debug("[artifact] downloadArtifact, name: %s, path: %s, storage: %s, size: %d", artifact.ArtifactName, artifact.ArtifactPath, artifact.StoragePath, artifact.FileSize)
+ ctx.ServeContent(fd, &context.ServeHeaderOptions{
+ Filename: artifact.ArtifactName,
+ LastModified: artifact.CreatedUnix.AsLocalTime(),
+ })
+}
diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go
new file mode 100644
index 0000000..cdb5658
--- /dev/null
+++ b/routers/api/actions/artifacts_chunks.go
@@ -0,0 +1,301 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+func saveUploadChunkBase(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ contentSize, runID, start, end, length int64, checkMd5 bool,
+) (int64, error) {
+ // build chunk store path
+ storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
+ var r io.Reader = ctx.Req.Body
+ var hasher hash.Hash
+ if checkMd5 {
+		// use io.TeeReader to avoid buffering the whole body just to compute the md5 sum:
+		// data is written to the hasher while it is being read.
+		// if the hash does not match, the already-written result is deleted below
+ hasher = md5.New()
+ r = io.TeeReader(r, hasher)
+ }
+ // save chunk to storage
+ writtenSize, err := st.Save(storagePath, r, contentSize)
+ if err != nil {
+ return -1, fmt.Errorf("save chunk to storage error: %v", err)
+ }
+ var checkErr error
+ if checkMd5 {
+ // check md5
+ reqMd5String := ctx.Req.Header.Get(artifactXActionsResultsMD5Header)
+ chunkMd5String := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
+ log.Info("[artifact] check chunk md5, sum: %s, header: %s", chunkMd5String, reqMd5String)
+		// if the md5 does not match, the chunk is deleted below
+ if reqMd5String != chunkMd5String {
+ checkErr = fmt.Errorf("md5 not match")
+ }
+ }
+ if writtenSize != contentSize {
+ checkErr = errors.Join(checkErr, fmt.Errorf("contentSize not match body size"))
+ }
+ if checkErr != nil {
+ if err := st.Delete(storagePath); err != nil {
+ log.Error("Error deleting chunk: %s, %v", storagePath, err)
+ }
+ return -1, checkErr
+ }
+ log.Info("[artifact] save chunk %s, size: %d, artifact id: %d, start: %d, end: %d",
+ storagePath, contentSize, artifact.ID, start, end)
+ // return chunk total size
+ return length, nil
+}
+
+func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ contentSize, runID int64,
+) (int64, error) {
+ // parse content-range header, format: bytes 0-1023/146515
+ contentRange := ctx.Req.Header.Get("Content-Range")
+ start, end, length := int64(0), int64(0), int64(0)
+ if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+ log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
+ return -1, fmt.Errorf("parse content range error: %v", err)
+ }
+ return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, length, true)
+}
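+
+// Illustrative example for saveUploadChunk (values are made up): a request with the header
+//
+//	Content-Range: bytes 0-1023/146515
+//
+// yields start=0, end=1023, length=146515; the 1024 byte body is stored as
+// "tmp{runID}/{runID}-{artifactID}-0-1023.chunk" until the chunks are merged.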
+
+func appendUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
+ artifact *actions.ActionArtifact,
+ start, contentSize, runID int64,
+) (int64, error) {
+ end := start + contentSize - 1
+ return saveUploadChunkBase(st, ctx, artifact, contentSize, runID, start, end, contentSize, false)
+}
+
+type chunkFileItem struct {
+ RunID int64
+ ArtifactID int64
+ Start int64
+ End int64
+ Path string
+}
+
+func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
+ storageDir := fmt.Sprintf("tmp%d", runID)
+ var chunks []*chunkFileItem
+ if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+ baseName := filepath.Base(fpath)
+		// when chunks are read back from storage, the path only contains the storage dir and the basename,
+		// regardless of any subdirectory setting in the storage config
+ item := chunkFileItem{Path: storageDir + "/" + baseName}
+ if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
+			return fmt.Errorf("parse chunk file name error: %v", err)
+ }
+ chunks = append(chunks, &item)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ // chunks group by artifact id
+ chunksMap := make(map[int64][]*chunkFileItem)
+ for _, c := range chunks {
+ chunksMap[c.ArtifactID] = append(chunksMap[c.ArtifactID], c)
+ }
+ return chunksMap, nil
+}
+
+func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
+ storageDir := fmt.Sprintf("tmpv4%d", runID)
+ var chunks []*chunkFileItem
+ chunkMap := map[string]*chunkFileItem{}
+ dummy := &chunkFileItem{}
+ for _, name := range blist.Latest {
+ chunkMap[name] = dummy
+ }
+ if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+ baseName := filepath.Base(fpath)
+ if !strings.HasPrefix(baseName, "block-") {
+ return nil
+ }
+		// when chunks are read back from storage, the path only contains the storage dir and the basename,
+		// regardless of any subdirectory setting in the storage config
+ item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
+ var size int64
+ var b64chunkName string
+ if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
+			return fmt.Errorf("parse block file name error: %v", err)
+ }
+ rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
+ if err != nil {
+ return fmt.Errorf("failed to parse chunkName: %v", err)
+ }
+ chunkName := string(rchunkName)
+ item.End = item.Start + size - 1
+ if _, ok := chunkMap[chunkName]; ok {
+ chunkMap[chunkName] = &item
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ for i, name := range blist.Latest {
+ chunk, ok := chunkMap[name]
+ if !ok || chunk.Path == "" {
+ return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
+ }
+ chunks = append(chunks, chunk)
+ if i > 0 {
+ chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
+ chunk.End += chunk.Start
+ }
+ }
+ return chunks, nil
+}
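+
+// Illustrative example for listChunksByRunIDV4 (run ID, block IDs and sizes are made up):
+// a block uploaded with blockid "b1" and a 1000 byte body for run 21 is stored as
+// "tmpv421/block-21-1000-<base64url("b1")>". Offsets are then reconstructed from the order
+// of the <Latest> entries in the BlockList: the first listed block covers bytes 0-999, a
+// following 500 byte block covers bytes 1000-1499, and so on.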
+
+func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
+ // read all db artifacts by name
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
+ RunID: runID,
+ ArtifactName: artifactName,
+ })
+ if err != nil {
+ return err
+ }
+ // read all uploading chunks from storage
+ chunksMap, err := listChunksByRunID(st, runID)
+ if err != nil {
+ return err
+ }
+ // range db artifacts to merge chunks
+ for _, art := range artifacts {
+ chunks, ok := chunksMap[art.ID]
+ if !ok {
+ log.Debug("artifact %d chunks not found", art.ID)
+ continue
+ }
+ if err := mergeChunksForArtifact(ctx, chunks, st, art, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st storage.ObjectStorage, artifact *actions.ActionArtifact, checksum string) error {
+ sort.Slice(chunks, func(i, j int) bool {
+ return chunks[i].Start < chunks[j].Start
+ })
+ allChunks := make([]*chunkFileItem, 0)
+ startAt := int64(-1)
+	// check that all chunks are uploaded and in order, and drop repeated chunks
+	for _, c := range chunks {
+		// startAt == -1 means this is the first chunk
+		// previous c.End + 1 == c.Start means this chunk is in order
+		// startAt != -1 and c.Start != startAt + 1 means a chunk is missing or duplicated, so this chunk is skipped
+ if c.Start == (startAt + 1) {
+ allChunks = append(allChunks, c)
+ startAt = c.End
+ }
+ }
+	// if the last chunk's End + 1 does not equal the artifact's compressed size, the chunks have not all been uploaded yet
+ if startAt+1 != artifact.FileCompressedSize {
+ log.Debug("[artifact] chunks are not uploaded completely, artifact_id: %d", artifact.ID)
+ return nil
+ }
+ // use multiReader
+ readers := make([]io.Reader, 0, len(allChunks))
+ closeReaders := func() {
+ for _, r := range readers {
+ _ = r.(io.Closer).Close() // it guarantees to be io.Closer by the following loop's Open function
+ }
+ readers = nil
+ }
+ defer closeReaders()
+ for _, c := range allChunks {
+ var readCloser io.ReadCloser
+ var err error
+ if readCloser, err = st.Open(c.Path); err != nil {
+ return fmt.Errorf("open chunk error: %v, %s", err, c.Path)
+ }
+ readers = append(readers, readCloser)
+ }
+ mergedReader := io.MultiReader(readers...)
+ shaPrefix := "sha256:"
+ var hash hash.Hash
+ if strings.HasPrefix(checksum, shaPrefix) {
+ hash = sha256.New()
+ }
+ if hash != nil {
+ mergedReader = io.TeeReader(mergedReader, hash)
+ }
+
+	// if the chunks are gzip-compressed, use .gz as the file extension
+	// the download-artifact action uses the content-encoding header to decide whether it should decompress the file
+ extension := "chunk"
+ if artifact.ContentEncoding == "gzip" {
+ extension = "chunk.gz"
+ }
+
+ // save merged file
+ storagePath := fmt.Sprintf("%d/%d/%d.%s", artifact.RunID%255, artifact.ID%255, time.Now().UnixNano(), extension)
+ written, err := st.Save(storagePath, mergedReader, artifact.FileCompressedSize)
+ if err != nil {
+ return fmt.Errorf("save merged file error: %v", err)
+ }
+ if written != artifact.FileCompressedSize {
+ return fmt.Errorf("merged file size is not equal to chunk length")
+ }
+
+ defer func() {
+ closeReaders() // close before delete
+ // drop chunks
+ for _, c := range chunks {
+ if err := st.Delete(c.Path); err != nil {
+ log.Warn("Error deleting chunk: %s, %v", c.Path, err)
+ }
+ }
+ }()
+
+ if hash != nil {
+ rawChecksum := hash.Sum(nil)
+ actualChecksum := hex.EncodeToString(rawChecksum)
+ if !strings.HasSuffix(checksum, actualChecksum) {
+ return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
+ }
+ }
+
+ // save storage path to artifact
+ log.Debug("[artifact] merge chunks to artifact: %d, %s, old:%s", artifact.ID, storagePath, artifact.StoragePath)
+ // if artifact is already uploaded, delete the old file
+ if artifact.StoragePath != "" {
+ if err := st.Delete(artifact.StoragePath); err != nil {
+ log.Warn("Error deleting old artifact: %s, %v", artifact.StoragePath, err)
+ }
+ }
+
+ artifact.StoragePath = storagePath
+ artifact.Status = int64(actions.ArtifactStatusUploadConfirmed)
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ return fmt.Errorf("update artifact error: %v", err)
+ }
+
+ return nil
+}
diff --git a/routers/api/actions/artifacts_utils.go b/routers/api/actions/artifacts_utils.go
new file mode 100644
index 0000000..db602f1
--- /dev/null
+++ b/routers/api/actions/artifacts_utils.go
@@ -0,0 +1,94 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "crypto/md5"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+const (
+ artifactXTfsFileLengthHeader = "x-tfs-filelength"
+ artifactXActionsResultsMD5Header = "x-actions-results-md5"
+)
+
+// The rules are from https://github.com/actions/toolkit/blob/main/packages/artifact/src/internal/path-and-artifact-name-validation.ts#L32
+var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<", ">", "|", "*", "?", "\r", "\n"}, "")
+
+func validateArtifactName(ctx *ArtifactContext, artifactName string) bool {
+ if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
+ log.Error("Error checking artifact name contains invalid character")
+ ctx.Error(http.StatusBadRequest, "Error checking artifact name contains invalid character")
+ return false
+ }
+ return true
+}
+
+func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
+ task := ctx.ActionTask
+ runID := ctx.ParamsInt64("run_id")
+ if task.Job.RunID != runID {
+ log.Error("Error runID not match")
+ ctx.Error(http.StatusBadRequest, "run-id does not match")
+ return nil, 0, false
+ }
+ return task, runID, true
+}
+
+func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask, int64, bool) { //nolint:unparam
+ task := ctx.ActionTask
+ runID, err := strconv.ParseInt(rawRunID, 10, 64)
+ if err != nil || task.Job.RunID != runID {
+ log.Error("Error runID not match")
+ ctx.Error(http.StatusBadRequest, "run-id does not match")
+ return nil, 0, false
+ }
+ return task, runID, true
+}
+
+func validateArtifactHash(ctx *ArtifactContext, artifactName string) bool {
+ paramHash := ctx.Params("artifact_hash")
+	// the {artifact_hash} url segment was created from md5(artifact name); recompute it to validate the request
+ artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(artifactName)))
+ if paramHash == artifactHash {
+ return true
+ }
+ log.Warn("Invalid artifact hash: %s", paramHash)
+ ctx.Error(http.StatusBadRequest, "Invalid artifact hash")
+ return false
+}
+
+func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
+	// itemPath is generated by the upload-artifact action
+	// it is formatted as {artifact_name}/{artifact_path_in_runner}
+	// when act_runner runs in host mode on Windows, itemPath is joined with the Windows separator '\'
+ itemPath := util.PathJoinRelX(ctx.Req.URL.Query().Get("itemPath"))
+ artifactName := strings.Split(itemPath, "/")[0]
+ artifactPath := strings.TrimPrefix(itemPath, artifactName+"/")
+ if !validateArtifactHash(ctx, artifactName) {
+ return "", "", false
+ }
+ if !validateArtifactName(ctx, artifactName) {
+ return "", "", false
+ }
+ return artifactName, artifactPath, true
+}
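+
+// Illustrative example for parseArtifactItemPath (names are made up): for
+// "?itemPath=my-artifact%2Fbin%2Fapp.exe" the artifact name is "my-artifact" and the artifact
+// path is "bin/app.exe"; the request is only accepted if the {artifact_hash} URL parameter
+// equals the hex md5 of "my-artifact".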
+
+// getUploadFileSize returns the size of the file to be uploaded.
+// The raw size is the size of the file as reported by the header X-TFS-FileLength.
+func getUploadFileSize(ctx *ArtifactContext) (int64, int64) {
+ contentLength := ctx.Req.ContentLength
+ xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
+ if xTfsLength > 0 {
+ return xTfsLength, contentLength
+ }
+ return contentLength, contentLength
+}
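+
+// Illustrative example for getUploadFileSize (numbers are made up): when a runner uploads a
+// gzip-compressed chunk, x-tfs-filelength carries the raw file size (e.g. 146515) while
+// Content-Length is the size of the compressed body (e.g. 40960), so the function returns
+// (146515, 40960); without the header both return values fall back to Content-Length.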
diff --git a/routers/api/actions/artifactsv4.go b/routers/api/actions/artifactsv4.go
new file mode 100644
index 0000000..677e89d
--- /dev/null
+++ b/routers/api/actions/artifactsv4.go
@@ -0,0 +1,599 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+// GitHub Actions Artifacts V4 API Simple Description
+//
+// 1. Upload artifact
+// 1.1. CreateArtifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact
+// Request:
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test",
+// "version": 4
+// }
+// Response:
+// {
+// "ok": true,
+// "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75"
+// }
+// 1.2. Upload Zip Content to Blobstorage (unauthenticated request)
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
+// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
+// 1.4. Send the BlockList xml payload to Blobstorage (unauthenticated request)
+// Files of around 800MB are uploaded in parallel and/or out of order, so this list is needed to ensure the correct order
+// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
+// Request
+// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+// <BlockList>
+// <Latest>blockId1</Latest>
+// <Latest>blockId2</Latest>
+// </BlockList>
+// 1.5. FinalizeArtifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test",
+// "size": "2097",
+// "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4"
+// }
+// Response
+// {
+// "ok": true,
+// "artifactId": "4"
+// }
+// 2. Download artifact
+// 2.1. ListArtifacts and optionally filter by artifact exact name or id
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name_filter": "test"
+// }
+// Response
+// {
+// "artifacts": [
+// {
+// "workflowRunBackendId": "21",
+// "workflowJobRunBackendId": "49",
+// "databaseId": "4",
+// "name": "test",
+// "size": "2093",
+// "createdAt": "2024-01-23T00:13:28Z"
+// }
+// ]
+// }
+// 2.2. GetSignedArtifactURL gets the URL to download the zip file of a specific artifact
+// Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL
+// Request
+// {
+// "workflow_run_backend_id": "21",
+// "workflow_job_run_backend_id": "49",
+// "name": "test"
+// }
+// Response
+// {
+// "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76"
+// }
+// 2.3. Download Zip from Blobstorage (unauthenticated request)
+// GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76
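+//
+// Note on the sig parameter (illustrative, derived from buildSignature below): the signed
+// upload/download URLs carry an HMAC-SHA256 computed over the endpoint name, expires,
+// artifactName, taskID and artifactID using the instance's general token signing secret,
+// roughly:
+//
+//	sig = base64url(HMAC-SHA256(secret, endpoint || expires || artifactName || taskID || artifactID))
+//
+// which is why the blob-storage style requests in 1.2-1.4 and 2.3 can stay unauthenticated.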
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/common"
+ "code.gitea.io/gitea/services/context"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ ArtifactV4RouteBase = "/twirp/github.actions.results.api.v1.ArtifactService"
+ ArtifactV4ContentEncoding = "application/zip"
+)
+
+type artifactV4Routes struct {
+ prefix string
+ fs storage.ObjectStorage
+}
+
+func ArtifactV4Contexter() func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ base, baseCleanUp := context.NewBaseContext(resp, req)
+ defer baseCleanUp()
+
+ ctx := &ArtifactContext{Base: base}
+ ctx.AppendContextValue(artifactContextKey, ctx)
+
+ next.ServeHTTP(ctx.Resp, ctx.Req)
+ })
+ }
+}
+
+func ArtifactsV4Routes(prefix string) *web.Route {
+ m := web.NewRoute()
+
+ r := artifactV4Routes{
+ prefix: prefix,
+ fs: storage.ActionsArtifacts,
+ }
+
+ m.Group("", func() {
+ m.Post("CreateArtifact", r.createArtifact)
+ m.Post("FinalizeArtifact", r.finalizeArtifact)
+ m.Post("ListArtifacts", r.listArtifacts)
+ m.Post("GetSignedArtifactURL", r.getSignedArtifactURL)
+ m.Post("DeleteArtifact", r.deleteArtifact)
+ }, ArtifactContexter())
+ m.Group("", func() {
+ m.Put("UploadArtifact", r.uploadArtifact)
+ m.Get("DownloadArtifact", r.downloadArtifact)
+ }, ArtifactV4Contexter())
+
+ return m
+}
+
+func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
+ mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
+ mac.Write([]byte(endp))
+ mac.Write([]byte(expires))
+ mac.Write([]byte(artifactName))
+ mac.Write([]byte(fmt.Sprint(taskID)))
+ mac.Write([]byte(fmt.Sprint(artifactID)))
+ return mac.Sum(nil)
+}
+
+func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID, artifactID int64) string {
+ expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
+ uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
+ "/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID) + "&artifactID=" + fmt.Sprint(artifactID)
+ return uploadURL
+}
+
+func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
+ rawTaskID := ctx.Req.URL.Query().Get("taskID")
+ rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+ sig := ctx.Req.URL.Query().Get("sig")
+ expires := ctx.Req.URL.Query().Get("expires")
+ artifactName := ctx.Req.URL.Query().Get("artifactName")
+ dsig, _ := base64.URLEncoding.DecodeString(sig)
+ taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
+ artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+
+	expectedSig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
+	if !hmac.Equal(dsig, expectedSig) {
+ log.Error("Error unauthorized")
+ ctx.Error(http.StatusUnauthorized, "Error unauthorized")
+ return nil, "", false
+ }
+ t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
+ if err != nil || t.Before(time.Now()) {
+ log.Error("Error link expired")
+ ctx.Error(http.StatusUnauthorized, "Error link expired")
+ return nil, "", false
+ }
+ task, err := actions.GetTaskByID(ctx, taskID)
+ if err != nil {
+ log.Error("Error runner api getting task by ID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+ return nil, "", false
+ }
+ if task.Status != actions.StatusRunning {
+ log.Error("Error runner api getting task: task is not running")
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+ return nil, "", false
+ }
+ if err := task.LoadJob(ctx); err != nil {
+ log.Error("Error runner api getting job: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+ return nil, "", false
+ }
+ return task, artifactName, true
+}
+
+func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID int64, name string) (*actions.ActionArtifact, error) {
+ var art actions.ActionArtifact
+ has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ? AND content_encoding = ?", runID, name, name+".zip", ArtifactV4ContentEncoding).Get(&art)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, util.ErrNotExist
+ }
+ return &art, nil
+}
+
+func (r *artifactV4Routes) parseProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool {
+ body, err := io.ReadAll(ctx.Req.Body)
+ if err != nil {
+ log.Error("Error decode request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error decode request body")
+ return false
+ }
+ err = protojson.Unmarshal(body, req)
+ if err != nil {
+ log.Error("Error decode request body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error decode request body")
+ return false
+ }
+ return true
+}
+
+func (r *artifactV4Routes) sendProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) {
+ resp, err := protojson.Marshal(req)
+ if err != nil {
+ log.Error("Error encode response body: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error encode response body")
+ return
+ }
+ ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
+ ctx.Resp.WriteHeader(http.StatusOK)
+ _, _ = ctx.Resp.Write(resp)
+}
+
+func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
+ var req CreateArtifactRequest
+
+ if ok := r.parseProtbufBody(ctx, &req); !ok {
+ return
+ }
+ _, _, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifactName := req.Name
+
+	retentionDays := setting.Actions.ArtifactRetentionDays
+	if req.ExpiresAt != nil {
+		retentionDays = int64(time.Until(req.ExpiresAt.AsTime()).Hours() / 24)
+	}
+	// create or get artifact with name and path
+	artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", retentionDays)
+ if err != nil {
+ log.Error("Error create or get artifact: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+ return
+ }
+ artifact.ContentEncoding = ArtifactV4ContentEncoding
+ artifact.FileSize = 0
+ artifact.FileCompressedSize = 0
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error UpdateArtifactByID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+ return
+ }
+
+ respData := CreateArtifactResponse{
+ Ok: true,
+ SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
+ }
+ r.sendProtbufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
+ task, artifactName, ok := r.verifySignature(ctx, "UploadArtifact")
+ if !ok {
+ return
+ }
+
+ // check the owner's quota
+ ok, err := quota_model.EvaluateForUser(ctx, task.OwnerID, quota_model.LimitSubjectSizeAssetsArtifacts)
+ if err != nil {
+ log.Error("quota_model.EvaluateForUser: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error checking quota")
+ return
+ }
+ if !ok {
+ ctx.Error(http.StatusRequestEntityTooLarge, "Quota exceeded")
+ return
+ }
+
+ comp := ctx.Req.URL.Query().Get("comp")
+ switch comp {
+ case "block", "appendBlock":
+ blockid := ctx.Req.URL.Query().Get("blockid")
+ if blockid == "" {
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ _, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
+ if err != nil {
+				log.Error("Error appending upload chunk: %v", err)
+				ctx.Error(http.StatusInternalServerError, "Error appending upload chunk")
+ return
+ }
+ artifact.FileCompressedSize += ctx.Req.ContentLength
+ artifact.FileSize += ctx.Req.ContentLength
+ if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+ log.Error("Error UpdateArtifactByID: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+ return
+ }
+ } else {
+ _, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
+ if err != nil {
+				log.Error("Error saving block: %v", err)
+				ctx.Error(http.StatusInternalServerError, "Error saving block")
+ return
+ }
+ }
+ ctx.JSON(http.StatusCreated, "appended")
+ case "blocklist":
+ rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+ artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+ _, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
+ if err != nil {
+			log.Error("Error saving blockList: %v", err)
+			ctx.Error(http.StatusInternalServerError, "Error saving blockList")
+ return
+ }
+ ctx.JSON(http.StatusCreated, "created")
+ }
+}
+
+type BlockList struct {
+ Latest []string `xml:"Latest"`
+}
+
+type Latest struct {
+ Value string `xml:",chardata"`
+}
+
+func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
+ blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
+ s, err := r.fs.Open(blockListName)
+ if err != nil {
+ return nil, err
+ }
+
+ xdec := xml.NewDecoder(s)
+ blockList := &BlockList{}
+ err = xdec.Decode(blockList)
+
+ delerr := r.fs.Delete(blockListName)
+ if delerr != nil {
+ log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
+ }
+ return blockList, err
+}
+
+func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
+ var req FinalizeArtifactRequest
+
+ if ok := r.parseProtbufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ var chunks []*chunkFileItem
+ blockList, err := r.readBlockList(runID, artifact.ID)
+ if err != nil {
+ log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
+ chunkMap, err := listChunksByRunID(r.fs, runID)
+ if err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ chunks, ok = chunkMap[artifact.ID]
+ if !ok {
+ log.Error("Error merge chunks")
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ } else {
+ chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
+ if err != nil {
+ log.Error("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+ artifact.FileSize = chunks[len(chunks)-1].End + 1
+ artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
+ }
+
+ checksum := ""
+ if req.Hash != nil {
+ checksum = req.Hash.Value
+ }
+ if err := mergeChunksForArtifact(ctx, chunks, r.fs, artifact, checksum); err != nil {
+ log.Warn("Error merge chunks: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+ return
+ }
+
+ respData := FinalizeArtifactResponse{
+ Ok: true,
+ ArtifactId: artifact.ID,
+ }
+ r.sendProtbufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
+ var req ListArtifactsRequest
+
+ if ok := r.parseProtbufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
+ if err != nil {
+ log.Error("Error getting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+ if len(artifacts) == 0 {
+ log.Debug("[artifact] handleListArtifacts, no artifacts")
+ ctx.Error(http.StatusNotFound)
+ return
+ }
+
+ list := []*ListArtifactsResponse_MonolithArtifact{}
+
+ table := map[string]*ListArtifactsResponse_MonolithArtifact{}
+ for _, artifact := range artifacts {
+		if _, ok := table[artifact.ArtifactName]; ok ||
+			req.IdFilter != nil && artifact.ID != req.IdFilter.Value ||
+			req.NameFilter != nil && artifact.ArtifactName != req.NameFilter.Value ||
+			artifact.ArtifactName+".zip" != artifact.ArtifactPath ||
+			artifact.ContentEncoding != ArtifactV4ContentEncoding {
+ table[artifact.ArtifactName] = nil
+ continue
+ }
+
+ table[artifact.ArtifactName] = &ListArtifactsResponse_MonolithArtifact{
+ Name: artifact.ArtifactName,
+ CreatedAt: timestamppb.New(artifact.CreatedUnix.AsTime()),
+ DatabaseId: artifact.ID,
+ WorkflowRunBackendId: req.WorkflowRunBackendId,
+ WorkflowJobRunBackendId: req.WorkflowJobRunBackendId,
+ Size: artifact.FileSize,
+ }
+ }
+ for _, artifact := range table {
+ if artifact != nil {
+ list = append(list, artifact)
+ }
+ }
+
+ respData := ListArtifactsResponse{
+ Artifacts: list,
+ }
+ r.sendProtbufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
+ var req GetSignedArtifactURLRequest
+
+ if ok := r.parseProtbufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ artifactName := req.Name
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ respData := GetSignedArtifactURLResponse{}
+
+ if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
+ u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath)
+ if u != nil && err == nil {
+ respData.SignedUrl = u.String()
+ }
+ }
+ if respData.SignedUrl == "" {
+ respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
+ }
+ r.sendProtbufBody(ctx, &respData)
+}
+
+func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) {
+ task, artifactName, ok := r.verifySignature(ctx, "DownloadArtifact")
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ file, err := r.fs.Open(artifact.StoragePath)
+ if err != nil {
+ log.Error("Error artifact could not be opened: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+
+ common.ServeContentByReadSeeker(ctx.Base, artifactName, util.ToPointer(artifact.UpdatedUnix.AsTime()), file)
+}
+
+func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
+ var req DeleteArtifactRequest
+
+ if ok := r.parseProtbufBody(ctx, &req); !ok {
+ return
+ }
+ _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
+ if !ok {
+ return
+ }
+
+ // get artifact by name
+ artifact, err := r.getArtifactByName(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error artifact not found: %v", err)
+ ctx.Error(http.StatusNotFound, "Error artifact not found")
+ return
+ }
+
+ err = actions.SetArtifactNeedDelete(ctx, runID, req.Name)
+ if err != nil {
+ log.Error("Error deleting artifacts: %v", err)
+ ctx.Error(http.StatusInternalServerError, err.Error())
+ return
+ }
+
+ respData := DeleteArtifactResponse{
+ Ok: true,
+ ArtifactId: artifact.ID,
+ }
+ r.sendProtbufBody(ctx, &respData)
+}
diff --git a/routers/api/actions/ping/ping.go b/routers/api/actions/ping/ping.go
new file mode 100644
index 0000000..13985c9
--- /dev/null
+++ b/routers/api/actions/ping/ping.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package ping
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/modules/log"
+
+ pingv1 "code.gitea.io/actions-proto-go/ping/v1"
+ "code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
+ "connectrpc.com/connect"
+)
+
+func NewPingServiceHandler() (string, http.Handler) {
+ return pingv1connect.NewPingServiceHandler(&Service{})
+}
+
+var _ pingv1connect.PingServiceHandler = (*Service)(nil)
+
+type Service struct {
+ pingv1connect.UnimplementedPingServiceHandler
+}
+
+func (s *Service) Ping(
+ ctx context.Context,
+ req *connect.Request[pingv1.PingRequest],
+) (*connect.Response[pingv1.PingResponse], error) {
+ log.Trace("Content-Type: %s", req.Header().Get("Content-Type"))
+ log.Trace("User-Agent: %s", req.Header().Get("User-Agent"))
+ res := connect.NewResponse(&pingv1.PingResponse{
+ Data: fmt.Sprintf("Hello, %s!", req.Msg.Data),
+ })
+ return res, nil
+}
diff --git a/routers/api/actions/ping/ping_test.go b/routers/api/actions/ping/ping_test.go
new file mode 100644
index 0000000..098b003
--- /dev/null
+++ b/routers/api/actions/ping/ping_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package ping
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ pingv1 "code.gitea.io/actions-proto-go/ping/v1"
+ "code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
+ "connectrpc.com/connect"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestService(t *testing.T) {
+ mux := http.NewServeMux()
+ mux.Handle(pingv1connect.NewPingServiceHandler(
+ &Service{},
+ ))
+ MainServiceTest(t, mux)
+}
+
+func MainServiceTest(t *testing.T, h http.Handler) {
+ t.Parallel()
+ server := httptest.NewUnstartedServer(h)
+ server.EnableHTTP2 = true
+ server.StartTLS()
+ defer server.Close()
+
+ connectClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ )
+
+ grpcClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ connect.WithGRPC(),
+ )
+
+ grpcWebClient := pingv1connect.NewPingServiceClient(
+ server.Client(),
+ server.URL,
+ connect.WithGRPCWeb(),
+ )
+
+ clients := []pingv1connect.PingServiceClient{connectClient, grpcClient, grpcWebClient}
+ t.Run("ping request", func(t *testing.T) {
+ for _, client := range clients {
+ result, err := client.Ping(context.Background(), connect.NewRequest(&pingv1.PingRequest{
+ Data: "foobar",
+ }))
+ require.NoError(t, err)
+ assert.Equal(t, "Hello, foobar!", result.Msg.Data)
+ }
+ })
+}
diff --git a/routers/api/actions/runner/interceptor.go b/routers/api/actions/runner/interceptor.go
new file mode 100644
index 0000000..521ba91
--- /dev/null
+++ b/routers/api/actions/runner/interceptor.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "crypto/subtle"
+ "errors"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "connectrpc.com/connect"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ uuidHeaderKey = "x-runner-uuid"
+ tokenHeaderKey = "x-runner-token"
+)
+
+var withRunner = connect.WithInterceptors(connect.UnaryInterceptorFunc(func(unaryFunc connect.UnaryFunc) connect.UnaryFunc {
+ return func(ctx context.Context, request connect.AnyRequest) (connect.AnyResponse, error) {
+ methodName := getMethodName(request)
+ if methodName == "Register" {
+ return unaryFunc(ctx, request)
+ }
+ uuid := request.Header().Get(uuidHeaderKey)
+ token := request.Header().Get(tokenHeaderKey)
+
+ runner, err := actions_model.GetRunnerByUUID(ctx, uuid)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ return nil, status.Error(codes.Unauthenticated, "unregistered runner")
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ if subtle.ConstantTimeCompare([]byte(runner.TokenHash), []byte(auth_model.HashToken(token, runner.TokenSalt))) != 1 {
+ return nil, status.Error(codes.Unauthenticated, "unregistered runner")
+ }
+
+ cols := []string{"last_online"}
+ runner.LastOnline = timeutil.TimeStampNow()
+ if methodName == "UpdateTask" || methodName == "UpdateLog" {
+ runner.LastActive = timeutil.TimeStampNow()
+ cols = append(cols, "last_active")
+ }
+ if err := actions_model.UpdateRunner(ctx, runner, cols...); err != nil {
+ log.Error("can't update runner status: %v", err)
+ }
+
+ ctx = context.WithValue(ctx, runnerCtxKey{}, runner)
+ return unaryFunc(ctx, request)
+ }
+}))
+
+func getMethodName(req connect.AnyRequest) string {
+ splits := strings.Split(req.Spec().Procedure, "/")
+ if len(splits) > 0 {
+ return splits[len(splits)-1]
+ }
+ return ""
+}
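+
+// Illustrative example (the exact proto package path aside): a procedure such as
+// "/runner.v1.RunnerService/FetchTask" yields the method name "FetchTask".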
+
+type runnerCtxKey struct{}
+
+func GetRunner(ctx context.Context) *actions_model.ActionRunner {
+ if v := ctx.Value(runnerCtxKey{}); v != nil {
+ if r, ok := v.(*actions_model.ActionRunner); ok {
+ return r
+ }
+ }
+ return nil
+}
diff --git a/routers/api/actions/runner/runner.go b/routers/api/actions/runner/runner.go
new file mode 100644
index 0000000..017bdf6
--- /dev/null
+++ b/routers/api/actions/runner/runner.go
@@ -0,0 +1,289 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+ actions_service "code.gitea.io/gitea/services/actions"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
+ "connectrpc.com/connect"
+ gouuid "github.com/google/uuid"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func NewRunnerServiceHandler() (string, http.Handler) {
+ return runnerv1connect.NewRunnerServiceHandler(
+ &Service{},
+ connect.WithCompressMinBytes(1024),
+ withRunner,
+ )
+}
+
+var _ runnerv1connect.RunnerServiceClient = (*Service)(nil)
+
+type Service struct {
+ runnerv1connect.UnimplementedRunnerServiceHandler
+}
+
+// Register registers a new runner.
+func (s *Service) Register(
+ ctx context.Context,
+ req *connect.Request[runnerv1.RegisterRequest],
+) (*connect.Response[runnerv1.RegisterResponse], error) {
+ if req.Msg.Token == "" || req.Msg.Name == "" {
+ return nil, errors.New("missing runner token, name")
+ }
+
+ runnerToken, err := actions_model.GetRunnerToken(ctx, req.Msg.Token)
+ if err != nil {
+ return nil, errors.New("runner registration token not found")
+ }
+
+ if !runnerToken.IsActive {
+ return nil, errors.New("runner registration token has been invalidated, please use the latest one")
+ }
+
+ if runnerToken.OwnerID > 0 {
+ if _, err := user_model.GetUserByID(ctx, runnerToken.OwnerID); err != nil {
+ return nil, errors.New("owner of the token not found")
+ }
+ }
+
+ if runnerToken.RepoID > 0 {
+ if _, err := repo_model.GetRepositoryByID(ctx, runnerToken.RepoID); err != nil {
+ return nil, errors.New("repository of the token not found")
+ }
+ }
+
+ labels := req.Msg.Labels
+
+ // create new runner
+ name, _ := util.SplitStringAtByteN(req.Msg.Name, 255)
+ runner := &actions_model.ActionRunner{
+ UUID: gouuid.New().String(),
+ Name: name,
+ OwnerID: runnerToken.OwnerID,
+ RepoID: runnerToken.RepoID,
+ Version: req.Msg.Version,
+ AgentLabels: labels,
+ }
+ if err := runner.GenerateToken(); err != nil {
+ return nil, errors.New("can't generate token")
+ }
+
+ // create new runner
+ if err := actions_model.CreateRunner(ctx, runner); err != nil {
+ return nil, errors.New("can't create new runner")
+ }
+
+ // update token status
+ runnerToken.IsActive = true
+ if err := actions_model.UpdateRunnerToken(ctx, runnerToken, "is_active"); err != nil {
+ return nil, errors.New("can't update runner token status")
+ }
+
+ res := connect.NewResponse(&runnerv1.RegisterResponse{
+ Runner: &runnerv1.Runner{
+ Id: runner.ID,
+ Uuid: runner.UUID,
+ Token: runner.Token,
+ Name: runner.Name,
+ Version: runner.Version,
+ Labels: runner.AgentLabels,
+ },
+ })
+
+ return res, nil
+}
+
+func (s *Service) Declare(
+ ctx context.Context,
+ req *connect.Request[runnerv1.DeclareRequest],
+) (*connect.Response[runnerv1.DeclareResponse], error) {
+ runner := GetRunner(ctx)
+ runner.AgentLabels = req.Msg.Labels
+ runner.Version = req.Msg.Version
+ if err := actions_model.UpdateRunner(ctx, runner, "agent_labels", "version"); err != nil {
+ return nil, status.Errorf(codes.Internal, "update runner: %v", err)
+ }
+
+ return connect.NewResponse(&runnerv1.DeclareResponse{
+ Runner: &runnerv1.Runner{
+ Id: runner.ID,
+ Uuid: runner.UUID,
+ Token: runner.Token,
+ Name: runner.Name,
+ Version: runner.Version,
+ Labels: runner.AgentLabels,
+ },
+ }), nil
+}
+
+// FetchTask assigns a task to the runner
+func (s *Service) FetchTask(
+ ctx context.Context,
+ req *connect.Request[runnerv1.FetchTaskRequest],
+) (*connect.Response[runnerv1.FetchTaskResponse], error) {
+ runner := GetRunner(ctx)
+
+ var task *runnerv1.Task
+ tasksVersion := req.Msg.TasksVersion // task version from runner
+ latestVersion, err := actions_model.GetTasksVersionByScope(ctx, runner.OwnerID, runner.RepoID)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "query tasks version failed: %v", err)
+ } else if latestVersion == 0 {
+ if err := actions_model.IncreaseTaskVersion(ctx, runner.OwnerID, runner.RepoID); err != nil {
+ return nil, status.Errorf(codes.Internal, "fail to increase task version: %v", err)
+ }
+		// if we don't increase the value of `latestVersion` here,
+		// the FetchTask response will return tasksVersion as zero,
+		// and the runner will treat it as an old version of Gitea.
+ latestVersion++
+ }
+
+ if tasksVersion != latestVersion {
+		// if the task version in the request is not equal to the version in the db,
+		// there may still be tasks that have not been assigned.
+		// try to pick a task for the runner that sent the request.
+ if t, ok, err := pickTask(ctx, runner); err != nil {
+ log.Error("pick task failed: %v", err)
+ return nil, status.Errorf(codes.Internal, "pick task: %v", err)
+ } else if ok {
+ task = t
+ }
+ }
+ res := connect.NewResponse(&runnerv1.FetchTaskResponse{
+ Task: task,
+ TasksVersion: latestVersion,
+ })
+ return res, nil
+}
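+
+// Illustrative example for the version handshake above (numbers are made up): the runner last
+// saw tasksVersion 5; if the stored version for this owner/repo scope is now 7, new tasks may
+// have been created, so the server tries to pick one and returns TasksVersion 7 either way.
+// If both are 5, no pick is attempted and the runner simply polls again later.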
+
+// UpdateTask updates the task status.
+func (s *Service) UpdateTask(
+ ctx context.Context,
+ req *connect.Request[runnerv1.UpdateTaskRequest],
+) (*connect.Response[runnerv1.UpdateTaskResponse], error) {
+ task, err := actions_model.UpdateTaskByState(ctx, req.Msg.State)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "update task: %v", err)
+ }
+
+ for k, v := range req.Msg.Outputs {
+ if len(k) > 255 {
+ log.Warn("Ignore the output of task %d because the key is too long: %q", task.ID, k)
+ continue
+ }
+ // The value can be a maximum of 1 MB
+ if l := len(v); l > 1024*1024 {
+ log.Warn("Ignore the output %q of task %d because the value is too long: %v", k, task.ID, l)
+ continue
+ }
+ // There's another limitation on GitHub that the total of all outputs in a workflow run can be a maximum of 50 MB.
+		// We don't check the total size here because it's not easy to do, and it isn't really worth it.
+ // See https://docs.github.com/en/actions/using-jobs/defining-outputs-for-jobs
+
+ if err := actions_model.InsertTaskOutputIfNotExist(ctx, task.ID, k, v); err != nil {
+ log.Warn("Failed to insert the output %q of task %d: %v", k, task.ID, err)
+			// It's OK not to return an error; the runner will resend the outputs.
+ }
+ }
+ sentOutputs, err := actions_model.FindTaskOutputKeyByTaskID(ctx, task.ID)
+ if err != nil {
+ log.Warn("Failed to find the sent outputs of task %d: %v", task.ID, err)
+		// It's OK not to return an error here; the case is handled when the runner resends the outputs.
+ }
+
+ if err := task.LoadJob(ctx); err != nil {
+ return nil, status.Errorf(codes.Internal, "load job: %v", err)
+ }
+ if err := task.Job.LoadRun(ctx); err != nil {
+ return nil, status.Errorf(codes.Internal, "load run: %v", err)
+ }
+
+	// Don't create a commit status for cron jobs.
+ if task.Job.Run.ScheduleID == 0 {
+ actions_service.CreateCommitStatus(ctx, task.Job)
+ }
+
+ if req.Msg.State.Result != runnerv1.Result_RESULT_UNSPECIFIED {
+ if err := actions_service.EmitJobsIfReady(task.Job.RunID); err != nil {
+ log.Error("Emit ready jobs of run %d: %v", task.Job.RunID, err)
+ }
+ }
+
+ return connect.NewResponse(&runnerv1.UpdateTaskResponse{
+ State: &runnerv1.TaskState{
+ Id: req.Msg.State.Id,
+ Result: task.Status.AsResult(),
+ },
+ SentOutputs: sentOutputs,
+ }), nil
+}
+
+// UpdateLog uploads log of the task.
+func (s *Service) UpdateLog(
+ ctx context.Context,
+ req *connect.Request[runnerv1.UpdateLogRequest],
+) (*connect.Response[runnerv1.UpdateLogResponse], error) {
+ res := connect.NewResponse(&runnerv1.UpdateLogResponse{})
+
+ task, err := actions_model.GetTaskByID(ctx, req.Msg.TaskId)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "get task: %v", err)
+ }
+ ack := task.LogLength
+
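+	// Nothing to write if the request has no rows, starts beyond what has already been
+	// acknowledged (a gap), or ends at or before the acknowledged length (a duplicate).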
+ if len(req.Msg.Rows) == 0 || req.Msg.Index > ack || int64(len(req.Msg.Rows))+req.Msg.Index <= ack {
+ res.Msg.AckIndex = ack
+ return res, nil
+ }
+
+ if task.LogInStorage {
+ return nil, status.Errorf(codes.AlreadyExists, "log file has been archived")
+ }
+
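+	// Skip the rows that were already persisted: req.Msg.Index is the index of the first
+	// row in this request, so the first ack-Index rows are duplicates from a resend.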
+ rows := req.Msg.Rows[ack-req.Msg.Index:]
+ ns, err := actions.WriteLogs(ctx, task.LogFilename, task.LogSize, rows)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "write logs: %v", err)
+ }
+ task.LogLength += int64(len(rows))
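+	// ns holds the number of bytes written for each row; record each row's starting offset and grow the log size.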
+ for _, n := range ns {
+ task.LogIndexes = append(task.LogIndexes, task.LogSize)
+ task.LogSize += int64(n)
+ }
+
+ res.Msg.AckIndex = task.LogLength
+
+ var remove func()
+ if req.Msg.NoMore {
+ task.LogInStorage = true
+ remove, err = actions.TransferLogs(ctx, task.LogFilename)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "transfer logs: %v", err)
+ }
+ }
+
+ if err := actions_model.UpdateTask(ctx, task, "log_indexes", "log_length", "log_size", "log_in_storage"); err != nil {
+ return nil, status.Errorf(codes.Internal, "update task: %v", err)
+ }
+ if remove != nil {
+ remove()
+ }
+
+ return res, nil
+}
diff --git a/routers/api/actions/runner/utils.go b/routers/api/actions/runner/utils.go
new file mode 100644
index 0000000..ff6ec5b
--- /dev/null
+++ b/routers/api/actions/runner/utils.go
@@ -0,0 +1,189 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ secret_model "code.gitea.io/gitea/models/secret"
+ actions_module "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/actions"
+
+ runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+ "google.golang.org/protobuf/types/known/structpb"
+)
+
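+// pickTask tries to assign a pending task to the runner and, if one is found, builds its
+// runner protocol representation including secrets, variables and needs.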
+func pickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv1.Task, bool, error) {
+ t, ok, err := actions_model.CreateTaskForRunner(ctx, runner)
+ if err != nil {
+ return nil, false, fmt.Errorf("CreateTaskForRunner: %w", err)
+ }
+ if !ok {
+ return nil, false, nil
+ }
+
+ secrets, err := secret_model.GetSecretsOfTask(ctx, t)
+ if err != nil {
+ return nil, false, fmt.Errorf("GetSecretsOfTask: %w", err)
+ }
+
+ vars, err := actions_model.GetVariablesOfRun(ctx, t.Job.Run)
+ if err != nil {
+ return nil, false, fmt.Errorf("GetVariablesOfRun: %w", err)
+ }
+
+ actions.CreateCommitStatus(ctx, t.Job)
+
+ task := &runnerv1.Task{
+ Id: t.ID,
+ WorkflowPayload: t.Job.WorkflowPayload,
+ Context: generateTaskContext(t),
+ Secrets: secrets,
+ Vars: vars,
+ }
+
+ if needs, err := findTaskNeeds(ctx, t); err != nil {
+ log.Error("Cannot find needs for task %v: %v", t.ID, err)
+		// Go on with empty needs.
+		// If we returned an error, the task would be stranded: it has already been assigned to the runner,
+		// but the runner would never receive it.
+		// Missing needs is less serious: the task will simply fail and the runner will report the error in its logs.
+ } else {
+ task.Needs = needs
+ }
+
+ return task, true, nil
+}
+
+func generateTaskContext(t *actions_model.ActionTask) *structpb.Struct {
+ event := map[string]any{}
+ _ = json.Unmarshal([]byte(t.Job.Run.EventPayload), &event)
+
+	// TriggerEvent was added in https://github.com/go-gitea/gitea/pull/25229.
+	// This fallback is for old ActionRuns that don't have the TriggerEvent field
+	// and should be removed in 1.22.
+ eventName := t.Job.Run.TriggerEvent
+ if eventName == "" {
+ eventName = t.Job.Run.Event.Event()
+ }
+
+ baseRef := ""
+ headRef := ""
+ ref := t.Job.Run.Ref
+ sha := t.Job.Run.CommitSHA
+ if pullPayload, err := t.Job.Run.GetPullRequestEventPayload(); err == nil && pullPayload.PullRequest != nil && pullPayload.PullRequest.Base != nil && pullPayload.PullRequest.Head != nil {
+ baseRef = pullPayload.PullRequest.Base.Ref
+ headRef = pullPayload.PullRequest.Head.Ref
+
+		// If the TriggerEvent is pull_request_target, ref and sha need to be set according to the base of the pull request.
+		// GitHub's documentation says ref should be the branch or tag that triggered the workflow,
+		// but when the TriggerEvent is pull_request_target, the ref is the base branch.
+ if t.Job.Run.TriggerEvent == actions_module.GithubEventPullRequestTarget {
+ ref = git.BranchPrefix + pullPayload.PullRequest.Base.Name
+ sha = pullPayload.PullRequest.Base.Sha
+ }
+ }
+
+ refName := git.RefName(ref)
+
+ giteaRuntimeToken, err := actions.CreateAuthorizationToken(t.ID, t.Job.RunID, t.JobID)
+ if err != nil {
+ log.Error("actions.CreateAuthorizationToken failed: %v", err)
+ }
+
+ taskContext, err := structpb.NewStruct(map[string]any{
+ // standard contexts, see https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
+ "action": "", // string, The name of the action currently running, or the id of a step. GitHub removes special characters, and uses the name __run when the current step runs a script without an id. If you use the same action more than once in the same job, the name will include a suffix with the sequence number with underscore before it. For example, the first script you run will have the name __run, and the second script will be named __run_2. Similarly, the second invocation of actions/checkout will be actionscheckout2.
+ "action_path": "", // string, The path where an action is located. This property is only supported in composite actions. You can use this path to access files located in the same repository as the action.
+ "action_ref": "", // string, For a step executing an action, this is the ref of the action being executed. For example, v2.
+ "action_repository": "", // string, For a step executing an action, this is the owner and repository name of the action. For example, actions/checkout.
+ "action_status": "", // string, For a composite action, the current result of the composite action.
+ "actor": t.Job.Run.TriggerUser.Name, // string, The username of the user that triggered the initial workflow run. If the workflow run is a re-run, this value may differ from github.triggering_actor. Any workflow re-runs will use the privileges of github.actor, even if the actor initiating the re-run (github.triggering_actor) has different privileges.
+ "api_url": setting.AppURL + "api/v1", // string, The URL of the GitHub REST API.
+ "base_ref": baseRef, // string, The base_ref or target branch of the pull request in a workflow run. This property is only available when the event that triggers a workflow run is either pull_request or pull_request_target.
+ "env": "", // string, Path on the runner to the file that sets environment variables from workflow commands. This file is unique to the current step and is a different file for each step in a job. For more information, see "Workflow commands for GitHub Actions."
+ "event": event, // object, The full event webhook payload. You can access individual properties of the event using this context. This object is identical to the webhook payload of the event that triggered the workflow run, and is different for each event. The webhooks for each GitHub Actions event is linked in "Events that trigger workflows." For example, for a workflow run triggered by the push event, this object contains the contents of the push webhook payload.
+ "event_name": eventName, // string, The name of the event that triggered the workflow run.
+ "event_path": "", // string, The path to the file on the runner that contains the full event webhook payload.
+ "graphql_url": "", // string, The URL of the GitHub GraphQL API.
+ "head_ref": headRef, // string, The head_ref or source branch of the pull request in a workflow run. This property is only available when the event that triggers a workflow run is either pull_request or pull_request_target.
+ "job": fmt.Sprint(t.JobID), // string, The job_id of the current job.
+ "ref": ref, // string, The fully-formed ref of the branch or tag that triggered the workflow run. For workflows triggered by push, this is the branch or tag ref that was pushed. For workflows triggered by pull_request, this is the pull request merge branch. For workflows triggered by release, this is the release tag created. For other triggers, this is the branch or tag ref that triggered the workflow run. This is only set if a branch or tag is available for the event type. The ref given is fully-formed, meaning that for branches the format is refs/heads/<branch_name>, for pull requests it is refs/pull/<pr_number>/merge, and for tags it is refs/tags/<tag_name>. For example, refs/heads/feature-branch-1.
+ "ref_name": refName.ShortName(), // string, The short ref name of the branch or tag that triggered the workflow run. This value matches the branch or tag name shown on GitHub. For example, feature-branch-1.
+ "ref_protected": false, // boolean, true if branch protections are configured for the ref that triggered the workflow run.
+ "ref_type": refName.RefType(), // string, The type of ref that triggered the workflow run. Valid values are branch or tag.
+ "path": "", // string, Path on the runner to the file that sets system PATH variables from workflow commands. This file is unique to the current step and is a different file for each step in a job. For more information, see "Workflow commands for GitHub Actions."
+ "repository": t.Job.Run.Repo.OwnerName + "/" + t.Job.Run.Repo.Name, // string, The owner and repository name. For example, Codertocat/Hello-World.
+ "repository_owner": t.Job.Run.Repo.OwnerName, // string, The repository owner's name. For example, Codertocat.
+ "repositoryUrl": t.Job.Run.Repo.HTMLURL(), // string, The Git URL to the repository. For example, git://github.com/codertocat/hello-world.git.
+ "retention_days": "", // string, The number of days that workflow run logs and artifacts are kept.
+ "run_id": fmt.Sprint(t.Job.RunID), // string, A unique number for each workflow run within a repository. This number does not change if you re-run the workflow run.
+ "run_number": fmt.Sprint(t.Job.Run.Index), // string, A unique number for each run of a particular workflow in a repository. This number begins at 1 for the workflow's first run, and increments with each new run. This number does not change if you re-run the workflow run.
+ "run_attempt": fmt.Sprint(t.Job.Attempt), // string, A unique number for each attempt of a particular workflow run in a repository. This number begins at 1 for the workflow run's first attempt, and increments with each re-run.
+ "secret_source": "Actions", // string, The source of a secret used in a workflow. Possible values are None, Actions, Dependabot, or Codespaces.
+ "server_url": setting.AppURL, // string, The URL of the GitHub server. For example: https://github.com.
+ "sha": sha, // string, The commit SHA that triggered the workflow. The value of this commit SHA depends on the event that triggered the workflow. For more information, see "Events that trigger workflows." For example, ffac537e6cbbf934b08745a378932722df287a53.
+ "token": t.Token, // string, A token to authenticate on behalf of the GitHub App installed on your repository. This is functionally equivalent to the GITHUB_TOKEN secret. For more information, see "Automatic token authentication."
+ "triggering_actor": "", // string, The username of the user that initiated the workflow run. If the workflow run is a re-run, this value may differ from github.actor. Any workflow re-runs will use the privileges of github.actor, even if the actor initiating the re-run (github.triggering_actor) has different privileges.
+ "workflow": t.Job.Run.WorkflowID, // string, The name of the workflow. If the workflow file doesn't specify a name, the value of this property is the full path of the workflow file in the repository.
+ "workspace": "", // string, The default working directory on the runner for steps, and the default location of your repository when using the checkout action.
+
+ // additional contexts
+ "gitea_default_actions_url": setting.Actions.DefaultActionsURL.URL(),
+ "gitea_runtime_token": giteaRuntimeToken,
+ })
+ if err != nil {
+ log.Error("structpb.NewStruct failed: %v", err)
+ }
+
+ return taskContext
+}
+
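+// findTaskNeeds returns the results and outputs of the finished jobs the task's job depends on, keyed by job ID.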
+func findTaskNeeds(ctx context.Context, task *actions_model.ActionTask) (map[string]*runnerv1.TaskNeed, error) {
+ if err := task.LoadAttributes(ctx); err != nil {
+ return nil, fmt.Errorf("LoadAttributes: %w", err)
+ }
+ if len(task.Job.Needs) == 0 {
+ return nil, nil
+ }
+ needs := container.SetOf(task.Job.Needs...)
+
+ jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: task.Job.RunID})
+ if err != nil {
+ return nil, fmt.Errorf("FindRunJobs: %w", err)
+ }
+
+ ret := make(map[string]*runnerv1.TaskNeed, len(needs))
+ for _, job := range jobs {
+ if !needs.Contains(job.JobID) {
+ continue
+ }
+ if job.TaskID == 0 || !job.Status.IsDone() {
+			// This shouldn't happen unless the job has been rerun.
+ continue
+ }
+ outputs := make(map[string]string)
+ got, err := actions_model.FindTaskOutputByTaskID(ctx, job.TaskID)
+ if err != nil {
+ return nil, fmt.Errorf("FindTaskOutputByTaskID: %w", err)
+ }
+ for _, v := range got {
+ outputs[v.OutputKey] = v.OutputValue
+ }
+ ret[job.JobID] = &runnerv1.TaskNeed{
+ Outputs: outputs,
+ Result: runnerv1.Result(job.Status),
+ }
+ }
+
+ return ret, nil
+}
diff --git a/routers/api/forgejo/v1/api.go b/routers/api/forgejo/v1/api.go
new file mode 100644
index 0000000..88c7502
--- /dev/null
+++ b/routers/api/forgejo/v1/api.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package v1
+
+import (
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/shared"
+)
+
+func Routes() *web.Route {
+ m := web.NewRoute()
+
+ m.Use(shared.Middlewares()...)
+
+ forgejo := NewForgejo()
+ m.Get("", Root)
+ m.Get("/version", forgejo.GetVersion)
+ return m
+}
diff --git a/routers/api/forgejo/v1/forgejo.go b/routers/api/forgejo/v1/forgejo.go
new file mode 100644
index 0000000..0f1f4f1
--- /dev/null
+++ b/routers/api/forgejo/v1/forgejo.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: MIT
+
+package v1
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+type Forgejo struct{}
+
+var _ ServerInterface = &Forgejo{}
+
+func NewForgejo() *Forgejo {
+ return &Forgejo{}
+}
+
+func (f *Forgejo) GetVersion(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ _ = json.NewEncoder(w).Encode(Version{&setting.ForgejoVersion})
+}
diff --git a/routers/api/forgejo/v1/generated.go b/routers/api/forgejo/v1/generated.go
new file mode 100644
index 0000000..725ddf5
--- /dev/null
+++ b/routers/api/forgejo/v1/generated.go
@@ -0,0 +1,167 @@
+// Package v1 provides primitives to interact with the openapi HTTP API.
+//
+// Code generated by github.com/deepmap/oapi-codegen version v1.12.4 DO NOT EDIT.
+package v1
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-chi/chi/v5"
+)
+
+// Version defines model for Version.
+type Version struct {
+ Version *string `json:"version,omitempty"`
+}
+
+// ServerInterface represents all server handlers.
+type ServerInterface interface {
+ // API version
+ // (GET /version)
+ GetVersion(w http.ResponseWriter, r *http.Request)
+}
+
+// ServerInterfaceWrapper converts contexts to parameters.
+type ServerInterfaceWrapper struct {
+ Handler ServerInterface
+ HandlerMiddlewares []MiddlewareFunc
+ ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error)
+}
+
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// GetVersion operation middleware
+func (siw *ServerInterfaceWrapper) GetVersion(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ siw.Handler.GetVersion(w, r)
+ })
+
+ for _, middleware := range siw.HandlerMiddlewares {
+ handler = middleware(handler)
+ }
+
+ handler.ServeHTTP(w, r.WithContext(ctx))
+}
+
+type UnescapedCookieParamError struct {
+ ParamName string
+ Err error
+}
+
+func (e *UnescapedCookieParamError) Error() string {
+ return fmt.Sprintf("error unescaping cookie parameter '%s'", e.ParamName)
+}
+
+func (e *UnescapedCookieParamError) Unwrap() error {
+ return e.Err
+}
+
+type UnmarshallingParamError struct {
+ ParamName string
+ Err error
+}
+
+func (e *UnmarshallingParamError) Error() string {
+ return fmt.Sprintf("Error unmarshalling parameter %s as JSON: %s", e.ParamName, e.Err.Error())
+}
+
+func (e *UnmarshallingParamError) Unwrap() error {
+ return e.Err
+}
+
+type RequiredParamError struct {
+ ParamName string
+}
+
+func (e *RequiredParamError) Error() string {
+ return fmt.Sprintf("Query argument %s is required, but not found", e.ParamName)
+}
+
+type RequiredHeaderError struct {
+ ParamName string
+ Err error
+}
+
+func (e *RequiredHeaderError) Error() string {
+ return fmt.Sprintf("Header parameter %s is required, but not found", e.ParamName)
+}
+
+func (e *RequiredHeaderError) Unwrap() error {
+ return e.Err
+}
+
+type InvalidParamFormatError struct {
+ ParamName string
+ Err error
+}
+
+func (e *InvalidParamFormatError) Error() string {
+ return fmt.Sprintf("Invalid format for parameter %s: %s", e.ParamName, e.Err.Error())
+}
+
+func (e *InvalidParamFormatError) Unwrap() error {
+ return e.Err
+}
+
+type TooManyValuesForParamError struct {
+ ParamName string
+ Count int
+}
+
+func (e *TooManyValuesForParamError) Error() string {
+ return fmt.Sprintf("Expected one value for %s, got %d", e.ParamName, e.Count)
+}
+
+// Handler creates http.Handler with routing matching OpenAPI spec.
+func Handler(si ServerInterface) http.Handler {
+ return HandlerWithOptions(si, ChiServerOptions{})
+}
+
+type ChiServerOptions struct {
+ BaseURL string
+ BaseRouter chi.Router
+ Middlewares []MiddlewareFunc
+ ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error)
+}
+
+// HandlerFromMux creates http.Handler with routing matching OpenAPI spec based on the provided mux.
+func HandlerFromMux(si ServerInterface, r chi.Router) http.Handler {
+ return HandlerWithOptions(si, ChiServerOptions{
+ BaseRouter: r,
+ })
+}
+
+func HandlerFromMuxWithBaseURL(si ServerInterface, r chi.Router, baseURL string) http.Handler {
+ return HandlerWithOptions(si, ChiServerOptions{
+ BaseURL: baseURL,
+ BaseRouter: r,
+ })
+}
+
+// HandlerWithOptions creates http.Handler with additional options
+func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handler {
+ r := options.BaseRouter
+
+ if r == nil {
+ r = chi.NewRouter()
+ }
+ if options.ErrorHandlerFunc == nil {
+ options.ErrorHandlerFunc = func(w http.ResponseWriter, r *http.Request, err error) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+ }
+ wrapper := ServerInterfaceWrapper{
+ Handler: si,
+ HandlerMiddlewares: options.Middlewares,
+ ErrorHandlerFunc: options.ErrorHandlerFunc,
+ }
+
+ r.Group(func(r chi.Router) {
+ r.Get(options.BaseURL+"/version", wrapper.GetVersion)
+ })
+
+ return r
+}
diff --git a/routers/api/forgejo/v1/root.go b/routers/api/forgejo/v1/root.go
new file mode 100644
index 0000000..b976c51
--- /dev/null
+++ b/routers/api/forgejo/v1/root.go
@@ -0,0 +1,14 @@
+// Copyright The Forgejo Authors.
+// SPDX-License-Identifier: MIT
+
+package v1
+
+import (
+ "net/http"
+)
+
+func Root(w http.ResponseWriter, r *http.Request) {
+ // https://www.rfc-editor.org/rfc/rfc8631
+ w.Header().Set("Link", "</assets/forgejo/api.v1.yml>; rel=\"service-desc\"")
+ w.WriteHeader(http.StatusNoContent)
+}
diff --git a/routers/api/packages/README.md b/routers/api/packages/README.md
new file mode 100644
index 0000000..74d1492
--- /dev/null
+++ b/routers/api/packages/README.md
@@ -0,0 +1,50 @@
+# Gitea Package Registry
+
+This document gives a brief overview of how the package registry is organized in the code.
+
+## Structure
+
+The package registry code is divided into multiple modules to split the functionality and make code reuse possible.
+
+| Module | Description |
+| - | - |
+| `models/packages` | Common methods and models used by all registry types |
+| `models/packages/<type>` | Methods used by a specific registry type. There should be no need to use type-specific models. |
+| `modules/packages` | Common methods and types used by multiple registry types |
+| `modules/packages/<type>` | Registry type specific methods and types (e.g. metadata extraction of package files) |
+| `routers/api/packages` | Route definitions for all registry types |
+| `routers/api/packages/<type>` | Route implementation for a specific registry type |
+| `services/packages` | Helper methods used by registry types to handle common tasks like package creation and deletion in `routers` |
+| `services/packages/<type>` | Registry type specific methods used by `routers` and `services` |
+
+## Models
+
+Every package registry implementation uses the same underlying models:
+
+| Model | Description |
+| - | - |
+| `Package` | The root of a package providing values fixed for every version (e.g. the package name) |
+| `PackageVersion` | A version of a package containing metadata (e.g. the package description) |
+| `PackageFile` | A file of a package describing its content (e.g. file name) |
+| `PackageBlob` | The content of a file (may be shared by multiple files) |
+| `PackageProperty` | Additional properties attached to `Package`, `PackageVersion` or `PackageFile` (e.g. used if metadata is needed for routing) |
+
+The following diagram shows the relationship between the models:
+```
+Package <1---*> PackageVersion <1---*> PackageFile <*---1> PackageBlob
+```
+
+## Adding a new package registry type
+
+Before adding a new package registry type, have a look at the existing implementations to get an impression of how it could work.
+Most registry types offer endpoints to retrieve metadata and to upload and download package files.
+The upload endpoint is usually the heavy part because it must validate the uploaded blob, extract its metadata and create the models.
+The methods to validate the blob and extract its metadata should be added to the `modules/packages/<type>` package.
+If the upload is valid, the methods in `services/packages` store the upload and create the corresponding models.
+Which method should be called depends on whether the registry type allows multiple files per package version:
+- `CreatePackageAndAddFile`: error if package version already exists
+- `CreatePackageOrAddFileToExisting`: error if file already exists
+- `AddFileToExistingPackage`: error if package version does not exist or file already exists
+
+`services/packages` also contains helper methods to download a file or to remove a package version.
+There are no helper methods for metadata endpoints because they are very type specific.
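+
+A minimal sketch of an upload handler for a hypothetical `mytype` registry, modelled on the Alpine implementation in this repository, could look like the following.
+`mytype_module.ParsePackage` (and the `pck` fields it returns) as well as `packages_model.TypeMyType` are placeholders that a real registry type would have to provide.
+
+```go
+package mytype
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+
+	packages_model "code.gitea.io/gitea/models/packages"
+	packages_module "code.gitea.io/gitea/modules/packages"
+	mytype_module "code.gitea.io/gitea/modules/packages/mytype" // placeholder module
+	"code.gitea.io/gitea/routers/api/packages/helper"
+	"code.gitea.io/gitea/services/context"
+	packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+	helper.LogAndProcessError(ctx, status, obj, func(message string) {
+		ctx.PlainText(status, message)
+	})
+}
+
+func UploadPackageFile(ctx *context.Context) {
+	upload, needToClose, err := ctx.UploadStream()
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+	if needToClose {
+		defer upload.Close()
+	}
+
+	// Buffer the upload so it can be read twice: once for metadata extraction, once for storage.
+	buf, err := packages_module.CreateHashedBufferFromReader(upload)
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+	defer buf.Close()
+
+	// Validate the blob and extract its metadata (implemented in modules/packages/mytype).
+	pck, err := mytype_module.ParsePackage(buf)
+	if err != nil {
+		apiError(ctx, http.StatusBadRequest, err)
+		return
+	}
+	if _, err := buf.Seek(0, io.SeekStart); err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+
+	// One package version may contain multiple files, so use CreatePackageOrAddFileToExisting.
+	_, _, err = packages_service.CreatePackageOrAddFileToExisting(
+		ctx,
+		&packages_service.PackageCreationInfo{
+			PackageInfo: packages_service.PackageInfo{
+				Owner:       ctx.Package.Owner,
+				PackageType: packages_model.TypeMyType, // placeholder type constant
+				Name:        pck.Name,
+				Version:     pck.Version,
+			},
+			Creator:  ctx.Doer,
+			Metadata: pck.Metadata,
+		},
+		&packages_service.PackageFileCreationInfo{
+			PackageFileInfo: packages_service.PackageFileInfo{
+				Filename: fmt.Sprintf("%s-%s.pkg", pck.Name, pck.Version),
+			},
+			Creator: ctx.Doer,
+			Data:    buf,
+			IsLead:  true,
+		},
+	)
+	if err != nil {
+		switch err {
+		case packages_model.ErrDuplicatePackageVersion, packages_model.ErrDuplicatePackageFile:
+			apiError(ctx, http.StatusConflict, err)
+		default:
+			apiError(ctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	ctx.Status(http.StatusCreated)
+}
+```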
diff --git a/routers/api/packages/alpine/alpine.go b/routers/api/packages/alpine/alpine.go
new file mode 100644
index 0000000..831a910
--- /dev/null
+++ b/routers/api/packages/alpine/alpine.go
@@ -0,0 +1,287 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alpine
+
+import (
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ alpine_model "code.gitea.io/gitea/models/packages/alpine"
+ "code.gitea.io/gitea/modules/json"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ alpine_module "code.gitea.io/gitea/modules/packages/alpine"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+ alpine_service "code.gitea.io/gitea/services/packages/alpine"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+func createOrAddToExisting(ctx *context.Context, pck *alpine_module.Package, branch, repository, architecture string, buf packages_module.HashedSizeReader, fileMetadataRaw []byte) {
+ _, _, err := packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeAlpine,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ Creator: ctx.Doer,
+ Metadata: pck.VersionMetadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s.apk", pck.Name, pck.Version),
+ CompositeKey: fmt.Sprintf("%s|%s|%s", branch, repository, architecture),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: map[string]string{
+ alpine_module.PropertyBranch: branch,
+ alpine_module.PropertyRepository: repository,
+ alpine_module.PropertyArchitecture: architecture,
+ alpine_module.PropertyMetadata: string(fileMetadataRaw),
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion, packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := alpine_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, branch, repository, pck.FileMetadata.Architecture); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+}
+
+func GetRepositoryKey(ctx *context.Context) {
+ _, pub, err := alpine_service.GetOrCreateKeyPair(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pubPem, _ := pem.Decode([]byte(pub))
+ if pubPem == nil {
+		apiError(ctx, http.StatusInternalServerError, "failed to decode public key pem")
+ return
+ }
+
+ pubKey, err := x509.ParsePKIXPublicKey(pubPem.Bytes)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ fingerprint, err := util.CreatePublicKeyFingerprint(pubKey)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.ServeContent(strings.NewReader(pub), &context.ServeHeaderOptions{
+ ContentType: "application/x-pem-file",
+ Filename: fmt.Sprintf("%s@%s.rsa.pub", ctx.Package.Owner.LowerName, hex.EncodeToString(fingerprint)),
+ })
+}
+
+func GetRepositoryFile(ctx *context.Context) {
+ pv, err := alpine_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pv,
+ &packages_service.PackageFileInfo{
+ Filename: alpine_service.IndexArchiveFilename,
+ CompositeKey: fmt.Sprintf("%s|%s|%s", ctx.Params("branch"), ctx.Params("repository"), ctx.Params("architecture")),
+ },
+ )
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func UploadPackageFile(ctx *context.Context) {
+ branch := strings.TrimSpace(ctx.Params("branch"))
+ repository := strings.TrimSpace(ctx.Params("repository"))
+ if branch == "" || repository == "" {
+ apiError(ctx, http.StatusBadRequest, "invalid branch or repository")
+ return
+ }
+
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := alpine_module.ParsePackage(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) || err == io.EOF {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ fileMetadataRaw, err := json.Marshal(pck.FileMetadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+	// Check whether the package being uploaded has no architecture defined ("noarch").
+	// If so, loop through the architectures already available in the repository and create
+	// the package file for each of them. If the repository has no architectures yet,
+	// fall back to x86_64.
+ if pck.FileMetadata.Architecture == "noarch" {
+ architectures, err := alpine_model.GetArchitectures(ctx, ctx.Package.Owner.ID, repository)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(architectures) == 0 {
+ architectures = []string{
+ "x86_64",
+ }
+ }
+
+ for _, arch := range architectures {
+ pck.FileMetadata.Architecture = arch
+
+ fileMetadataRaw, err := json.Marshal(pck.FileMetadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ createOrAddToExisting(ctx, pck, branch, repository, pck.FileMetadata.Architecture, buf, fileMetadataRaw)
+ }
+ } else {
+ createOrAddToExisting(ctx, pck, branch, repository, pck.FileMetadata.Architecture, buf, fileMetadataRaw)
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+ branch := ctx.Params("branch")
+ repository := ctx.Params("repository")
+ architecture := ctx.Params("architecture")
+
+ opts := &packages_model.PackageFileSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ PackageType: packages_model.TypeAlpine,
+ Query: ctx.Params("filename"),
+ CompositeKey: fmt.Sprintf("%s|%s|%s", branch, repository, architecture),
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetPackageFileStream(ctx, pfs[0])
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func DeletePackageFile(ctx *context.Context) {
+ branch, repository, architecture := ctx.Params("branch"), ctx.Params("repository"), ctx.Params("architecture")
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ PackageType: packages_model.TypeAlpine,
+ Query: ctx.Params("filename"),
+ CompositeKey: fmt.Sprintf("%s|%s|%s", branch, repository, architecture),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ if err := packages_service.RemovePackageFileAndVersionIfUnreferenced(ctx, ctx.Doer, pfs[0]); err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := alpine_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, branch, repository, architecture); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/packages/api.go b/routers/api/packages/api.go
new file mode 100644
index 0000000..1337ce4
--- /dev/null
+++ b/routers/api/packages/api.go
@@ -0,0 +1,916 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/perm"
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/packages/alpine"
+ "code.gitea.io/gitea/routers/api/packages/arch"
+ "code.gitea.io/gitea/routers/api/packages/cargo"
+ "code.gitea.io/gitea/routers/api/packages/chef"
+ "code.gitea.io/gitea/routers/api/packages/composer"
+ "code.gitea.io/gitea/routers/api/packages/conan"
+ "code.gitea.io/gitea/routers/api/packages/conda"
+ "code.gitea.io/gitea/routers/api/packages/container"
+ "code.gitea.io/gitea/routers/api/packages/cran"
+ "code.gitea.io/gitea/routers/api/packages/debian"
+ "code.gitea.io/gitea/routers/api/packages/generic"
+ "code.gitea.io/gitea/routers/api/packages/goproxy"
+ "code.gitea.io/gitea/routers/api/packages/helm"
+ "code.gitea.io/gitea/routers/api/packages/maven"
+ "code.gitea.io/gitea/routers/api/packages/npm"
+ "code.gitea.io/gitea/routers/api/packages/nuget"
+ "code.gitea.io/gitea/routers/api/packages/pub"
+ "code.gitea.io/gitea/routers/api/packages/pypi"
+ "code.gitea.io/gitea/routers/api/packages/rpm"
+ "code.gitea.io/gitea/routers/api/packages/rubygems"
+ "code.gitea.io/gitea/routers/api/packages/swift"
+ "code.gitea.io/gitea/routers/api/packages/vagrant"
+ "code.gitea.io/gitea/services/auth"
+ "code.gitea.io/gitea/services/context"
+)
+
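+// reqPackageAccess returns a middleware that requires the doer to have at least the given
+// access mode on the package owner; personal access tokens must additionally carry the
+// matching package read or write scope.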
+func reqPackageAccess(accessMode perm.AccessMode) func(ctx *context.Context) {
+ return func(ctx *context.Context) {
+ if ctx.Data["IsApiToken"] == true {
+ scope, ok := ctx.Data["ApiTokenScope"].(auth_model.AccessTokenScope)
+ if ok { // it's a personal access token but not oauth2 token
+ scopeMatched := false
+ var err error
+ if accessMode == perm.AccessModeRead {
+ scopeMatched, err = scope.HasScope(auth_model.AccessTokenScopeReadPackage)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "HasScope", err.Error())
+ return
+ }
+ } else if accessMode == perm.AccessModeWrite {
+ scopeMatched, err = scope.HasScope(auth_model.AccessTokenScopeWritePackage)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "HasScope", err.Error())
+ return
+ }
+ }
+ if !scopeMatched {
+ ctx.Resp.Header().Set("WWW-Authenticate", `Basic realm="Gitea Package API"`)
+ ctx.Error(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
+ return
+ }
+
+ // check if scope only applies to public resources
+ publicOnly, err := scope.PublicOnly()
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "tokenRequiresScope", "parsing public resource scope failed: "+err.Error())
+ return
+ }
+
+ if publicOnly {
+ if ctx.Package != nil && ctx.Package.Owner.Visibility.IsPrivate() {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public packages")
+ return
+ }
+ }
+ }
+ }
+
+ if ctx.Package.AccessMode < accessMode && !ctx.IsUserSiteAdmin() {
+ ctx.Resp.Header().Set("WWW-Authenticate", `Basic realm="Gitea Package API"`)
+ ctx.Error(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
+ return
+ }
+ }
+}
+
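+// enforcePackagesQuota returns a middleware that rejects the request with 413 Request Entity Too Large
+// when the doer has exhausted the quota for package assets.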
+func enforcePackagesQuota() func(ctx *context.Context) {
+ return func(ctx *context.Context) {
+ ok, err := quota_model.EvaluateForUser(ctx, ctx.Doer.ID, quota_model.LimitSubjectSizeAssetsPackagesAll)
+ if err != nil {
+ log.Error("quota_model.EvaluateForUser: %v", err)
+ ctx.Error(http.StatusInternalServerError, "Error checking quota")
+ return
+ }
+ if !ok {
+ ctx.Error(http.StatusRequestEntityTooLarge, "enforcePackagesQuota", "quota exceeded")
+ return
+ }
+ }
+}
+
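+// verifyAuth registers the given auth methods (plus reverse proxy auth when enabled)
+// and resolves ctx.Doer for every request.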
+func verifyAuth(r *web.Route, authMethods []auth.Method) {
+ if setting.Service.EnableReverseProxyAuth {
+ authMethods = append(authMethods, &auth.ReverseProxy{})
+ }
+ authGroup := auth.NewGroup(authMethods...)
+
+ r.Use(func(ctx *context.Context) {
+ var err error
+ ctx.Doer, err = authGroup.Verify(ctx.Req, ctx.Resp, ctx, ctx.Session)
+ if err != nil {
+ log.Error("Failed to verify user: %v", err)
+ ctx.Error(http.StatusUnauthorized, "authGroup.Verify")
+ return
+ }
+ ctx.IsSigned = ctx.Doer != nil
+ })
+}
+
+// CommonRoutes provides endpoints for most package managers (except containers, see ContainerRoutes below).
+// These are mounted on `/api/packages` (not `/api/v1/packages`).
+func CommonRoutes() *web.Route {
+ r := web.NewRoute()
+
+ r.Use(context.PackageContexter())
+
+ verifyAuth(r, []auth.Method{
+ &auth.OAuth2{},
+ &auth.Basic{},
+ &nuget.Auth{},
+ &conan.Auth{},
+ &chef.Auth{},
+ })
+
+ r.Group("/{username}", func() {
+ r.Group("/alpine", func() {
+ r.Get("/key", alpine.GetRepositoryKey)
+ r.Group("/{branch}/{repository}", func() {
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), alpine.UploadPackageFile)
+ r.Group("/{architecture}", func() {
+ r.Get("/APKINDEX.tar.gz", alpine.GetRepositoryFile)
+ r.Group("/{filename}", func() {
+ r.Get("", alpine.DownloadPackageFile)
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), alpine.DeletePackageFile)
+ })
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/arch", func() {
+ r.Group("/repository.key", func() {
+ r.Head("", arch.GetRepositoryKey)
+ r.Get("", arch.GetRepositoryKey)
+ })
+
+ r.Methods("HEAD,GET,PUT,DELETE", "*", func(ctx *context.Context) {
+ pathGroups := strings.Split(strings.Trim(ctx.Params("*"), "/"), "/")
+ groupLen := len(pathGroups)
+ isGetHead := ctx.Req.Method == "HEAD" || ctx.Req.Method == "GET"
+ isPut := ctx.Req.Method == "PUT"
+ isDelete := ctx.Req.Method == "DELETE"
+ if isGetHead {
+ if groupLen < 2 {
+ ctx.Status(http.StatusNotFound)
+ return
+ }
+ if groupLen == 2 {
+ ctx.SetParams("group", "")
+ ctx.SetParams("arch", pathGroups[0])
+ ctx.SetParams("file", pathGroups[1])
+ } else {
+ ctx.SetParams("group", strings.Join(pathGroups[:groupLen-2], "/"))
+ ctx.SetParams("arch", pathGroups[groupLen-2])
+ ctx.SetParams("file", pathGroups[groupLen-1])
+ }
+ arch.GetPackageOrDB(ctx)
+ return
+ } else if isPut {
+ ctx.SetParams("group", strings.Join(pathGroups, "/"))
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ arch.PushPackage(ctx)
+ return
+ } else if isDelete {
+ if groupLen < 3 {
+ ctx.Status(http.StatusBadRequest)
+ return
+ }
+ if groupLen == 3 {
+ ctx.SetParams("group", "")
+ ctx.SetParams("package", pathGroups[0])
+ ctx.SetParams("version", pathGroups[1])
+ ctx.SetParams("arch", pathGroups[2])
+ } else {
+ ctx.SetParams("group", strings.Join(pathGroups[:groupLen-3], "/"))
+ ctx.SetParams("package", pathGroups[groupLen-3])
+ ctx.SetParams("version", pathGroups[groupLen-2])
+ ctx.SetParams("arch", pathGroups[groupLen-1])
+ }
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ arch.RemovePackage(ctx)
+ return
+ }
+ ctx.Status(http.StatusNotFound)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/cargo", func() {
+ r.Group("/api/v1/crates", func() {
+ r.Get("", cargo.SearchPackages)
+ r.Put("/new", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), cargo.UploadPackage)
+ r.Group("/{package}", func() {
+ r.Group("/{version}", func() {
+ r.Get("/download", cargo.DownloadPackageFile)
+ r.Delete("/yank", reqPackageAccess(perm.AccessModeWrite), cargo.YankPackage)
+ r.Put("/unyank", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), cargo.UnyankPackage)
+ })
+ r.Get("/owners", cargo.ListOwners)
+ })
+ })
+ r.Get("/config.json", cargo.RepositoryConfig)
+ r.Get("/1/{package}", cargo.EnumeratePackageVersions)
+ r.Get("/2/{package}", cargo.EnumeratePackageVersions)
+ // Use dummy placeholders because these parts are not of interest
+ r.Get("/3/{_}/{package}", cargo.EnumeratePackageVersions)
+ r.Get("/{_}/{__}/{package}", cargo.EnumeratePackageVersions)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/chef", func() {
+ r.Group("/api/v1", func() {
+ r.Get("/universe", chef.PackagesUniverse)
+ r.Get("/search", chef.EnumeratePackages)
+ r.Group("/cookbooks", func() {
+ r.Get("", chef.EnumeratePackages)
+ r.Post("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), chef.UploadPackage)
+ r.Group("/{name}", func() {
+ r.Get("", chef.PackageMetadata)
+ r.Group("/versions/{version}", func() {
+ r.Get("", chef.PackageVersionMetadata)
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), chef.DeletePackageVersion)
+ r.Get("/download", chef.DownloadPackage)
+ })
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), chef.DeletePackage)
+ })
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/composer", func() {
+ r.Get("/packages.json", composer.ServiceIndex)
+ r.Get("/search.json", composer.SearchPackages)
+ r.Get("/list.json", composer.EnumeratePackages)
+ r.Get("/p2/{vendorname}/{projectname}~dev.json", composer.PackageMetadata)
+ r.Get("/p2/{vendorname}/{projectname}.json", composer.PackageMetadata)
+ r.Get("/files/{package}/{version}/{filename}", composer.DownloadPackageFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), composer.UploadPackage)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/conan", func() {
+ r.Group("/v1", func() {
+ r.Get("/ping", conan.Ping)
+ r.Group("/users", func() {
+ r.Get("/authenticate", conan.Authenticate)
+ r.Get("/check_credentials", conan.CheckCredentials)
+ })
+ r.Group("/conans", func() {
+ r.Get("/search", conan.SearchRecipes)
+ r.Group("/{name}/{version}/{user}/{channel}", func() {
+ r.Get("", conan.RecipeSnapshot)
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeleteRecipeV1)
+ r.Get("/search", conan.SearchPackagesV1)
+ r.Get("/digest", conan.RecipeDownloadURLs)
+ r.Post("/upload_urls", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.RecipeUploadURLs)
+ r.Get("/download_urls", conan.RecipeDownloadURLs)
+ r.Group("/packages", func() {
+ r.Post("/delete", reqPackageAccess(perm.AccessModeWrite), conan.DeletePackageV1)
+ r.Group("/{package_reference}", func() {
+ r.Get("", conan.PackageSnapshot)
+ r.Get("/digest", conan.PackageDownloadURLs)
+ r.Post("/upload_urls", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.PackageUploadURLs)
+ r.Get("/download_urls", conan.PackageDownloadURLs)
+ })
+ })
+ }, conan.ExtractPathParameters)
+ })
+ r.Group("/files/{name}/{version}/{user}/{channel}/{recipe_revision}", func() {
+ r.Group("/recipe/{filename}", func() {
+ r.Get("", conan.DownloadRecipeFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.UploadRecipeFile)
+ })
+ r.Group("/package/{package_reference}/{package_revision}/{filename}", func() {
+ r.Get("", conan.DownloadPackageFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.UploadPackageFile)
+ })
+ }, conan.ExtractPathParameters)
+ })
+ r.Group("/v2", func() {
+ r.Get("/ping", conan.Ping)
+ r.Group("/users", func() {
+ r.Get("/authenticate", conan.Authenticate)
+ r.Get("/check_credentials", conan.CheckCredentials)
+ })
+ r.Group("/conans", func() {
+ r.Get("/search", conan.SearchRecipes)
+ r.Group("/{name}/{version}/{user}/{channel}", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeleteRecipeV2)
+ r.Get("/search", conan.SearchPackagesV2)
+ r.Get("/latest", conan.LatestRecipeRevision)
+ r.Group("/revisions", func() {
+ r.Get("", conan.ListRecipeRevisions)
+ r.Group("/{recipe_revision}", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeleteRecipeV2)
+ r.Get("/search", conan.SearchPackagesV2)
+ r.Group("/files", func() {
+ r.Get("", conan.ListRecipeRevisionFiles)
+ r.Group("/{filename}", func() {
+ r.Get("", conan.DownloadRecipeFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.UploadRecipeFile)
+ })
+ })
+ r.Group("/packages", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeletePackageV2)
+ r.Group("/{package_reference}", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeletePackageV2)
+ r.Get("/latest", conan.LatestPackageRevision)
+ r.Group("/revisions", func() {
+ r.Get("", conan.ListPackageRevisions)
+ r.Group("/{package_revision}", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), conan.DeletePackageV2)
+ r.Group("/files", func() {
+ r.Get("", conan.ListPackageRevisionFiles)
+ r.Group("/{filename}", func() {
+ r.Get("", conan.DownloadPackageFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), conan.UploadPackageFile)
+ })
+ })
+ })
+ })
+ })
+ })
+ })
+ })
+ }, conan.ExtractPathParameters)
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/conda", func() {
+ var (
+ downloadPattern = regexp.MustCompile(`\A(.+/)?(.+)/((?:[^/]+(?:\.tar\.bz2|\.conda))|(?:current_)?repodata\.json(?:\.bz2)?)\z`)
+ uploadPattern = regexp.MustCompile(`\A(.+/)?([^/]+(?:\.tar\.bz2|\.conda))\z`)
+ )
+
+ r.Get("/*", func(ctx *context.Context) {
+ m := downloadPattern.FindStringSubmatch(ctx.Params("*"))
+ if len(m) == 0 {
+ ctx.Status(http.StatusNotFound)
+ return
+ }
+
+ ctx.SetParams("channel", strings.TrimSuffix(m[1], "/"))
+ ctx.SetParams("architecture", m[2])
+ ctx.SetParams("filename", m[3])
+
+ switch m[3] {
+ case "repodata.json", "repodata.json.bz2", "current_repodata.json", "current_repodata.json.bz2":
+ conda.EnumeratePackages(ctx)
+ default:
+ conda.DownloadPackageFile(ctx)
+ }
+ })
+ r.Put("/*", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), func(ctx *context.Context) {
+ m := uploadPattern.FindStringSubmatch(ctx.Params("*"))
+ if len(m) == 0 {
+ ctx.Status(http.StatusNotFound)
+ return
+ }
+
+ ctx.SetParams("channel", strings.TrimSuffix(m[1], "/"))
+ ctx.SetParams("filename", m[2])
+
+ conda.UploadPackageFile(ctx)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/cran", func() {
+ r.Group("/src", func() {
+ r.Group("/contrib", func() {
+ r.Get("/PACKAGES", cran.EnumerateSourcePackages)
+ r.Get("/PACKAGES{format}", cran.EnumerateSourcePackages)
+ r.Get("/{filename}", cran.DownloadSourcePackageFile)
+ })
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), cran.UploadSourcePackageFile)
+ })
+ r.Group("/bin", func() {
+ r.Group("/{platform}/contrib/{rversion}", func() {
+ r.Get("/PACKAGES", cran.EnumerateBinaryPackages)
+ r.Get("/PACKAGES{format}", cran.EnumerateBinaryPackages)
+ r.Get("/{filename}", cran.DownloadBinaryPackageFile)
+ })
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), cran.UploadBinaryPackageFile)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/debian", func() {
+ r.Get("/repository.key", debian.GetRepositoryKey)
+ r.Group("/dists/{distribution}", func() {
+ r.Get("/{filename}", debian.GetRepositoryFile)
+ r.Get("/by-hash/{algorithm}/{hash}", debian.GetRepositoryFileByHash)
+ r.Group("/{component}/{architecture}", func() {
+ r.Get("/{filename}", debian.GetRepositoryFile)
+ r.Get("/by-hash/{algorithm}/{hash}", debian.GetRepositoryFileByHash)
+ })
+ })
+ r.Group("/pool/{distribution}/{component}", func() {
+ r.Get("/{name}_{version}_{architecture}.deb", debian.DownloadPackageFile)
+ r.Group("", func() {
+ r.Put("/upload", enforcePackagesQuota(), debian.UploadPackageFile)
+ r.Delete("/{name}/{version}/{architecture}", debian.DeletePackageFile)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/go", func() {
+ r.Put("/upload", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), goproxy.UploadPackage)
+ r.Get("/sumdb/sum.golang.org/supported", func(ctx *context.Context) {
+ ctx.Status(http.StatusNotFound)
+ })
+
+ // Manual mapping of routes because the package name contains slashes which chi does not support
+ // https://go.dev/ref/mod#goproxy-protocol
+ r.Get("/*", func(ctx *context.Context) {
+ path := ctx.Params("*")
+
+ if strings.HasSuffix(path, "/@latest") {
+ ctx.SetParams("name", path[:len(path)-len("/@latest")])
+ ctx.SetParams("version", "latest")
+
+ goproxy.PackageVersionMetadata(ctx)
+ return
+ }
+
+ parts := strings.SplitN(path, "/@v/", 2)
+ if len(parts) != 2 {
+ ctx.Status(http.StatusNotFound)
+ return
+ }
+
+ ctx.SetParams("name", parts[0])
+
+ // <package/name>/@v/list
+ if parts[1] == "list" {
+ goproxy.EnumeratePackageVersions(ctx)
+ return
+ }
+
+ // <package/name>/@v/<version>.zip
+ if strings.HasSuffix(parts[1], ".zip") {
+ ctx.SetParams("version", parts[1][:len(parts[1])-len(".zip")])
+
+ goproxy.DownloadPackageFile(ctx)
+ return
+ }
+ // <package/name>/@v/<version>.info
+ if strings.HasSuffix(parts[1], ".info") {
+ ctx.SetParams("version", parts[1][:len(parts[1])-len(".info")])
+
+ goproxy.PackageVersionMetadata(ctx)
+ return
+ }
+ // <package/name>/@v/<version>.mod
+ if strings.HasSuffix(parts[1], ".mod") {
+ ctx.SetParams("version", parts[1][:len(parts[1])-len(".mod")])
+
+ goproxy.PackageVersionGoModContent(ctx)
+ return
+ }
+
+ ctx.Status(http.StatusNotFound)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/generic", func() {
+ r.Group("/{packagename}/{packageversion}", func() {
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), generic.DeletePackage)
+ r.Group("/{filename}", func() {
+ r.Get("", generic.DownloadPackageFile)
+ r.Group("", func() {
+ r.Put("", enforcePackagesQuota(), generic.UploadPackage)
+ r.Delete("", generic.DeletePackageFile)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/helm", func() {
+ r.Get("/index.yaml", helm.Index)
+ r.Get("/{filename}", helm.DownloadPackageFile)
+ r.Post("/api/charts", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), helm.UploadPackage)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/maven", func() {
+ r.Put("/*", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), maven.UploadPackageFile)
+ r.Get("/*", maven.DownloadPackageFile)
+ r.Head("/*", maven.ProvidePackageFileHeader)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/nuget", func() {
+ r.Group("", func() { // Needs to be unauthenticated for the NuGet client.
+ r.Get("/", nuget.ServiceIndexV2)
+ r.Get("/index.json", nuget.ServiceIndexV3)
+ r.Get("/$metadata", nuget.FeedCapabilityResource)
+ })
+ r.Group("", func() {
+ r.Get("/query", nuget.SearchServiceV3)
+ r.Group("/registration/{id}", func() {
+ r.Get("/index.json", nuget.RegistrationIndex)
+ r.Get("/{version}", nuget.RegistrationLeafV3)
+ })
+ r.Group("/package/{id}", func() {
+ r.Get("/index.json", nuget.EnumeratePackageVersionsV3)
+ r.Get("/{version}/{filename}", nuget.DownloadPackageFile)
+ })
+ r.Group("", func() {
+ r.Put("/", enforcePackagesQuota(), nuget.UploadPackage)
+ r.Put("/symbolpackage", enforcePackagesQuota(), nuget.UploadSymbolPackage)
+ r.Delete("/{id}/{version}", nuget.DeletePackage)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ r.Get("/symbols/{filename}/{guid:[0-9a-fA-F]{32}[fF]{8}}/{filename2}", nuget.DownloadSymbolFile)
+ r.Get("/Packages(Id='{id:[^']+}',Version='{version:[^']+}')", nuget.RegistrationLeafV2)
+ r.Group("/Packages()", func() {
+ r.Get("", nuget.SearchServiceV2)
+ r.Get("/$count", nuget.SearchServiceV2Count)
+ })
+ r.Group("/FindPackagesById()", func() {
+ r.Get("", nuget.EnumeratePackageVersionsV2)
+ r.Get("/$count", nuget.EnumeratePackageVersionsV2Count)
+ })
+ r.Group("/Search()", func() {
+ r.Get("", nuget.SearchServiceV2)
+ r.Get("/$count", nuget.SearchServiceV2Count)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ })
+ r.Group("/npm", func() {
+ r.Group("/@{scope}/{id}", func() {
+ r.Get("", npm.PackageMetadata)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), npm.UploadPackage)
+ r.Group("/-/{version}/{filename}", func() {
+ r.Get("", npm.DownloadPackageFile)
+ r.Delete("/-rev/{revision}", reqPackageAccess(perm.AccessModeWrite), npm.DeletePackageVersion)
+ })
+ r.Get("/-/{filename}", npm.DownloadPackageFileByName)
+ r.Group("/-rev/{revision}", func() {
+ r.Delete("", npm.DeletePackage)
+ r.Put("", npm.DeletePreview)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ r.Group("/{id}", func() {
+ r.Get("", npm.PackageMetadata)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), npm.UploadPackage)
+ r.Group("/-/{version}/{filename}", func() {
+ r.Get("", npm.DownloadPackageFile)
+ r.Delete("/-rev/{revision}", reqPackageAccess(perm.AccessModeWrite), npm.DeletePackageVersion)
+ })
+ r.Get("/-/{filename}", npm.DownloadPackageFileByName)
+ r.Group("/-rev/{revision}", func() {
+ r.Delete("", npm.DeletePackage)
+ r.Put("", npm.DeletePreview)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ r.Group("/-/package/@{scope}/{id}/dist-tags", func() {
+ r.Get("", npm.ListPackageTags)
+ r.Group("/{tag}", func() {
+ r.Put("", npm.AddPackageTag)
+ r.Delete("", npm.DeletePackageTag)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ r.Group("/-/package/{id}/dist-tags", func() {
+ r.Get("", npm.ListPackageTags)
+ r.Group("/{tag}", func() {
+ r.Put("", npm.AddPackageTag)
+ r.Delete("", npm.DeletePackageTag)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ })
+ r.Group("/-/v1/search", func() {
+ r.Get("", npm.PackageSearch)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/pub", func() {
+ r.Group("/api/packages", func() {
+ r.Group("/versions/new", func() {
+ r.Get("", pub.RequestUpload)
+ r.Post("/upload", enforcePackagesQuota(), pub.UploadPackageFile)
+ r.Get("/finalize/{id}/{version}", pub.FinalizePackage)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ r.Group("/{id}", func() {
+ r.Get("", pub.EnumeratePackageVersions)
+ r.Get("/files/{version}", pub.DownloadPackageFile)
+ r.Get("/{version}", pub.PackageVersionMetadata)
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/pypi", func() {
+ r.Post("/", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), pypi.UploadPackageFile)
+ r.Get("/files/{id}/{version}/{filename}", pypi.DownloadPackageFile)
+ r.Get("/simple/{id}", pypi.PackageMetadata)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/rpm", func() {
+ r.Group("/repository.key", func() {
+ r.Head("", rpm.GetRepositoryKey)
+ r.Get("", rpm.GetRepositoryKey)
+ })
+
+ var (
+ repoPattern = regexp.MustCompile(`\A(.*?)\.repo\z`)
+ uploadPattern = regexp.MustCompile(`\A(.*?)/upload\z`)
+ filePattern = regexp.MustCompile(`\A(.*?)/package/([^/]+)/([^/]+)/([^/]+)(?:/([^/]+\.rpm)|)\z`)
+ repoFilePattern = regexp.MustCompile(`\A(.*?)/repodata/([^/]+)\z`)
+ )
+
+ r.Methods("HEAD,GET,PUT,DELETE", "*", func(ctx *context.Context) {
+ path := ctx.Params("*")
+ isHead := ctx.Req.Method == "HEAD"
+ isGetHead := ctx.Req.Method == "HEAD" || ctx.Req.Method == "GET"
+ isPut := ctx.Req.Method == "PUT"
+ isDelete := ctx.Req.Method == "DELETE"
+
+ m := repoPattern.FindStringSubmatch(path)
+ if len(m) == 2 && isGetHead {
+ ctx.SetParams("group", strings.Trim(m[1], "/"))
+ rpm.GetRepositoryConfig(ctx)
+ return
+ }
+
+ m = repoFilePattern.FindStringSubmatch(path)
+ if len(m) == 3 && isGetHead {
+ ctx.SetParams("group", strings.Trim(m[1], "/"))
+ ctx.SetParams("filename", m[2])
+ if isHead {
+ rpm.CheckRepositoryFileExistence(ctx)
+ } else {
+ rpm.GetRepositoryFile(ctx)
+ }
+ return
+ }
+
+ m = uploadPattern.FindStringSubmatch(path)
+ if len(m) == 2 && isPut {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ enforcePackagesQuota()(ctx)
+ if ctx.Written() {
+ return
+ }
+ ctx.SetParams("group", strings.Trim(m[1], "/"))
+ rpm.UploadPackageFile(ctx)
+ return
+ }
+
+ m = filePattern.FindStringSubmatch(path)
+ if len(m) == 6 && (isGetHead || isDelete) {
+ ctx.SetParams("group", strings.Trim(m[1], "/"))
+ ctx.SetParams("name", m[2])
+ ctx.SetParams("version", m[3])
+ ctx.SetParams("architecture", m[4])
+ if isGetHead {
+ rpm.DownloadPackageFile(ctx)
+ } else {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ rpm.DeletePackageFile(ctx)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNotFound)
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/rubygems", func() {
+ r.Get("/specs.4.8.gz", rubygems.EnumeratePackages)
+ r.Get("/latest_specs.4.8.gz", rubygems.EnumeratePackagesLatest)
+ r.Get("/prerelease_specs.4.8.gz", rubygems.EnumeratePackagesPreRelease)
+ r.Get("/info/{package}", rubygems.ServePackageInfo)
+ r.Get("/versions", rubygems.ServeVersionsFile)
+ r.Get("/quick/Marshal.4.8/{filename}", rubygems.ServePackageSpecification)
+ r.Get("/gems/{filename}", rubygems.DownloadPackageFile)
+ r.Group("/api/v1/gems", func() {
+ r.Post("/", enforcePackagesQuota(), rubygems.UploadPackageFile)
+ r.Delete("/yank", rubygems.DeletePackage)
+ }, reqPackageAccess(perm.AccessModeWrite))
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/swift", func() {
+ r.Group("/{scope}/{name}", func() {
+ r.Group("", func() {
+ r.Get("", swift.EnumeratePackageVersions)
+ r.Get(".json", swift.EnumeratePackageVersions)
+ }, swift.CheckAcceptMediaType(swift.AcceptJSON))
+ r.Group("/{version}", func() {
+ r.Get("/Package.swift", swift.CheckAcceptMediaType(swift.AcceptSwift), swift.DownloadManifest)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), swift.CheckAcceptMediaType(swift.AcceptJSON), enforcePackagesQuota(), swift.UploadPackageFile)
+ r.Get("", func(ctx *context.Context) {
+ // Can't use normal routes here: https://github.com/go-chi/chi/issues/781
+
+ version := ctx.Params("version")
+ if strings.HasSuffix(version, ".zip") {
+ swift.CheckAcceptMediaType(swift.AcceptZip)(ctx)
+ if ctx.Written() {
+ return
+ }
+ ctx.SetParams("version", version[:len(version)-4])
+ swift.DownloadPackageFile(ctx)
+ } else {
+ swift.CheckAcceptMediaType(swift.AcceptJSON)(ctx)
+ if ctx.Written() {
+ return
+ }
+ if strings.HasSuffix(version, ".json") {
+ ctx.SetParams("version", version[:len(version)-5])
+ }
+ swift.PackageVersionMetadata(ctx)
+ }
+ })
+ })
+ })
+ r.Get("/identifiers", swift.CheckAcceptMediaType(swift.AcceptJSON), swift.LookupPackageIdentifiers)
+ }, reqPackageAccess(perm.AccessModeRead))
+ r.Group("/vagrant", func() {
+ r.Group("/authenticate", func() {
+ r.Get("", vagrant.CheckAuthenticate)
+ })
+ r.Group("/{name}", func() {
+ r.Head("", vagrant.CheckBoxAvailable)
+ r.Get("", vagrant.EnumeratePackageVersions)
+ r.Group("/{version}/{provider}", func() {
+ r.Get("", vagrant.DownloadPackageFile)
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), enforcePackagesQuota(), vagrant.UploadPackageFile)
+ })
+ })
+ }, reqPackageAccess(perm.AccessModeRead))
+ }, context.UserAssignmentWeb(), context.PackageAssignment())
+
+ return r
+}
+
+// ContainerRoutes provides endpoints that implement the OCI API to serve containers
+// These have to be mounted on `/v2/...` to comply with the OCI spec:
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md
+func ContainerRoutes() *web.Route {
+ r := web.NewRoute()
+
+ r.Use(context.PackageContexter())
+
+ verifyAuth(r, []auth.Method{
+ &auth.Basic{},
+ &container.Auth{},
+ })
+
+ r.Get("", container.ReqContainerAccess, container.DetermineSupport)
+ r.Group("/token", func() {
+ r.Get("", container.Authenticate)
+ r.Post("", container.AuthenticateNotImplemented)
+ })
+ r.Get("/_catalog", container.ReqContainerAccess, container.GetRepositoryList)
+ r.Group("/{username}", func() {
+ r.Group("/{image}", func() {
+ r.Group("/blobs/uploads", func() {
+ r.Post("", container.InitiateUploadBlob)
+ r.Group("/{uuid}", func() {
+ r.Get("", container.GetUploadBlob)
+ r.Patch("", container.UploadBlob)
+ r.Put("", container.EndUploadBlob)
+ r.Delete("", container.CancelUploadBlob)
+ })
+ }, reqPackageAccess(perm.AccessModeWrite))
+ r.Group("/blobs/{digest}", func() {
+ r.Head("", container.HeadBlob)
+ r.Get("", container.GetBlob)
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), container.DeleteBlob)
+ })
+ r.Group("/manifests/{reference}", func() {
+ r.Put("", reqPackageAccess(perm.AccessModeWrite), container.UploadManifest)
+ r.Head("", container.HeadManifest)
+ r.Get("", container.GetManifest)
+ r.Delete("", reqPackageAccess(perm.AccessModeWrite), container.DeleteManifest)
+ })
+ r.Get("/tags/list", container.GetTagList)
+ }, container.VerifyImageName)
+
+ var (
+ blobsUploadsPattern = regexp.MustCompile(`\A(.+)/blobs/uploads/([a-zA-Z0-9-_.=]+)\z`)
+ blobsPattern = regexp.MustCompile(`\A(.+)/blobs/([^/]+)\z`)
+ manifestsPattern = regexp.MustCompile(`\A(.+)/manifests/([^/]+)\z`)
+ )
+
+	// Manual mapping of routes because {image} can contain slashes, which chi does not support
+ r.Methods("HEAD,GET,POST,PUT,PATCH,DELETE", "/*", func(ctx *context.Context) {
+ path := ctx.Params("*")
+ isHead := ctx.Req.Method == "HEAD"
+ isGet := ctx.Req.Method == "GET"
+ isPost := ctx.Req.Method == "POST"
+ isPut := ctx.Req.Method == "PUT"
+ isPatch := ctx.Req.Method == "PATCH"
+ isDelete := ctx.Req.Method == "DELETE"
+
+ if isPost && strings.HasSuffix(path, "/blobs/uploads") {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.SetParams("image", path[:len(path)-14])
+ container.VerifyImageName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ container.InitiateUploadBlob(ctx)
+ return
+ }
+ if isGet && strings.HasSuffix(path, "/tags/list") {
+ ctx.SetParams("image", path[:len(path)-10])
+ container.VerifyImageName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ container.GetTagList(ctx)
+ return
+ }
+
+ m := blobsUploadsPattern.FindStringSubmatch(path)
+ if len(m) == 3 && (isGet || isPut || isPatch || isDelete) {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.SetParams("image", m[1])
+ container.VerifyImageName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.SetParams("uuid", m[2])
+
+ if isGet {
+ container.GetUploadBlob(ctx)
+ } else if isPatch {
+ container.UploadBlob(ctx)
+ } else if isPut {
+ container.EndUploadBlob(ctx)
+ } else {
+ container.CancelUploadBlob(ctx)
+ }
+ return
+ }
+ m = blobsPattern.FindStringSubmatch(path)
+ if len(m) == 3 && (isHead || isGet || isDelete) {
+ ctx.SetParams("image", m[1])
+ container.VerifyImageName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.SetParams("digest", m[2])
+
+ if isHead {
+ container.HeadBlob(ctx)
+ } else if isGet {
+ container.GetBlob(ctx)
+ } else {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ container.DeleteBlob(ctx)
+ }
+ return
+ }
+ m = manifestsPattern.FindStringSubmatch(path)
+ if len(m) == 3 && (isHead || isGet || isPut || isDelete) {
+ ctx.SetParams("image", m[1])
+ container.VerifyImageName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.SetParams("reference", m[2])
+
+ if isHead {
+ container.HeadManifest(ctx)
+ } else if isGet {
+ container.GetManifest(ctx)
+ } else {
+ reqPackageAccess(perm.AccessModeWrite)(ctx)
+ if ctx.Written() {
+ return
+ }
+ if isPut {
+ container.UploadManifest(ctx)
+ } else {
+ container.DeleteManifest(ctx)
+ }
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNotFound)
+ })
+ }, container.ReqContainerAccess, context.UserAssignmentWeb(), context.PackageAssignment(), reqPackageAccess(perm.AccessModeRead))
+
+ return r
+}
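
The fallback handler above re-parses the request path by hand because chi cannot route an `{image}` segment that itself contains slashes. Below is a minimal, illustrative sketch (not part of the patch) of how the `manifestsPattern` regex recovers the image and reference parts; the image name `library/nested/app` is invented for illustration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in ContainerRoutes above.
	manifestsPattern := regexp.MustCompile(`\A(.+)/manifests/([^/]+)\z`)

	m := manifestsPattern.FindStringSubmatch("library/nested/app/manifests/latest")
	if len(m) == 3 {
		fmt.Println("image:", m[1])     // image: library/nested/app
		fmt.Println("reference:", m[2]) // reference: latest
	}
}
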
diff --git a/routers/api/packages/arch/arch.go b/routers/api/packages/arch/arch.go
new file mode 100644
index 0000000..ecd2281
--- /dev/null
+++ b/routers/api/packages/arch/arch.go
@@ -0,0 +1,274 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package arch
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ arch_module "code.gitea.io/gitea/modules/packages/arch"
+ "code.gitea.io/gitea/modules/sync"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+ arch_service "code.gitea.io/gitea/services/packages/arch"
+)
+
+var (
+ archPkgOrSig = regexp.MustCompile(`^.*\.pkg\.tar\.\w+(\.sig)*$`)
+ archDBOrSig = regexp.MustCompile(`^.*.db(\.tar\.gz)*(\.sig)*$`)
+
+ locker = sync.NewExclusivePool()
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+func refreshLocker(ctx *context.Context, group string) func() {
+ key := fmt.Sprintf("pkg_%d_arch_pkg_%s", ctx.Package.Owner.ID, group)
+ locker.CheckIn(key)
+ return func() {
+ locker.CheckOut(key)
+ }
+}
+
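+// GetRepositoryKey serves the owner's public PGP key used to sign the repository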
+func GetRepositoryKey(ctx *context.Context) {
+ _, pub, err := arch_service.GetOrCreateKeyPair(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.ServeContent(strings.NewReader(pub), &context.ServeHeaderOptions{
+ ContentType: "application/pgp-keys",
+ Filename: "repository.key",
+ })
+}
+
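+// PushPackage stores an uploaded package file, signs it and rebuilds the pacman database files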
+func PushPackage(ctx *context.Context) {
+ group := ctx.Params("group")
+ releaser := refreshLocker(ctx, group)
+ defer releaser()
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ p, err := arch_module.ParsePackage(buf)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ _, err = buf.Seek(0, io.SeekStart)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ sign, err := arch_service.NewFileSign(ctx, ctx.Package.Owner.ID, buf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer sign.Close()
+ _, err = buf.Seek(0, io.SeekStart)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+	// read the GPG signature and record it in the file metadata
+ pgp, err := io.ReadAll(sign)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ p.FileMetadata.PgpSigned = base64.StdEncoding.EncodeToString(pgp)
+ _, err = sign.Seek(0, io.SeekStart)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ properties := map[string]string{
+ arch_module.PropertyDescription: p.Desc(),
+ arch_module.PropertyArch: p.FileMetadata.Arch,
+ arch_module.PropertyDistribution: group,
+ }
+
+ version, _, err := packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeArch,
+ Name: p.Name,
+ Version: p.Version,
+ },
+ Creator: ctx.Doer,
+ Metadata: p.VersionMetadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s-%s.pkg.tar.%s", p.Name, p.Version, p.FileMetadata.Arch, p.CompressType),
+ CompositeKey: group,
+ },
+ OverwriteExisting: false,
+ IsLead: true,
+ Creator: ctx.ContextUser,
+ Data: buf,
+ Properties: properties,
+ },
+ )
+ if err != nil {
+ switch {
+ case errors.Is(err, packages_model.ErrDuplicatePackageVersion), errors.Is(err, packages_model.ErrDuplicatePackageFile):
+ apiError(ctx, http.StatusConflict, err)
+ case errors.Is(err, packages_service.ErrQuotaTotalCount), errors.Is(err, packages_service.ErrQuotaTypeSize), errors.Is(err, packages_service.ErrQuotaTotalSize):
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+	// add the signature file to the package version
+ _, err = packages_service.AddFileToPackageVersionInternal(ctx, version, &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ CompositeKey: group,
+ Filename: fmt.Sprintf("%s-%s-%s.pkg.tar.%s.sig", p.Name, p.Version, p.FileMetadata.Arch, p.CompressType),
+ },
+ OverwriteExisting: true,
+ IsLead: false,
+ Creator: ctx.Doer,
+ Data: sign,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if err = arch_service.BuildPacmanDB(ctx, ctx.Package.Owner.ID, group, p.FileMetadata.Arch); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if p.FileMetadata.Arch == "any" {
+ if err = arch_service.BuildCustomRepositoryFiles(ctx, ctx.Package.Owner.ID, group); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+ ctx.Status(http.StatusCreated)
+}
+
+func GetPackageOrDB(ctx *context.Context) {
+ var (
+ file = ctx.Params("file")
+ group = ctx.Params("group")
+ arch = ctx.Params("arch")
+ )
+ if archPkgOrSig.MatchString(file) {
+ pkg, u, pf, err := arch_service.GetPackageFile(ctx, group, file, ctx.Package.Owner.ID)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ helper.ServePackageFile(ctx, pkg, u, pf)
+ return
+ }
+
+ if archDBOrSig.MatchString(file) {
+ pkg, u, pf, err := arch_service.GetPackageDBFile(ctx, ctx.Package.Owner.ID, group, arch, strings.HasSuffix(file, ".sig"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ helper.ServePackageFile(ctx, pkg, u, pf)
+ return
+ }
+
+ ctx.Status(http.StatusNotFound)
+}
+
+func RemovePackage(ctx *context.Context) {
+ var (
+ group = ctx.Params("group")
+ pkg = ctx.Params("package")
+ ver = ctx.Params("version")
+ pkgArch = ctx.Params("arch")
+ )
+ releaser := refreshLocker(ctx, group)
+ defer releaser()
+ pv, err := packages_model.GetVersionByNameAndVersion(
+ ctx, ctx.Package.Owner.ID, packages_model.TypeArch, pkg, ver,
+ )
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ files, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ deleted := false
+ for _, file := range files {
+ extName := fmt.Sprintf("-%s.pkg.tar%s", pkgArch, filepath.Ext(file.LowerName))
+ if strings.HasSuffix(file.LowerName, ".sig") {
+ extName = fmt.Sprintf("-%s.pkg.tar%s.sig", pkgArch,
+ filepath.Ext(strings.TrimSuffix(file.LowerName, filepath.Ext(file.LowerName))))
+ }
+ if file.CompositeKey == group &&
+ strings.HasSuffix(file.LowerName, extName) {
+ deleted = true
+ err := packages_service.RemovePackageFileAndVersionIfUnreferenced(ctx, ctx.ContextUser, file)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+ }
+ if deleted {
+ err = arch_service.BuildCustomRepositoryFiles(ctx, ctx.Package.Owner.ID, group)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.Error(http.StatusNotFound)
+ }
+}
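
The two regular expressions at the top of arch.go decide whether a requested file name is routed to the package/signature handler or to the pacman database handler. A small sketch (not part of the patch, file names invented) of what they match:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as declared in arch.go.
	archPkgOrSig := regexp.MustCompile(`^.*\.pkg\.tar\.\w+(\.sig)*$`)
	archDBOrSig := regexp.MustCompile(`^.*.db(\.tar\.gz)*(\.sig)*$`)

	fmt.Println(archPkgOrSig.MatchString("example-1.0.0-1-x86_64.pkg.tar.zst"))     // true
	fmt.Println(archPkgOrSig.MatchString("example-1.0.0-1-x86_64.pkg.tar.zst.sig")) // true
	fmt.Println(archDBOrSig.MatchString("stable.db"))                               // true
	fmt.Println(archDBOrSig.MatchString("stable.db.tar.gz.sig"))                    // true
}
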
diff --git a/routers/api/packages/cargo/cargo.go b/routers/api/packages/cargo/cargo.go
new file mode 100644
index 0000000..140e532
--- /dev/null
+++ b/routers/api/packages/cargo/cargo.go
@@ -0,0 +1,311 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package cargo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ cargo_module "code.gitea.io/gitea/modules/packages/cargo"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ packages_service "code.gitea.io/gitea/services/packages"
+ cargo_service "code.gitea.io/gitea/services/packages/cargo"
+)
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#web-api
+type StatusResponse struct {
+ OK bool `json:"ok"`
+ Errors []StatusMessage `json:"errors,omitempty"`
+}
+
+type StatusMessage struct {
+ Message string `json:"detail"`
+}
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, StatusResponse{
+ OK: false,
+ Errors: []StatusMessage{
+ {
+ Message: message,
+ },
+ },
+ })
+ })
+}
+
+// https://rust-lang.github.io/rfcs/2789-sparse-index.html
+func RepositoryConfig(ctx *context.Context) {
+ ctx.JSON(http.StatusOK, cargo_service.BuildConfig(ctx.Package.Owner, setting.Service.RequireSignInView || ctx.Package.Owner.Visibility != structs.VisibleTypePublic))
+}
+
+func EnumeratePackageVersions(ctx *context.Context) {
+ p, err := packages_model.GetPackageByName(ctx, ctx.Package.Owner.ID, packages_model.TypeCargo, ctx.Params("package"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ b, err := cargo_service.BuildPackageIndex(ctx, p)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if b == nil {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ ctx.PlainTextBytes(http.StatusOK, b.Bytes())
+}
+
+type SearchResult struct {
+ Crates []*SearchResultCrate `json:"crates"`
+ Meta SearchResultMeta `json:"meta"`
+}
+
+type SearchResultCrate struct {
+ Name string `json:"name"`
+ LatestVersion string `json:"max_version"`
+ Description string `json:"description"`
+}
+
+type SearchResultMeta struct {
+ Total int64 `json:"total"`
+}
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#search
+func SearchPackages(ctx *context.Context) {
+ page := ctx.FormInt("page")
+ if page < 1 {
+ page = 1
+ }
+ perPage := ctx.FormInt("per_page")
+ paginator := db.ListOptions{
+ Page: page,
+ PageSize: convert.ToCorrectPageSize(perPage),
+ }
+
+ pvs, total, err := packages_model.SearchLatestVersions(
+ ctx,
+ &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeCargo,
+ Name: packages_model.SearchValue{Value: ctx.FormTrim("q")},
+ IsInternal: optional.Some(false),
+ Paginator: &paginator,
+ },
+ )
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ crates := make([]*SearchResultCrate, 0, len(pvs))
+ for _, pd := range pds {
+ crates = append(crates, &SearchResultCrate{
+ Name: pd.Package.Name,
+ LatestVersion: pd.Version.Version,
+ Description: pd.Metadata.(*cargo_module.Metadata).Description,
+ })
+ }
+
+ ctx.JSON(http.StatusOK, SearchResult{
+ Crates: crates,
+ Meta: SearchResultMeta{
+ Total: total,
+ },
+ })
+}
+
+type Owners struct {
+ Users []OwnerUser `json:"users"`
+}
+
+type OwnerUser struct {
+ ID int64 `json:"id"`
+ Login string `json:"login"`
+ Name string `json:"name"`
+}
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#owners-list
+func ListOwners(ctx *context.Context) {
+ ctx.JSON(http.StatusOK, Owners{
+ Users: []OwnerUser{
+ {
+ ID: ctx.Package.Owner.ID,
+ Login: ctx.Package.Owner.Name,
+ Name: ctx.Package.Owner.DisplayName(),
+ },
+ },
+ })
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeCargo,
+ Name: ctx.Params("package"),
+ Version: ctx.Params("version"),
+ },
+ &packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s-%s.crate", ctx.Params("package"), ctx.Params("version"))),
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#publish
+func UploadPackage(ctx *context.Context) {
+ defer ctx.Req.Body.Close()
+
+ cp, err := cargo_module.ParsePackage(ctx.Req.Body)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(cp.Content)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ if buf.Size() != cp.ContentSize {
+ apiError(ctx, http.StatusBadRequest, "invalid content size")
+ return
+ }
+
+ pv, _, err := packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeCargo,
+ Name: cp.Name,
+ Version: cp.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: cp.Metadata,
+ VersionProperties: map[string]string{
+ cargo_module.PropertyYanked: strconv.FormatBool(false),
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s-%s.crate", cp.Name, cp.Version)),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := cargo_service.UpdatePackageIndexIfExists(ctx, ctx.Doer, ctx.Package.Owner, pv.PackageID); err != nil {
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ log.Error("Rollback creation of package version: %v", err)
+ }
+
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, StatusResponse{OK: true})
+}
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#yank
+func YankPackage(ctx *context.Context) {
+ yankPackage(ctx, true)
+}
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#unyank
+func UnyankPackage(ctx *context.Context) {
+ yankPackage(ctx, false)
+}
+
+func yankPackage(ctx *context.Context, yank bool) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeCargo, ctx.Params("package"), ctx.Params("version"))
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeVersion, pv.ID, cargo_module.PropertyYanked)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pps) == 0 {
+ apiError(ctx, http.StatusInternalServerError, "Property not found")
+ return
+ }
+
+ pp := pps[0]
+ pp.Value = strconv.FormatBool(yank)
+
+ if err := packages_model.UpdateProperty(ctx, pp); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if err := cargo_service.UpdatePackageIndexIfExists(ctx, ctx.Doer, ctx.Package.Owner, pv.PackageID); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, StatusResponse{OK: true})
+}
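
SearchPackages answers cargo's registry search API with the `crates`/`meta` shape defined by the structs above. A minimal sketch (not part of the patch) of the JSON a client receives, built from local copies of those structs; crate name, version and description are invented:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the response structs from cargo.go, values invented for illustration.
type SearchResultCrate struct {
	Name          string `json:"name"`
	LatestVersion string `json:"max_version"`
	Description   string `json:"description"`
}

type SearchResultMeta struct {
	Total int64 `json:"total"`
}

type SearchResult struct {
	Crates []*SearchResultCrate `json:"crates"`
	Meta   SearchResultMeta     `json:"meta"`
}

func main() {
	b, _ := json.Marshal(SearchResult{
		Crates: []*SearchResultCrate{{Name: "demo", LatestVersion: "0.1.0", Description: "an example crate"}},
		Meta:   SearchResultMeta{Total: 1},
	})
	fmt.Println(string(b))
	// {"crates":[{"name":"demo","max_version":"0.1.0","description":"an example crate"}],"meta":{"total":1}}
}
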
diff --git a/routers/api/packages/chef/auth.go b/routers/api/packages/chef/auth.go
new file mode 100644
index 0000000..a790e9a
--- /dev/null
+++ b/routers/api/packages/chef/auth.go
@@ -0,0 +1,274 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package chef
+
+import (
+ "context"
+ "crypto"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "hash"
+ "math/big"
+ "net/http"
+ "path"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ user_model "code.gitea.io/gitea/models/user"
+ chef_module "code.gitea.io/gitea/modules/packages/chef"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/services/auth"
+)
+
+const (
+ maxTimeDifference = 10 * time.Minute
+)
+
+var (
+ algorithmPattern = regexp.MustCompile(`algorithm=(\w+)`)
+ versionPattern = regexp.MustCompile(`version=(\d+\.\d+)`)
+ authorizationPattern = regexp.MustCompile(`\AX-Ops-Authorization-(\d+)`)
+
+ _ auth.Method = &Auth{}
+)
+
+// Documentation:
+// https://docs.chef.io/server/api_chef_server/#required-headers
+// https://github.com/chef-boneyard/chef-rfc/blob/master/rfc065-sign-v1.3.md
+// https://github.com/chef/mixlib-authentication/blob/bc8adbef833d4be23dc78cb23e6fe44b51ebc34f/lib/mixlib/authentication/signedheaderauth.rb
+
+type Auth struct{}
+
+func (a *Auth) Name() string {
+ return "chef"
+}
+
+// Verify extracts the user from the signed request.
+// If the request is signed with the user's private key, the user is verified.
+func (a *Auth) Verify(req *http.Request, w http.ResponseWriter, store auth.DataStore, sess auth.SessionStore) (*user_model.User, error) {
+ u, err := getUserFromRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ if u == nil {
+ return nil, nil
+ }
+
+ pub, err := getUserPublicKey(req.Context(), u)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := verifyTimestamp(req); err != nil {
+ return nil, err
+ }
+
+ version, err := getSignVersion(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := verifySignedHeaders(req, version, pub.(*rsa.PublicKey)); err != nil {
+ return nil, err
+ }
+
+ return u, nil
+}
+
+func getUserFromRequest(req *http.Request) (*user_model.User, error) {
+ username := req.Header.Get("X-Ops-Userid")
+ if username == "" {
+ return nil, nil
+ }
+
+ return user_model.GetUserByName(req.Context(), username)
+}
+
+func getUserPublicKey(ctx context.Context, u *user_model.User) (crypto.PublicKey, error) {
+ pubKey, err := user_model.GetSetting(ctx, u.ID, chef_module.SettingPublicPem)
+ if err != nil {
+ return nil, err
+ }
+
+ pubPem, _ := pem.Decode([]byte(pubKey))
+
+ return x509.ParsePKIXPublicKey(pubPem.Bytes)
+}
+
+func verifyTimestamp(req *http.Request) error {
+ hdr := req.Header.Get("X-Ops-Timestamp")
+ if hdr == "" {
+ return util.NewInvalidArgumentErrorf("X-Ops-Timestamp header missing")
+ }
+
+ ts, err := time.Parse(time.RFC3339, hdr)
+ if err != nil {
+ return err
+ }
+
+ diff := time.Now().UTC().Sub(ts)
+ if diff < 0 {
+ diff = -diff
+ }
+
+ if diff > maxTimeDifference {
+ return fmt.Errorf("time difference")
+ }
+
+ return nil
+}
+
+func getSignVersion(req *http.Request) (string, error) {
+ hdr := req.Header.Get("X-Ops-Sign")
+ if hdr == "" {
+ return "", util.NewInvalidArgumentErrorf("X-Ops-Sign header missing")
+ }
+
+ m := versionPattern.FindStringSubmatch(hdr)
+ if len(m) != 2 {
+ return "", util.NewInvalidArgumentErrorf("invalid X-Ops-Sign header")
+ }
+
+ switch m[1] {
+ case "1.0", "1.1", "1.2", "1.3":
+ default:
+ return "", util.NewInvalidArgumentErrorf("unsupported version")
+ }
+
+ version := m[1]
+
+ m = algorithmPattern.FindStringSubmatch(hdr)
+ if len(m) == 2 && m[1] != "sha1" && !(m[1] == "sha256" && version == "1.3") {
+ return "", util.NewInvalidArgumentErrorf("unsupported algorithm")
+ }
+
+ return version, nil
+}
+
+func verifySignedHeaders(req *http.Request, version string, pub *rsa.PublicKey) error {
+ authorizationData, err := getAuthorizationData(req)
+ if err != nil {
+ return err
+ }
+
+ checkData := buildCheckData(req, version)
+
+ switch version {
+ case "1.3":
+ return verifyDataNew(authorizationData, checkData, pub, crypto.SHA256)
+ case "1.2":
+ return verifyDataNew(authorizationData, checkData, pub, crypto.SHA1)
+ default:
+ return verifyDataOld(authorizationData, checkData, pub)
+ }
+}
+
+func getAuthorizationData(req *http.Request) ([]byte, error) {
+ valueList := make(map[int]string)
+ for k, vs := range req.Header {
+ if m := authorizationPattern.FindStringSubmatch(k); m != nil {
+ index, _ := strconv.Atoi(m[1])
+ var v string
+ if len(vs) == 0 {
+ v = ""
+ } else {
+ v = vs[0]
+ }
+ valueList[index] = v
+ }
+ }
+
+ tmp := make([]string, len(valueList))
+ for k, v := range valueList {
+ if k > len(tmp) {
+ return nil, fmt.Errorf("invalid X-Ops-Authorization headers")
+ }
+ tmp[k-1] = v
+ }
+
+ return base64.StdEncoding.DecodeString(strings.Join(tmp, ""))
+}
+
+func buildCheckData(req *http.Request, version string) []byte {
+ username := req.Header.Get("X-Ops-Userid")
+ if version != "1.0" && version != "1.3" {
+ sum := sha1.Sum([]byte(username))
+ username = base64.StdEncoding.EncodeToString(sum[:])
+ }
+
+ var data string
+ if version == "1.3" {
+ data = fmt.Sprintf(
+ "Method:%s\nPath:%s\nX-Ops-Content-Hash:%s\nX-Ops-Sign:version=%s\nX-Ops-Timestamp:%s\nX-Ops-UserId:%s\nX-Ops-Server-API-Version:%s",
+ req.Method,
+ path.Clean(req.URL.Path),
+ req.Header.Get("X-Ops-Content-Hash"),
+ version,
+ req.Header.Get("X-Ops-Timestamp"),
+ username,
+ req.Header.Get("X-Ops-Server-Api-Version"),
+ )
+ } else {
+ sum := sha1.Sum([]byte(path.Clean(req.URL.Path)))
+ data = fmt.Sprintf(
+ "Method:%s\nHashed Path:%s\nX-Ops-Content-Hash:%s\nX-Ops-Timestamp:%s\nX-Ops-UserId:%s",
+ req.Method,
+ base64.StdEncoding.EncodeToString(sum[:]),
+ req.Header.Get("X-Ops-Content-Hash"),
+ req.Header.Get("X-Ops-Timestamp"),
+ username,
+ )
+ }
+
+ return []byte(data)
+}
+
+func verifyDataNew(signature, data []byte, pub *rsa.PublicKey, algo crypto.Hash) error {
+ var h hash.Hash
+ if algo == crypto.SHA256 {
+ h = sha256.New()
+ } else {
+ h = sha1.New()
+ }
+ if _, err := h.Write(data); err != nil {
+ return err
+ }
+
+ return rsa.VerifyPKCS1v15(pub, algo, h.Sum(nil), signature)
+}
+
+func verifyDataOld(signature, data []byte, pub *rsa.PublicKey) error {
+ c := new(big.Int)
+ m := new(big.Int)
+ m.SetBytes(signature)
+ e := big.NewInt(int64(pub.E))
+ c.Exp(m, e, pub.N)
+
+ out := c.Bytes()
+
+ skip := 0
+ for i := 2; i < len(out); i++ {
+ if i+1 >= len(out) {
+ break
+ }
+ if out[i] == 0xFF && out[i+1] == 0 {
+ skip = i + 2
+ break
+ }
+ }
+
+ if !slices.Equal(out[skip:], data) {
+ return fmt.Errorf("could not verify signature")
+ }
+
+ return nil
+}
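
getAuthorizationData reassembles the request signature that a Chef client splits across numbered X-Ops-Authorization-N headers, then base64-decodes it before the RSA check. A minimal sketch (not part of the patch) of that reassembly; the header values are invented, and real clients typically split the signature into roughly 60-character chunks:

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	full := base64.StdEncoding.EncodeToString([]byte("example signature bytes"))

	// Simulate a client that sent the signature in two chunks.
	h := http.Header{}
	h.Set("X-Ops-Authorization-1", full[:10])
	h.Set("X-Ops-Authorization-2", full[10:])

	// Join the chunks in index order and decode, as getAuthorizationData does.
	joined := h.Get("X-Ops-Authorization-1") + h.Get("X-Ops-Authorization-2")
	raw, err := base64.StdEncoding.DecodeString(joined)
	fmt.Println(string(raw), err) // example signature bytes <nil>
}
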
diff --git a/routers/api/packages/chef/chef.go b/routers/api/packages/chef/chef.go
new file mode 100644
index 0000000..b49f4e9
--- /dev/null
+++ b/routers/api/packages/chef/chef.go
@@ -0,0 +1,403 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package chef
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ chef_module "code.gitea.io/gitea/modules/packages/chef"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ type Error struct {
+ ErrorMessages []string `json:"error_messages"`
+ }
+
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, Error{
+ ErrorMessages: []string{message},
+ })
+ })
+}
+
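+// PackagesUniverse lists all packages with their versions, dependencies and download URLs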
+func PackagesUniverse(ctx *context.Context) {
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeChef,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type VersionInfo struct {
+ LocationType string `json:"location_type"`
+ LocationPath string `json:"location_path"`
+ DownloadURL string `json:"download_url"`
+ Dependencies map[string]string `json:"dependencies"`
+ }
+
+ baseURL := setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/chef/api/v1"
+
+ universe := make(map[string]map[string]*VersionInfo)
+ for _, pd := range pds {
+ if _, ok := universe[pd.Package.Name]; !ok {
+ universe[pd.Package.Name] = make(map[string]*VersionInfo)
+ }
+ universe[pd.Package.Name][pd.Version.Version] = &VersionInfo{
+ LocationType: "opscode",
+ LocationPath: baseURL,
+ DownloadURL: fmt.Sprintf("%s/cookbooks/%s/versions/%s/download", baseURL, url.PathEscape(pd.Package.Name), pd.Version.Version),
+ Dependencies: pd.Metadata.(*chef_module.Metadata).Dependencies,
+ }
+ }
+
+ ctx.JSON(http.StatusOK, universe)
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_list.rb
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_search.rb
+func EnumeratePackages(ctx *context.Context) {
+ opts := &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeChef,
+ Name: packages_model.SearchValue{Value: ctx.FormTrim("q")},
+ IsInternal: optional.Some(false),
+ Paginator: db.NewAbsoluteListOptions(
+ ctx.FormInt("start"),
+ ctx.FormInt("items"),
+ ),
+ }
+
+ switch strings.ToLower(ctx.FormTrim("order")) {
+ case "recently_updated", "recently_added":
+ opts.Sort = packages_model.SortCreatedDesc
+ default:
+ opts.Sort = packages_model.SortNameAsc
+ }
+
+ pvs, total, err := packages_model.SearchLatestVersions(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type Item struct {
+ CookbookName string `json:"cookbook_name"`
+ CookbookMaintainer string `json:"cookbook_maintainer"`
+ CookbookDescription string `json:"cookbook_description"`
+ Cookbook string `json:"cookbook"`
+ }
+
+ type Result struct {
+ Start int `json:"start"`
+ Total int `json:"total"`
+ Items []*Item `json:"items"`
+ }
+
+ baseURL := setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/chef/api/v1/cookbooks/"
+
+ items := make([]*Item, 0, len(pds))
+ for _, pd := range pds {
+ metadata := pd.Metadata.(*chef_module.Metadata)
+
+ items = append(items, &Item{
+ CookbookName: pd.Package.Name,
+ CookbookMaintainer: metadata.Author,
+ CookbookDescription: metadata.Description,
+ Cookbook: baseURL + url.PathEscape(pd.Package.Name),
+ })
+ }
+
+ skip, _ := opts.Paginator.GetSkipTake()
+
+ ctx.JSON(http.StatusOK, &Result{
+ Start: skip,
+ Total: int(total),
+ Items: items,
+ })
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_show.rb
+func PackageMetadata(ctx *context.Context) {
+ packageName := ctx.Params("name")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeChef, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ type Result struct {
+ Name string `json:"name"`
+ Maintainer string `json:"maintainer"`
+ Description string `json:"description"`
+ Category string `json:"category"`
+ LatestVersion string `json:"latest_version"`
+ SourceURL string `json:"source_url"`
+ CreatedAt time.Time `json:"created_at"`
+ UpdatedAt time.Time `json:"updated_at"`
+ Deprecated bool `json:"deprecated"`
+ Versions []string `json:"versions"`
+ }
+
+ baseURL := fmt.Sprintf("%sapi/packages/%s/chef/api/v1/cookbooks/%s/versions/", setting.AppURL, ctx.Package.Owner.Name, url.PathEscape(packageName))
+
+ versions := make([]string, 0, len(pds))
+ for _, pd := range pds {
+ versions = append(versions, baseURL+pd.Version.Version)
+ }
+
+ latest := pds[len(pds)-1]
+
+ metadata := latest.Metadata.(*chef_module.Metadata)
+
+ ctx.JSON(http.StatusOK, &Result{
+ Name: latest.Package.Name,
+ Maintainer: metadata.Author,
+ Description: metadata.Description,
+ LatestVersion: baseURL + latest.Version.Version,
+ SourceURL: metadata.RepositoryURL,
+ CreatedAt: latest.Version.CreatedUnix.AsLocalTime(),
+ UpdatedAt: latest.Version.CreatedUnix.AsLocalTime(),
+ Deprecated: false,
+ Versions: versions,
+ })
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_show.rb
+func PackageVersionMetadata(ctx *context.Context) {
+ packageName := ctx.Params("name")
+ packageVersion := strings.ReplaceAll(ctx.Params("version"), "_", ".") // Chef calls this endpoint with "_" instead of "."?!
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeChef, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type Result struct {
+ Version string `json:"version"`
+ TarballFileSize int64 `json:"tarball_file_size"`
+ PublishedAt time.Time `json:"published_at"`
+ Cookbook string `json:"cookbook"`
+ File string `json:"file"`
+ License string `json:"license"`
+ Dependencies map[string]string `json:"dependencies"`
+ }
+
+ baseURL := fmt.Sprintf("%sapi/packages/%s/chef/api/v1/cookbooks/%s", setting.AppURL, ctx.Package.Owner.Name, url.PathEscape(pd.Package.Name))
+
+ metadata := pd.Metadata.(*chef_module.Metadata)
+
+ ctx.JSON(http.StatusOK, &Result{
+ Version: pd.Version.Version,
+ TarballFileSize: pd.Files[0].Blob.Size,
+ PublishedAt: pd.Version.CreatedUnix.AsLocalTime(),
+ Cookbook: baseURL,
+ File: fmt.Sprintf("%s/versions/%s/download", baseURL, pd.Version.Version),
+ License: metadata.License,
+ Dependencies: metadata.Dependencies,
+ })
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_share.rb
+func UploadPackage(ctx *context.Context) {
+ file, _, err := ctx.Req.FormFile("tarball")
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ defer file.Close()
+
+ buf, err := packages_module.CreateHashedBufferFromReader(file)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := chef_module.ParsePackage(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeChef,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ Creator: ctx.Doer,
+ SemverCompatible: true,
+ Metadata: pck.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(pck.Version + ".tar.gz"),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, make(map[any]any))
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_download.rb
+func DownloadPackage(ctx *context.Context) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeChef, ctx.Params("name"), ctx.Params("version"))
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pf := pd.Files[0].File
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_unshare.rb
+func DeletePackageVersion(ctx *context.Context) {
+ packageName := ctx.Params("name")
+ packageVersion := ctx.Params("version")
+
+ err := packages_service.RemovePackageVersionByNameAndVersion(
+ ctx,
+ ctx.Doer,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeChef,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
+// https://github.com/chef/chef/blob/main/knife/lib/chef/knife/supermarket_unshare.rb
+func DeletePackage(ctx *context.Context) {
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeChef, ctx.Params("name"))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ for _, pv := range pvs {
+ if err := packages_service.RemovePackageVersion(ctx, ctx.Doer, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusOK)
+}
diff --git a/routers/api/packages/composer/api.go b/routers/api/packages/composer/api.go
new file mode 100644
index 0000000..a3bcf80
--- /dev/null
+++ b/routers/api/packages/composer/api.go
@@ -0,0 +1,117 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package composer
+
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ composer_module "code.gitea.io/gitea/modules/packages/composer"
+)
+
+// ServiceIndexResponse contains registry endpoints
+type ServiceIndexResponse struct {
+ SearchTemplate string `json:"search"`
+ MetadataTemplate string `json:"metadata-url"`
+ PackageList string `json:"list"`
+}
+
+func createServiceIndexResponse(registryURL string) *ServiceIndexResponse {
+ return &ServiceIndexResponse{
+ SearchTemplate: registryURL + "/search.json?q=%query%&type=%type%",
+ MetadataTemplate: registryURL + "/p2/%package%.json",
+ PackageList: registryURL + "/list.json",
+ }
+}
+
+// SearchResultResponse contains search results
+type SearchResultResponse struct {
+ Total int64 `json:"total"`
+ Results []*SearchResult `json:"results"`
+ NextLink string `json:"next,omitempty"`
+}
+
+// SearchResult contains a search result
+type SearchResult struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Downloads int64 `json:"downloads"`
+}
+
+func createSearchResultResponse(total int64, pds []*packages_model.PackageDescriptor, nextLink string) *SearchResultResponse {
+ results := make([]*SearchResult, 0, len(pds))
+
+ for _, pd := range pds {
+ results = append(results, &SearchResult{
+ Name: pd.Package.Name,
+ Description: pd.Metadata.(*composer_module.Metadata).Description,
+ Downloads: pd.Version.DownloadCount,
+ })
+ }
+
+ return &SearchResultResponse{
+ Total: total,
+ Results: results,
+ NextLink: nextLink,
+ }
+}
+
+// PackageMetadataResponse contains the metadata for all versions of a package
+type PackageMetadataResponse struct {
+ Minified string `json:"minified"`
+ Packages map[string][]*PackageVersionMetadata `json:"packages"`
+}
+
+// PackageVersionMetadata contains package metadata
+type PackageVersionMetadata struct {
+ *composer_module.Metadata
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Type string `json:"type"`
+ Created time.Time `json:"time"`
+ Dist Dist `json:"dist"`
+}
+
+// Dist contains package download information
+type Dist struct {
+ Type string `json:"type"`
+ URL string `json:"url"`
+ Checksum string `json:"shasum"`
+}
+
+func createPackageMetadataResponse(registryURL string, pds []*packages_model.PackageDescriptor) *PackageMetadataResponse {
+ versions := make([]*PackageVersionMetadata, 0, len(pds))
+
+ for _, pd := range pds {
+ packageType := ""
+ for _, pvp := range pd.VersionProperties {
+ if pvp.Name == composer_module.TypeProperty {
+ packageType = pvp.Value
+ break
+ }
+ }
+
+ versions = append(versions, &PackageVersionMetadata{
+ Name: pd.Package.Name,
+ Version: pd.Version.Version,
+ Type: packageType,
+ Created: pd.Version.CreatedUnix.AsLocalTime(),
+ Metadata: pd.Metadata.(*composer_module.Metadata),
+ Dist: Dist{
+ Type: "zip",
+ URL: fmt.Sprintf("%s/files/%s/%s/%s", registryURL, url.PathEscape(pd.Package.LowerName), url.PathEscape(pd.Version.LowerVersion), url.PathEscape(pd.Files[0].File.LowerName)),
+ Checksum: pd.Files[0].Blob.HashSHA1,
+ },
+ })
+ }
+
+ return &PackageMetadataResponse{
+ Minified: "composer/2.0",
+ Packages: map[string][]*PackageVersionMetadata{
+ pds[0].Package.Name: versions,
+ },
+ }
+}
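
createServiceIndexResponse advertises the three endpoints a Composer client needs; the %query%, %type% and %package% placeholders are later filled in by the Composer client itself. A minimal sketch (not part of the patch) of the resulting JSON, with an invented registry URL:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors ServiceIndexResponse from api.go.
type ServiceIndexResponse struct {
	SearchTemplate   string `json:"search"`
	MetadataTemplate string `json:"metadata-url"`
	PackageList      string `json:"list"`
}

func main() {
	registryURL := "https://example.com/api/packages/owner/composer"
	b, _ := json.MarshalIndent(ServiceIndexResponse{
		SearchTemplate:   registryURL + "/search.json?q=%query%&type=%type%",
		MetadataTemplate: registryURL + "/p2/%package%.json",
		PackageList:      registryURL + "/list.json",
	}, "", "  ")
	fmt.Println(string(b))
}
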
diff --git a/routers/api/packages/composer/composer.go b/routers/api/packages/composer/composer.go
new file mode 100644
index 0000000..a045da4
--- /dev/null
+++ b/routers/api/packages/composer/composer.go
@@ -0,0 +1,261 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package composer
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ composer_module "code.gitea.io/gitea/modules/packages/composer"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/hashicorp/go-version"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ type Error struct {
+ Status int `json:"status"`
+ Message string `json:"message"`
+ }
+ ctx.JSON(status, struct {
+ Errors []Error `json:"errors"`
+ }{
+ Errors: []Error{
+ {Status: status, Message: message},
+ },
+ })
+ })
+}
+
+// ServiceIndex displays registry endpoints
+func ServiceIndex(ctx *context.Context) {
+ resp := createServiceIndexResponse(setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/composer")
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// SearchPackages searches packages; only the "q" and "type" parameters are supported
+// https://packagist.org/apidoc#search-packages
+func SearchPackages(ctx *context.Context) {
+ page := ctx.FormInt("page")
+ if page < 1 {
+ page = 1
+ }
+ perPage := ctx.FormInt("per_page")
+ paginator := db.ListOptions{
+ Page: page,
+ PageSize: convert.ToCorrectPageSize(perPage),
+ }
+
+ opts := &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeComposer,
+ Name: packages_model.SearchValue{Value: ctx.FormTrim("q")},
+ IsInternal: optional.Some(false),
+ Paginator: &paginator,
+ }
+ if ctx.FormTrim("type") != "" {
+ opts.Properties = map[string]string{
+ composer_module.TypeProperty: ctx.FormTrim("type"),
+ }
+ }
+
+ pvs, total, err := packages_model.SearchLatestVersions(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ nextLink := ""
+ if len(pvs) == paginator.PageSize {
+ u, err := url.Parse(setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/composer/search.json")
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ q := u.Query()
+ q.Set("q", ctx.FormTrim("q"))
+ q.Set("type", ctx.FormTrim("type"))
+ q.Set("page", strconv.Itoa(page+1))
+ if perPage != 0 {
+ q.Set("per_page", strconv.Itoa(perPage))
+ }
+ u.RawQuery = q.Encode()
+
+ nextLink = u.String()
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createSearchResultResponse(total, pds, nextLink)
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// EnumeratePackages lists all package names
+// https://packagist.org/apidoc#list-packages
+func EnumeratePackages(ctx *context.Context) {
+ ps, err := packages_model.GetPackagesByType(ctx, ctx.Package.Owner.ID, packages_model.TypeComposer)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ names := make([]string, 0, len(ps))
+ for _, p := range ps {
+ names = append(names, p.Name)
+ }
+
+ ctx.JSON(http.StatusOK, map[string][]string{
+ "packageNames": names,
+ })
+}
+
+// PackageMetadata returns the metadata for a single package
+// https://packagist.org/apidoc#get-package-data
+func PackageMetadata(ctx *context.Context) {
+ vendorName := ctx.Params("vendorname")
+ projectName := ctx.Params("projectname")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeComposer, vendorName+"/"+projectName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, packages_model.ErrPackageNotExist)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createPackageMetadataResponse(
+ setting.AppURL+"api/packages/"+ctx.Package.Owner.Name+"/composer",
+ pds,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeComposer,
+ Name: ctx.Params("package"),
+ Version: ctx.Params("version"),
+ },
+ &packages_service.PackageFileInfo{
+ Filename: ctx.Params("filename"),
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackage creates a new package
+func UploadPackage(ctx *context.Context) {
+ buf, err := packages_module.CreateHashedBufferFromReader(ctx.Req.Body)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ cp, err := composer_module.ParsePackage(buf, buf.Size())
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if cp.Version == "" {
+ v, err := version.NewVersion(ctx.FormTrim("version"))
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, composer_module.ErrInvalidVersion)
+ return
+ }
+ cp.Version = v.String()
+ }
+
+ _, _, err = packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeComposer,
+ Name: cp.Name,
+ Version: cp.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: cp.Metadata,
+ VersionProperties: map[string]string{
+ composer_module.TypeProperty: cp.Type,
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s.%s.zip", strings.ReplaceAll(cp.Name, "/", "-"), cp.Version)),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
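
UploadPackage stores the uploaded archive under a file name derived from the package name and version, replacing the vendor separator and lower-casing the result. A tiny sketch (not part of the patch, name and version invented) of that convention:

package main

import (
	"fmt"
	"strings"
)

func main() {
	name, version := "Acme/Widgets", "1.2.3"
	filename := strings.ToLower(fmt.Sprintf("%s.%s.zip", strings.ReplaceAll(name, "/", "-"), version))
	fmt.Println(filename) // acme-widgets.1.2.3.zip
}
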
diff --git a/routers/api/packages/conan/auth.go b/routers/api/packages/conan/auth.go
new file mode 100644
index 0000000..e2e1901
--- /dev/null
+++ b/routers/api/packages/conan/auth.go
@@ -0,0 +1,48 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conan
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/services/auth"
+ "code.gitea.io/gitea/services/packages"
+)
+
+var _ auth.Method = &Auth{}
+
+type Auth struct{}
+
+func (a *Auth) Name() string {
+ return "conan"
+}
+
+// Verify extracts the user from the Bearer token
+func (a *Auth) Verify(req *http.Request, w http.ResponseWriter, store auth.DataStore, sess auth.SessionStore) (*user_model.User, error) {
+ uid, scope, err := packages.ParseAuthorizationToken(req)
+ if err != nil {
+ log.Trace("ParseAuthorizationToken: %v", err)
+ return nil, err
+ }
+
+ if uid == 0 {
+ return nil, nil
+ }
+
+ // Propagate scope of the authorization token.
+ if scope != "" {
+ store.GetData()["IsApiToken"] = true
+ store.GetData()["ApiTokenScope"] = scope
+ }
+
+ u, err := user_model.GetUserByID(req.Context(), uid)
+ if err != nil {
+ log.Error("GetUserByID: %v", err)
+ return nil, err
+ }
+
+ return u, nil
+}
diff --git a/routers/api/packages/conan/conan.go b/routers/api/packages/conan/conan.go
new file mode 100644
index 0000000..e07907a
--- /dev/null
+++ b/routers/api/packages/conan/conan.go
@@ -0,0 +1,807 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conan
+
+import (
+ std_ctx "context"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ conan_model "code.gitea.io/gitea/models/packages/conan"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ conan_module "code.gitea.io/gitea/modules/packages/conan"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ notify_service "code.gitea.io/gitea/services/notify"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+const (
+ conanfileFile = "conanfile.py"
+ conaninfoFile = "conaninfo.txt"
+
+ recipeReferenceKey = "RecipeReference"
+ packageReferenceKey = "PackageReference"
+)
+
+var (
+ recipeFileList = container.SetOf(
+ conanfileFile,
+ "conanmanifest.txt",
+ "conan_sources.tgz",
+ "conan_export.tgz",
+ )
+ packageFileList = container.SetOf(
+ conaninfoFile,
+ "conanmanifest.txt",
+ "conan_package.tgz",
+ )
+)
+
+func jsonResponse(ctx *context.Context, status int, obj any) {
+ // https://github.com/conan-io/conan/issues/6613
+ ctx.Resp.Header().Set("Content-Type", "application/json")
+ ctx.Status(status)
+ if err := json.NewEncoder(ctx.Resp).Encode(obj); err != nil {
+ log.Error("JSON encode: %v", err)
+ }
+}
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ jsonResponse(ctx, status, map[string]string{
+ "message": message,
+ })
+ })
+}
+
+func baseURL(ctx *context.Context) string {
+ return setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/conan"
+}
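+
+// For example, with AppURL "https://forge.example.com/" and owner "alice"
+// (illustrative values; AppURL is assumed to end with a trailing slash),
+// baseURL returns "https://forge.example.com/api/packages/alice/conan".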
+
+// ExtractPathParameters is a middleware to extract common parameters from the request path
+func ExtractPathParameters(ctx *context.Context) {
+ rref, err := conan_module.NewRecipeReference(
+ ctx.Params("name"),
+ ctx.Params("version"),
+ ctx.Params("user"),
+ ctx.Params("channel"),
+ ctx.Params("recipe_revision"),
+ )
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ ctx.Data[recipeReferenceKey] = rref
+
+ reference := ctx.Params("package_reference")
+
+ var pref *conan_module.PackageReference
+ if reference != "" {
+ pref, err = conan_module.NewPackageReference(
+ rref,
+ reference,
+ ctx.Params("package_revision"),
+ )
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ }
+
+ ctx.Data[packageReferenceKey] = pref
+}
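+
+// The handlers below read these values back from ctx.Data via type assertion;
+// the package reference entry is nil when the path contains no package reference.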
+
+// Ping reports the server capabilities
+func Ping(ctx *context.Context) {
+ ctx.RespHeader().Add("X-Conan-Server-Capabilities", "revisions") // complex_search,checksum_deploy,matrix_params
+
+ ctx.Status(http.StatusOK)
+}
+
+// Authenticate creates an authentication token for the user
+func Authenticate(ctx *context.Context) {
+ if ctx.Doer == nil {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ // If there's an API scope, ensure it propagates.
+ scope, _ := ctx.Data.GetData()["ApiTokenScope"].(auth_model.AccessTokenScope)
+
+ token, err := packages_service.CreateAuthorizationToken(ctx.Doer, scope)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.PlainText(http.StatusOK, token)
+}
+
+// CheckCredentials tests if the provided authentication token is valid
+func CheckCredentials(ctx *context.Context) {
+ if ctx.Doer == nil {
+ ctx.Status(http.StatusUnauthorized)
+ } else {
+ ctx.Status(http.StatusOK)
+ }
+}
+
+// RecipeSnapshot displays the recipe files with their md5 hash
+func RecipeSnapshot(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ serveSnapshot(ctx, rref.AsKey())
+}
+
+// PackageSnapshot displays the package files with their md5 hash
+func PackageSnapshot(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ serveSnapshot(ctx, pref.AsKey())
+}
+
+func serveSnapshot(ctx *context.Context, fileKey string) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeConan, rref.Name, rref.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pv.ID,
+ CompositeKey: fileKey,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ files := make(map[string]string)
+ for _, pf := range pfs {
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ files[pf.Name] = pb.HashMD5
+ }
+
+ jsonResponse(ctx, http.StatusOK, files)
+}
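+
+// serveSnapshot responds with a flat JSON object mapping file names to their
+// MD5 hashes, e.g. {"conanfile.py": "<md5>", "conanmanifest.txt": "<md5>"}.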
+
+// RecipeDownloadURLs displays the recipe files with their download url
+func RecipeDownloadURLs(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ serveDownloadURLs(
+ ctx,
+ rref.AsKey(),
+ fmt.Sprintf(baseURL(ctx)+"/v1/files/%s/recipe", rref.LinkName()),
+ )
+}
+
+// PackageDownloadURLs displays the package files with their download url
+func PackageDownloadURLs(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ serveDownloadURLs(
+ ctx,
+ pref.AsKey(),
+ fmt.Sprintf(baseURL(ctx)+"/v1/files/%s/package/%s", pref.Recipe.LinkName(), pref.LinkName()),
+ )
+}
+
+func serveDownloadURLs(ctx *context.Context, fileKey, downloadURL string) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeConan, rref.Name, rref.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pv.ID,
+ CompositeKey: fileKey,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pfs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ urls := make(map[string]string)
+ for _, pf := range pfs {
+ urls[pf.Name] = fmt.Sprintf("%s/%s", downloadURL, pf.Name)
+ }
+
+ jsonResponse(ctx, http.StatusOK, urls)
+}
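+
+// serveDownloadURLs responds with a JSON object mapping each stored file name
+// to "<downloadURL>/<file name>".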
+
+// RecipeUploadURLs displays the upload urls for the provided recipe files
+func RecipeUploadURLs(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ serveUploadURLs(
+ ctx,
+ recipeFileList,
+ fmt.Sprintf(baseURL(ctx)+"/v1/files/%s/recipe", rref.LinkName()),
+ )
+}
+
+// PackageUploadURLs displays the upload urls for the provided package files
+func PackageUploadURLs(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ serveUploadURLs(
+ ctx,
+ packageFileList,
+ fmt.Sprintf(baseURL(ctx)+"/v1/files/%s/package/%s", pref.Recipe.LinkName(), pref.LinkName()),
+ )
+}
+
+func serveUploadURLs(ctx *context.Context, fileFilter container.Set[string], uploadURL string) {
+ defer ctx.Req.Body.Close()
+
+ var files map[string]int64
+ if err := json.NewDecoder(ctx.Req.Body).Decode(&files); err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ urls := make(map[string]string)
+ for file := range files {
+ if fileFilter.Contains(file) {
+ urls[file] = fmt.Sprintf("%s/%s", uploadURL, file)
+ }
+ }
+
+ jsonResponse(ctx, http.StatusOK, urls)
+}
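+
+// serveUploadURLs expects a JSON body mapping file names to their sizes (the
+// sizes are ignored) and answers with a map of the accepted file names to the
+// URLs they should be uploaded to.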
+
+// UploadRecipeFile handles the upload of a recipe file
+func UploadRecipeFile(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ uploadFile(ctx, recipeFileList, rref.AsKey())
+}
+
+// UploadPackageFile handles the upload of a package file
+func UploadPackageFile(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ uploadFile(ctx, packageFileList, pref.AsKey())
+}
+
+func uploadFile(ctx *context.Context, fileFilter container.Set[string], fileKey string) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ filename := ctx.Params("filename")
+ if !fileFilter.Contains(filename) {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ isConanfileFile := filename == conanfileFile
+ isConaninfoFile := filename == conaninfoFile
+
+ pci := &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeConan,
+ Name: rref.Name,
+ Version: rref.Version,
+ },
+ Creator: ctx.Doer,
+ }
+ pfci := &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(filename),
+ CompositeKey: fileKey,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: isConanfileFile,
+ Properties: map[string]string{
+ conan_module.PropertyRecipeUser: rref.User,
+ conan_module.PropertyRecipeChannel: rref.Channel,
+ conan_module.PropertyRecipeRevision: rref.RevisionOrDefault(),
+ },
+ OverwriteExisting: true,
+ }
+
+ if pref != nil {
+ pfci.Properties[conan_module.PropertyPackageReference] = pref.Reference
+ pfci.Properties[conan_module.PropertyPackageRevision] = pref.RevisionOrDefault()
+ }
+
+ if isConanfileFile || isConaninfoFile {
+ if isConanfileFile {
+ metadata, err := conan_module.ParseConanfile(buf)
+ if err != nil {
+ log.Error("Error parsing package metadata: %v", err)
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pci.Owner.ID, pci.PackageType, pci.Name, pci.Version)
+ if err != nil && err != packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if pv != nil {
+ raw, err := json.Marshal(metadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pv.MetadataJSON = string(raw)
+ if err := packages_model.UpdateVersion(ctx, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ } else {
+ pci.Metadata = metadata
+ }
+ } else {
+ info, err := conan_module.ParseConaninfo(buf)
+ if err != nil {
+ log.Error("Error parsing conan info: %v", err)
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ raw, err := json.Marshal(info)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pfci.Properties[conan_module.PropertyPackageInfo] = string(raw)
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ pci,
+ pfci,
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
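+
+// uploadFile treats conanfile.py as the lead file: the parsed recipe metadata
+// is stored on the package version (updating it if the version already exists),
+// while conaninfo.txt is parsed and stored as a file property so package
+// searches can return the binary's settings and options.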
+
+// DownloadRecipeFile serves the content of the requested recipe file
+func DownloadRecipeFile(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ downloadFile(ctx, recipeFileList, rref.AsKey())
+}
+
+// DownloadPackageFile serves the content of the requested package file
+func DownloadPackageFile(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ downloadFile(ctx, packageFileList, pref.AsKey())
+}
+
+func downloadFile(ctx *context.Context, fileFilter container.Set[string], fileKey string) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ filename := ctx.Params("filename")
+ if !fileFilter.Contains(filename) {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeConan,
+ Name: rref.Name,
+ Version: rref.Version,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ CompositeKey: fileKey,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// DeleteRecipeV1 deletes the requested recipe(s)
+func DeleteRecipeV1(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ if err := deleteRecipeOrPackage(ctx, rref, true, nil, false); err != nil {
+ if err == packages_model.ErrPackageNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ ctx.Status(http.StatusOK)
+}
+
+// DeleteRecipeV2 deletes the requested recipe(s), respecting their revisions
+func DeleteRecipeV2(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ if err := deleteRecipeOrPackage(ctx, rref, rref.Revision == "", nil, false); err != nil {
+ if err == packages_model.ErrPackageNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ ctx.Status(http.StatusOK)
+}
+
+// DeletePackageV1 deletes the requested package(s)
+func DeletePackageV1(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ type PackageReferences struct {
+ References []string `json:"package_ids"`
+ }
+
+ var ids *PackageReferences
+ if err := json.NewDecoder(ctx.Req.Body).Decode(&ids); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ revisions, err := conan_model.GetRecipeRevisions(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ for _, revision := range revisions {
+ currentRref := rref.WithRevision(revision.Value)
+
+ var references []*conan_model.PropertyValue
+ if len(ids.References) == 0 {
+ if references, err = conan_model.GetPackageReferences(ctx, ctx.Package.Owner.ID, currentRref); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ } else {
+ for _, reference := range ids.References {
+ references = append(references, &conan_model.PropertyValue{Value: reference})
+ }
+ }
+
+ for _, reference := range references {
+ pref, _ := conan_module.NewPackageReference(currentRref, reference.Value, conan_module.DefaultRevision)
+ if err := deleteRecipeOrPackage(ctx, currentRref, true, pref, true); err != nil {
+ if err == packages_model.ErrPackageNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ }
+ }
+ ctx.Status(http.StatusOK)
+}
+
+// DeletePackageV2 deletes the requested package(s), respecting their revisions
+func DeletePackageV2(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ if pref != nil { // has package reference
+ if err := deleteRecipeOrPackage(ctx, rref, false, pref, pref.Revision == ""); err != nil {
+ if err == packages_model.ErrPackageNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ } else {
+ ctx.Status(http.StatusOK)
+ }
+ return
+ }
+
+ references, err := conan_model.GetPackageReferences(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(references) == 0 {
+ apiError(ctx, http.StatusNotFound, conan_model.ErrPackageReferenceNotExist)
+ return
+ }
+
+ for _, reference := range references {
+ pref, _ := conan_module.NewPackageReference(rref, reference.Value, conan_module.DefaultRevision)
+
+ if err := deleteRecipeOrPackage(ctx, rref, false, pref, true); err != nil {
+ if err == packages_model.ErrPackageNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
+func deleteRecipeOrPackage(apictx *context.Context, rref *conan_module.RecipeReference, ignoreRecipeRevision bool, pref *conan_module.PackageReference, ignorePackageRevision bool) error {
+ var pd *packages_model.PackageDescriptor
+ versionDeleted := false
+
+ err := db.WithTx(apictx, func(ctx std_ctx.Context) error {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, apictx.Package.Owner.ID, packages_model.TypeConan, rref.Name, rref.Version)
+ if err != nil {
+ return err
+ }
+
+ pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return err
+ }
+
+ filter := map[string]string{
+ conan_module.PropertyRecipeUser: rref.User,
+ conan_module.PropertyRecipeChannel: rref.Channel,
+ }
+ if !ignoreRecipeRevision {
+ filter[conan_module.PropertyRecipeRevision] = rref.RevisionOrDefault()
+ }
+ if pref != nil {
+ filter[conan_module.PropertyPackageReference] = pref.Reference
+ if !ignorePackageRevision {
+ filter[conan_module.PropertyPackageRevision] = pref.RevisionOrDefault()
+ }
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pv.ID,
+ Properties: filter,
+ })
+ if err != nil {
+ return err
+ }
+ if len(pfs) == 0 {
+ return conan_model.ErrPackageReferenceNotExist
+ }
+
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+ has, err := packages_model.HasVersionFileReferences(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ versionDeleted = true
+
+ return packages_service.DeletePackageVersionAndReferences(ctx, pv)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if versionDeleted {
+ notify_service.PackageDelete(apictx, apictx.Doer, pd)
+ }
+
+ return nil
+}
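+
+// deleteRecipeOrPackage removes all matching files inside one transaction and,
+// if no files reference the version afterwards, deletes the version itself;
+// the package-delete notification is only sent in that latter case.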
+
+// ListRecipeRevisions gets a list of all recipe revisions
+func ListRecipeRevisions(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ revisions, err := conan_model.GetRecipeRevisions(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ listRevisions(ctx, revisions)
+}
+
+// ListPackageRevisions gets a list of all package revisions
+func ListPackageRevisions(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ revisions, err := conan_model.GetPackageRevisions(ctx, ctx.Package.Owner.ID, pref)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ listRevisions(ctx, revisions)
+}
+
+type revisionInfo struct {
+ Revision string `json:"revision"`
+ Time time.Time `json:"time"`
+}
+
+func listRevisions(ctx *context.Context, revisions []*conan_model.PropertyValue) {
+ if len(revisions) == 0 {
+ apiError(ctx, http.StatusNotFound, conan_model.ErrRecipeReferenceNotExist)
+ return
+ }
+
+ type RevisionList struct {
+ Revisions []*revisionInfo `json:"revisions"`
+ }
+
+ revs := make([]*revisionInfo, 0, len(revisions))
+ for _, rev := range revisions {
+ revs = append(revs, &revisionInfo{Revision: rev.Value, Time: rev.CreatedUnix.AsLocalTime()})
+ }
+
+ jsonResponse(ctx, http.StatusOK, &RevisionList{revs})
+}
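+
+// listRevisions responds with e.g.
+// {"revisions": [{"revision": "<hash>", "time": "2024-01-01T00:00:00+00:00"}]}
+// (the timestamp shown is illustrative; it is the revision's creation time in local time).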
+
+// LatestRecipeRevision gets the latest recipe revision
+func LatestRecipeRevision(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ revision, err := conan_model.GetLastRecipeRevision(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ if err == conan_model.ErrRecipeReferenceNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ jsonResponse(ctx, http.StatusOK, &revisionInfo{Revision: revision.Value, Time: revision.CreatedUnix.AsLocalTime()})
+}
+
+// LatestPackageRevision gets the latest package revision
+func LatestPackageRevision(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ revision, err := conan_model.GetLastPackageRevision(ctx, ctx.Package.Owner.ID, pref)
+ if err != nil {
+ if err == conan_model.ErrRecipeReferenceNotExist || err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ jsonResponse(ctx, http.StatusOK, &revisionInfo{Revision: revision.Value, Time: revision.CreatedUnix.AsLocalTime()})
+}
+
+// ListRecipeRevisionFiles gets a list of all recipe revision files
+func ListRecipeRevisionFiles(ctx *context.Context) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ listRevisionFiles(ctx, rref.AsKey())
+}
+
+// ListPackageRevisionFiles gets a list of all package revision files
+func ListPackageRevisionFiles(ctx *context.Context) {
+ pref := ctx.Data[packageReferenceKey].(*conan_module.PackageReference)
+
+ listRevisionFiles(ctx, pref.AsKey())
+}
+
+func listRevisionFiles(ctx *context.Context, fileKey string) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeConan, rref.Name, rref.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pv.ID,
+ CompositeKey: fileKey,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ files := make(map[string]any)
+ for _, pf := range pfs {
+ files[pf.Name] = nil
+ }
+
+ type FileList struct {
+ Files map[string]any `json:"files"`
+ }
+
+ jsonResponse(ctx, http.StatusOK, &FileList{
+ Files: files,
+ })
+}
diff --git a/routers/api/packages/conan/search.go b/routers/api/packages/conan/search.go
new file mode 100644
index 0000000..7370c70
--- /dev/null
+++ b/routers/api/packages/conan/search.go
@@ -0,0 +1,163 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conan
+
+import (
+ "net/http"
+ "strings"
+
+ conan_model "code.gitea.io/gitea/models/packages/conan"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ conan_module "code.gitea.io/gitea/modules/packages/conan"
+ "code.gitea.io/gitea/services/context"
+)
+
+// SearchResult contains the found recipe names
+type SearchResult struct {
+ Results []string `json:"results"`
+}
+
+// SearchRecipes searches all recipes matching the query
+func SearchRecipes(ctx *context.Context) {
+ q := ctx.FormTrim("q")
+
+ opts := parseQuery(ctx.Package.Owner, q)
+
+ results, err := conan_model.SearchRecipes(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ jsonResponse(ctx, http.StatusOK, &SearchResult{
+ Results: results,
+ })
+}
+
+// parseQuery creates search options for the given query
+func parseQuery(owner *user_model.User, query string) *conan_model.RecipeSearchOptions {
+ opts := &conan_model.RecipeSearchOptions{
+ OwnerID: owner.ID,
+ }
+
+ if query != "" {
+ parts := strings.Split(strings.ReplaceAll(query, "@", "/"), "/")
+
+ opts.Name = parts[0]
+ if len(parts) > 1 && parts[1] != "*" {
+ opts.Version = parts[1]
+ }
+ if len(parts) > 2 && parts[2] != "*" {
+ opts.User = parts[2]
+ }
+ if len(parts) > 3 && parts[3] != "*" {
+ opts.Channel = parts[3]
+ }
+ }
+
+ return opts
+}
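+
+// For example, the query "zlib/1.2.11@user/stable" is normalized to
+// "zlib/1.2.11/user/stable" and parsed into Name=zlib, Version=1.2.11,
+// User=user, Channel=stable; a "*" for version, user or channel leaves
+// that field unset.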
+
+// SearchPackagesV1 searches all packages of a recipe (Conan v1 endpoint)
+func SearchPackagesV1(ctx *context.Context) {
+ searchPackages(ctx, true)
+}
+
+// SearchPackagesV2 searches all packages of a recipe (Conan v2 endpoint)
+func SearchPackagesV2(ctx *context.Context) {
+ searchPackages(ctx, false)
+}
+
+func searchPackages(ctx *context.Context, searchAllRevisions bool) {
+ rref := ctx.Data[recipeReferenceKey].(*conan_module.RecipeReference)
+
+ if !searchAllRevisions && rref.Revision == "" {
+ lastRevision, err := conan_model.GetLastRecipeRevision(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ if err == conan_model.ErrRecipeReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ rref = rref.WithRevision(lastRevision.Value)
+ } else {
+ has, err := conan_model.RecipeExists(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ if err == conan_model.ErrRecipeReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ if !has {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+ }
+
+ recipeRevisions := []*conan_model.PropertyValue{{Value: rref.Revision}}
+ if searchAllRevisions {
+ var err error
+ recipeRevisions, err = conan_model.GetRecipeRevisions(ctx, ctx.Package.Owner.ID, rref)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ result := make(map[string]*conan_module.Conaninfo)
+
+ for _, recipeRevision := range recipeRevisions {
+ currentRef := rref
+ if recipeRevision.Value != "" {
+ currentRef = rref.WithRevision(recipeRevision.Value)
+ }
+ packageReferences, err := conan_model.GetPackageReferences(ctx, ctx.Package.Owner.ID, currentRef)
+ if err != nil {
+ if err == conan_model.ErrRecipeReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ for _, packageReference := range packageReferences {
+ if _, ok := result[packageReference.Value]; ok {
+ continue
+ }
+ pref, _ := conan_module.NewPackageReference(currentRef, packageReference.Value, "")
+ lastPackageRevision, err := conan_model.GetLastPackageRevision(ctx, ctx.Package.Owner.ID, pref)
+ if err != nil {
+ if err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ pref = pref.WithRevision(lastPackageRevision.Value)
+ infoRaw, err := conan_model.GetPackageInfo(ctx, ctx.Package.Owner.ID, pref)
+ if err != nil {
+ if err == conan_model.ErrPackageReferenceNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ var info *conan_module.Conaninfo
+ if err := json.Unmarshal([]byte(infoRaw), &info); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ result[pref.Reference] = info
+ }
+ }
+
+ jsonResponse(ctx, http.StatusOK, result)
+}
diff --git a/routers/api/packages/conda/conda.go b/routers/api/packages/conda/conda.go
new file mode 100644
index 0000000..c7e4544
--- /dev/null
+++ b/routers/api/packages/conda/conda.go
@@ -0,0 +1,303 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package conda
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ conda_model "code.gitea.io/gitea/models/packages/conda"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ conda_module "code.gitea.io/gitea/modules/packages/conda"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/dsnet/compress/bzip2"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, struct {
+ Reason string `json:"reason"`
+ Message string `json:"message"`
+ }{
+ Reason: http.StatusText(status),
+ Message: message,
+ })
+ })
+}
+
+func EnumeratePackages(ctx *context.Context) {
+ type Info struct {
+ Subdir string `json:"subdir"`
+ }
+
+ type PackageInfo struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ NoArch string `json:"noarch"`
+ Subdir string `json:"subdir"`
+ Timestamp int64 `json:"timestamp"`
+ Build string `json:"build"`
+ BuildNumber int64 `json:"build_number"`
+ Dependencies []string `json:"depends"`
+ License string `json:"license"`
+ LicenseFamily string `json:"license_family"`
+ HashMD5 string `json:"md5"`
+ HashSHA256 string `json:"sha256"`
+ Size int64 `json:"size"`
+ }
+
+ type RepoData struct {
+ Info Info `json:"info"`
+ Packages map[string]*PackageInfo `json:"packages"`
+ PackagesConda map[string]*PackageInfo `json:"packages.conda"`
+ Removed map[string]*PackageInfo `json:"removed"`
+ }
+
+ repoData := &RepoData{
+ Info: Info{
+ Subdir: ctx.Params("architecture"),
+ },
+ Packages: make(map[string]*PackageInfo),
+ PackagesConda: make(map[string]*PackageInfo),
+ Removed: make(map[string]*PackageInfo),
+ }
+
+ pfs, err := conda_model.SearchFiles(ctx, &conda_model.FileSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Channel: ctx.Params("channel"),
+ Subdir: repoData.Info.Subdir,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pfs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pds := make(map[int64]*packages_model.PackageDescriptor)
+
+ for _, pf := range pfs {
+ pd, exists := pds[pf.VersionID]
+ if !exists {
+ pv, err := packages_model.GetVersionByID(ctx, pf.VersionID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds[pf.VersionID] = pd
+ }
+
+ var pfd *packages_model.PackageFileDescriptor
+ for _, d := range pd.Files {
+ if d.File.ID == pf.ID {
+ pfd = d
+ break
+ }
+ }
+
+ var fileMetadata *conda_module.FileMetadata
+ if err := json.Unmarshal([]byte(pfd.Properties.GetByName(conda_module.PropertyMetadata)), &fileMetadata); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ versionMetadata := pd.Metadata.(*conda_module.VersionMetadata)
+
+ pi := &PackageInfo{
+ Name: pd.PackageProperties.GetByName(conda_module.PropertyName),
+ Version: pd.Version.Version,
+ NoArch: fileMetadata.NoArch,
+ Subdir: repoData.Info.Subdir,
+ Timestamp: fileMetadata.Timestamp,
+ Build: fileMetadata.Build,
+ BuildNumber: fileMetadata.BuildNumber,
+ Dependencies: fileMetadata.Dependencies,
+ License: versionMetadata.License,
+ LicenseFamily: versionMetadata.LicenseFamily,
+ HashMD5: pfd.Blob.HashMD5,
+ HashSHA256: pfd.Blob.HashSHA256,
+ Size: pfd.Blob.Size,
+ }
+
+ if fileMetadata.IsCondaPackage {
+ repoData.PackagesConda[pfd.File.Name] = pi
+ } else {
+ repoData.Packages[pfd.File.Name] = pi
+ }
+ }
+
+ resp := ctx.Resp
+
+ var w io.Writer = resp
+
+ if strings.HasSuffix(ctx.Params("filename"), ".json") {
+ resp.Header().Set("Content-Type", "application/json")
+ } else {
+ resp.Header().Set("Content-Type", "application/x-bzip2")
+
+ zw, err := bzip2.NewWriter(w, nil)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer zw.Close()
+
+ w = zw
+ }
+
+ resp.WriteHeader(http.StatusOK)
+
+ if err := json.NewEncoder(w).Encode(repoData); err != nil {
+ log.Error("JSON encode: %v", err)
+ }
+}
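+
+// EnumeratePackages serves the channel index: plain JSON when the requested
+// file name ends in ".json" and bzip2-compressed JSON otherwise
+// (conventionally repodata.json and repodata.json.bz2).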
+
+func UploadPackageFile(ctx *context.Context) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ var pck *conda_module.Package
+ if strings.HasSuffix(strings.ToLower(ctx.Params("filename")), ".tar.bz2") {
+ pck, err = conda_module.ParsePackageBZ2(buf)
+ } else {
+ pck, err = conda_module.ParsePackageConda(buf, buf.Size())
+ }
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ fullName := pck.Name
+
+ channel := ctx.Params("channel")
+ if channel != "" {
+ fullName = channel + "/" + pck.Name
+ }
+
+ extension := ".tar.bz2"
+ if pck.FileMetadata.IsCondaPackage {
+ extension = ".conda"
+ }
+
+ fileMetadataRaw, err := json.Marshal(pck.FileMetadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeConda,
+ Name: fullName,
+ Version: pck.Version,
+ },
+ SemverCompatible: false,
+ Creator: ctx.Doer,
+ Metadata: pck.VersionMetadata,
+ PackageProperties: map[string]string{
+ conda_module.PropertyName: pck.Name,
+ conda_module.PropertyChannel: channel,
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s-%s%s", pck.Name, pck.Version, pck.FileMetadata.Build, extension),
+ CompositeKey: pck.Subdir,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: map[string]string{
+ conda_module.PropertySubdir: pck.Subdir,
+ conda_module.PropertyMetadata: string(fileMetadataRaw),
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
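+
+// UploadPackageFile prefixes the package name with the channel (if any) and
+// stores the architecture subdir as the file's composite key, so the same
+// file name can exist once per subdir.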
+
+func DownloadPackageFile(ctx *context.Context) {
+ pfs, err := conda_model.SearchFiles(ctx, &conda_model.FileSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Channel: ctx.Params("channel"),
+ Subdir: ctx.Params("architecture"),
+ Filename: ctx.Params("filename"),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pfs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pf := pfs[0]
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
diff --git a/routers/api/packages/container/auth.go b/routers/api/packages/container/auth.go
new file mode 100644
index 0000000..a8b3ec1
--- /dev/null
+++ b/routers/api/packages/container/auth.go
@@ -0,0 +1,49 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/services/auth"
+ "code.gitea.io/gitea/services/packages"
+)
+
+var _ auth.Method = &Auth{}
+
+type Auth struct{}
+
+func (a *Auth) Name() string {
+ return "container"
+}
+
+// Verify extracts the user from the Bearer token
+// If it's an anonymous session, a ghost user is returned
+func (a *Auth) Verify(req *http.Request, w http.ResponseWriter, store auth.DataStore, sess auth.SessionStore) (*user_model.User, error) {
+ uid, scope, err := packages.ParseAuthorizationToken(req)
+ if err != nil {
+ log.Trace("ParseAuthorizationToken: %v", err)
+ return nil, err
+ }
+
+ if uid == 0 {
+ return nil, nil
+ }
+
+ // Propagate scope of the authorization token.
+ if scope != "" {
+ store.GetData()["IsApiToken"] = true
+ store.GetData()["ApiTokenScope"] = scope
+ }
+
+ u, err := user_model.GetPossibleUserByID(req.Context(), uid)
+ if err != nil {
+ log.Error("GetPossibleUserByID: %v", err)
+ return nil, err
+ }
+
+ return u, nil
+}
diff --git a/routers/api/packages/container/blob.go b/routers/api/packages/container/blob.go
new file mode 100644
index 0000000..9e3a470
--- /dev/null
+++ b/routers/api/packages/container/blob.go
@@ -0,0 +1,202 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_model "code.gitea.io/gitea/models/packages/container"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/util"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+var uploadVersionMutex sync.Mutex
+
+// saveAsPackageBlob creates a package blob from an upload
+// The uploaded blob gets stored in a special upload version to link it to the package/image
+func saveAsPackageBlob(ctx context.Context, hsr packages_module.HashedSizeReader, pci *packages_service.PackageCreationInfo) (*packages_model.PackageBlob, error) { //nolint:unparam
+ pb := packages_service.NewPackageBlob(hsr)
+
+ exists := false
+
+ contentStore := packages_module.NewContentStore()
+
+ uploadVersion, err := getOrCreateUploadVersion(ctx, &pci.PackageInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ err = db.WithTx(ctx, func(ctx context.Context) error {
+ if err := packages_service.CheckSizeQuotaExceeded(ctx, pci.Creator, pci.Owner, packages_model.TypeContainer, hsr.Size()); err != nil {
+ return err
+ }
+
+ pb, exists, err = packages_model.GetOrInsertBlob(ctx, pb)
+ if err != nil {
+ log.Error("Error inserting package blob: %v", err)
+ return err
+ }
+ // FIXME: Workaround to be removed in v1.20
+ // https://github.com/go-gitea/gitea/issues/19586
+ if exists {
+ err = contentStore.Has(packages_module.BlobHash256Key(pb.HashSHA256))
+ if err != nil && (errors.Is(err, util.ErrNotExist) || errors.Is(err, os.ErrNotExist)) {
+ log.Debug("Package registry inconsistent: blob %s does not exist on file system", pb.HashSHA256)
+ exists = false
+ }
+ }
+ if !exists {
+ if err := contentStore.Save(packages_module.BlobHash256Key(pb.HashSHA256), hsr, hsr.Size()); err != nil {
+ log.Error("Error saving package blob in content store: %v", err)
+ return err
+ }
+ }
+
+ return createFileForBlob(ctx, uploadVersion, pb)
+ })
+ if err != nil {
+ if !exists {
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob from content store: %v", err)
+ }
+ }
+ return nil, err
+ }
+
+ return pb, nil
+}
+
+// mountBlob mounts the given blob to a different package
+func mountBlob(ctx context.Context, pi *packages_service.PackageInfo, pb *packages_model.PackageBlob) error {
+ uploadVersion, err := getOrCreateUploadVersion(ctx, pi)
+ if err != nil {
+ return err
+ }
+
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ return createFileForBlob(ctx, uploadVersion, pb)
+ })
+}
+
+func getOrCreateUploadVersion(ctx context.Context, pi *packages_service.PackageInfo) (*packages_model.PackageVersion, error) {
+ var uploadVersion *packages_model.PackageVersion
+
+ // FIXME: Replace usage of mutex with database transaction
+ // https://github.com/go-gitea/gitea/pull/21862
+ uploadVersionMutex.Lock()
+ err := db.WithTx(ctx, func(ctx context.Context) error {
+ created := true
+ p := &packages_model.Package{
+ OwnerID: pi.Owner.ID,
+ Type: packages_model.TypeContainer,
+ Name: strings.ToLower(pi.Name),
+ LowerName: strings.ToLower(pi.Name),
+ }
+ var err error
+ if p, err = packages_model.TryInsertPackage(ctx, p); err != nil {
+ if err == packages_model.ErrDuplicatePackage {
+ created = false
+ } else {
+ log.Error("Error inserting package: %v", err)
+ return err
+ }
+ }
+
+ if created {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository, strings.ToLower(pi.Owner.LowerName+"/"+pi.Name)); err != nil {
+ log.Error("Error setting package property: %v", err)
+ return err
+ }
+ }
+
+ pv := &packages_model.PackageVersion{
+ PackageID: p.ID,
+ CreatorID: pi.Owner.ID,
+ Version: container_model.UploadVersion,
+ LowerVersion: container_model.UploadVersion,
+ IsInternal: true,
+ MetadataJSON: "null",
+ }
+ if pv, err = packages_model.GetOrInsertVersion(ctx, pv); err != nil {
+ if err != packages_model.ErrDuplicatePackageVersion {
+ log.Error("Error inserting package: %v", err)
+ return err
+ }
+ }
+
+ uploadVersion = pv
+
+ return nil
+ })
+ uploadVersionMutex.Unlock()
+
+ return uploadVersion, err
+}
+
+func createFileForBlob(ctx context.Context, pv *packages_model.PackageVersion, pb *packages_model.PackageBlob) error {
+ filename := strings.ToLower(fmt.Sprintf("sha256_%s", pb.HashSHA256))
+
+ pf := &packages_model.PackageFile{
+ VersionID: pv.ID,
+ BlobID: pb.ID,
+ Name: filename,
+ LowerName: filename,
+ CompositeKey: packages_model.EmptyFileKey,
+ }
+ var err error
+ if pf, err = packages_model.TryInsertFile(ctx, pf); err != nil {
+ if err == packages_model.ErrDuplicatePackageFile {
+ return nil
+ }
+ log.Error("Error inserting package file: %v", err)
+ return err
+ }
+
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeFile, pf.ID, container_module.PropertyDigest, digestFromPackageBlob(pb)); err != nil {
+ log.Error("Error setting package file property: %v", err)
+ return err
+ }
+
+ return nil
+}
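+
+// createFileForBlob tolerates duplicates: ErrDuplicatePackageFile is treated
+// as success, so mounting or re-pushing an already known blob is a no-op here.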
+
+func deleteBlob(ctx context.Context, ownerID int64, image, digest string) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ pfds, err := container_model.GetContainerBlobs(ctx, &container_model.BlobSearchOptions{
+ OwnerID: ownerID,
+ Image: image,
+ Digest: digest,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, file := range pfds {
+ if err := packages_service.DeletePackageFile(ctx, file.File); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func digestFromHashSummer(h packages_module.HashSummer) string {
+ _, _, hashSHA256, _ := h.Sums()
+ return "sha256:" + hex.EncodeToString(hashSHA256)
+}
+
+func digestFromPackageBlob(pb *packages_model.PackageBlob) string {
+ return "sha256:" + pb.HashSHA256
+}
diff --git a/routers/api/packages/container/container.go b/routers/api/packages/container/container.go
new file mode 100644
index 0000000..f376e7b
--- /dev/null
+++ b/routers/api/packages/container/container.go
@@ -0,0 +1,785 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_model "code.gitea.io/gitea/models/packages/container"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+ container_service "code.gitea.io/gitea/services/packages/container"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+// maximum size of a container manifest
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-manifests
+const maxManifestSize = 10 * 1024 * 1024
+
+var (
+ imageNamePattern = regexp.MustCompile(`\A[a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*\z`)
+ referencePattern = regexp.MustCompile(`\A[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}\z`)
+)
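+
+// imageNamePattern and referencePattern are simplified forms of the
+// repository-name and tag grammars from the OCI distribution spec.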
+
+type containerHeaders struct {
+ Status int
+ ContentDigest string
+ UploadUUID string
+ Range string
+ Location string
+ ContentType string
+ ContentLength int64
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#legacy-docker-support-http-headers
+func setResponseHeaders(resp http.ResponseWriter, h *containerHeaders) {
+ if h.Location != "" {
+ resp.Header().Set("Location", h.Location)
+ }
+ if h.Range != "" {
+ resp.Header().Set("Range", h.Range)
+ }
+ if h.ContentType != "" {
+ resp.Header().Set("Content-Type", h.ContentType)
+ }
+ if h.ContentLength != 0 {
+ resp.Header().Set("Content-Length", strconv.FormatInt(h.ContentLength, 10))
+ }
+ if h.UploadUUID != "" {
+ resp.Header().Set("Docker-Upload-Uuid", h.UploadUUID)
+ }
+ if h.ContentDigest != "" {
+ resp.Header().Set("Docker-Content-Digest", h.ContentDigest)
+ resp.Header().Set("ETag", fmt.Sprintf(`"%s"`, h.ContentDigest))
+ }
+ resp.Header().Set("Docker-Distribution-Api-Version", "registry/2.0")
+ resp.WriteHeader(h.Status)
+}
+
+func jsonResponse(ctx *context.Context, status int, obj any) {
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: status,
+ ContentType: "application/json",
+ })
+ if err := json.NewEncoder(ctx.Resp).Encode(obj); err != nil {
+ log.Error("JSON encode: %v", err)
+ }
+}
+
+func apiError(ctx *context.Context, status int, err error) {
+ helper.LogAndProcessError(ctx, status, err, func(message string) {
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: status,
+ })
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#error-codes
+func apiErrorDefined(ctx *context.Context, err *namedError) {
+ type ContainerError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ }
+
+ type ContainerErrors struct {
+ Errors []ContainerError `json:"errors"`
+ }
+
+ jsonResponse(ctx, err.StatusCode, ContainerErrors{
+ Errors: []ContainerError{
+ {
+ Code: err.Code,
+ Message: err.Message,
+ },
+ },
+ })
+}
+
+func apiUnauthorizedError(ctx *context.Context) {
+ ctx.Resp.Header().Add("WWW-Authenticate", `Bearer realm="`+setting.AppURL+`v2/token",service="container_registry",scope="*"`)
+ apiErrorDefined(ctx, errUnauthorized)
+}
+
+// ReqContainerAccess is a middleware which checks that the current user is valid (a real user, or the ghost user if anonymous access is enabled)
+func ReqContainerAccess(ctx *context.Context) {
+ if ctx.Doer == nil || (setting.Service.RequireSignInView && ctx.Doer.IsGhost()) {
+ apiUnauthorizedError(ctx)
+ }
+}
+
+// VerifyImageName is a middleware which checks if the image name is allowed
+func VerifyImageName(ctx *context.Context) {
+ if !imageNamePattern.MatchString(ctx.Params("image")) {
+ apiErrorDefined(ctx, errNameInvalid)
+ }
+}
+
+// DetermineSupport is used to test if the registry supports OCI
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#determining-support
+func DetermineSupport(ctx *context.Context) {
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: http.StatusOK,
+ })
+}
+
+// Authenticate creates a token for the current user
+// If the current user is anonymous, the ghost user is used unless RequireSignInView is enabled.
+func Authenticate(ctx *context.Context) {
+ u := ctx.Doer
+ if u == nil {
+ if setting.Service.RequireSignInView {
+ apiUnauthorizedError(ctx)
+ return
+ }
+
+ u = user_model.NewGhostUser()
+ }
+
+ // If there's an API scope, ensure it propagates.
+ scope, _ := ctx.Data["ApiTokenScope"].(auth_model.AccessTokenScope)
+
+ token, err := packages_service.CreateAuthorizationToken(u, scope)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, map[string]string{
+ "token": token,
+ })
+}
+
+// https://distribution.github.io/distribution/spec/auth/oauth/
+func AuthenticateNotImplemented(ctx *context.Context) {
+ // This optional endpoint can be used to authenticate a client.
+ // It must implement the specification described in:
+ // https://datatracker.ietf.org/doc/html/rfc6749
+ // https://distribution.github.io/distribution/spec/auth/oauth/
+ // Purpose of this stub is to respond with 404 Not Found instead of 405 Method Not Allowed.
+
+ ctx.Status(http.StatusNotFound)
+}
+
+// https://docs.docker.com/registry/spec/api/#listing-repositories
+func GetRepositoryList(ctx *context.Context) {
+ n := ctx.FormInt("n")
+ if n <= 0 || n > 100 {
+ n = 100
+ }
+ last := ctx.FormTrim("last")
+
+ repositories, err := container_model.GetRepositories(ctx, ctx.Doer, n, last)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type RepositoryList struct {
+ Repositories []string `json:"repositories"`
+ }
+
+ if len(repositories) == n {
+ v := url.Values{}
+ if n > 0 {
+ v.Add("n", strconv.Itoa(n))
+ }
+ v.Add("last", repositories[len(repositories)-1])
+
+ ctx.Resp.Header().Set("Link", fmt.Sprintf(`</v2/_catalog?%s>; rel="next"`, v.Encode()))
+ }
+
+ jsonResponse(ctx, http.StatusOK, RepositoryList{
+ Repositories: repositories,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#mounting-a-blob-from-another-repository
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#single-post
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+func InitiateUploadBlob(ctx *context.Context) {
+ image := ctx.Params("image")
+
+ mount := ctx.FormTrim("mount")
+ from := ctx.FormTrim("from")
+ if mount != "" {
+ blob, _ := workaroundGetContainerBlob(ctx, &container_model.BlobSearchOptions{
+ Repository: from,
+ Digest: mount,
+ })
+ if blob != nil {
+ accessible, err := packages_model.IsBlobAccessibleForUser(ctx, blob.Blob.ID, ctx.Doer)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if accessible {
+ if err := mountBlob(ctx, &packages_service.PackageInfo{Owner: ctx.Package.Owner, Name: image}, blob.Blob); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/blobs/%s", ctx.Package.Owner.LowerName, image, mount),
+ ContentDigest: mount,
+ Status: http.StatusCreated,
+ })
+ return
+ }
+ }
+ }
+
+ digest := ctx.FormTrim("digest")
+ if digest != "" {
+ buf, err := packages_module.CreateHashedBufferFromReader(ctx.Req.Body)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ if digest != digestFromHashSummer(buf) {
+ apiErrorDefined(ctx, errDigestInvalid)
+ return
+ }
+
+ if _, err := saveAsPackageBlob(ctx,
+ buf,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ Name: image,
+ },
+ Creator: ctx.Doer,
+ },
+ ); err != nil {
+ switch err {
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/blobs/%s", ctx.Package.Owner.LowerName, image, digest),
+ ContentDigest: digest,
+ Status: http.StatusCreated,
+ })
+ return
+ }
+
+ upload, err := packages_model.CreateBlobUpload(ctx)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/blobs/uploads/%s", ctx.Package.Owner.LowerName, image, upload.ID),
+ Range: "0-0",
+ UploadUUID: upload.ID,
+ Status: http.StatusAccepted,
+ })
+}
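+
+// InitiateUploadBlob handles three cases: a cross-repository blob mount
+// ("mount" and "from" form parameters), a monolithic upload in a single POST
+// ("digest" parameter), or otherwise the start of a chunked upload session
+// identified by the returned upload UUID.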
+
+// https://docs.docker.com/registry/spec/api/#get-blob-upload
+func GetUploadBlob(ctx *context.Context) {
+ uuid := ctx.Params("uuid")
+
+ upload, err := packages_model.GetBlobUploadByID(ctx, uuid)
+ if err != nil {
+ if err == packages_model.ErrPackageBlobUploadNotExist {
+ apiErrorDefined(ctx, errBlobUploadUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Range: fmt.Sprintf("0-%d", upload.BytesReceived),
+ UploadUUID: upload.ID,
+ Status: http.StatusNoContent,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+func UploadBlob(ctx *context.Context) {
+ image := ctx.Params("image")
+
+ uploader, err := container_service.NewBlobUploader(ctx, ctx.Params("uuid"))
+ if err != nil {
+ if err == packages_model.ErrPackageBlobUploadNotExist {
+ apiErrorDefined(ctx, errBlobUploadUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ defer uploader.Close()
+
+ contentRange := ctx.Req.Header.Get("Content-Range")
+ if contentRange != "" {
+ start, end := 0, 0
+ if _, err := fmt.Sscanf(contentRange, "%d-%d", &start, &end); err != nil {
+ apiErrorDefined(ctx, errBlobUploadInvalid)
+ return
+ }
+
+ if int64(start) != uploader.Size() {
+ apiErrorDefined(ctx, errBlobUploadInvalid.WithStatusCode(http.StatusRequestedRangeNotSatisfiable))
+ return
+ }
+ } else if uploader.Size() != 0 {
+ apiErrorDefined(ctx, errBlobUploadInvalid.WithMessage("Stream uploads after first write are not allowed"))
+ return
+ }
+
+ if err := uploader.Append(ctx, ctx.Req.Body); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/blobs/uploads/%s", ctx.Package.Owner.LowerName, image, uploader.ID),
+ Range: fmt.Sprintf("0-%d", uploader.Size()-1),
+ UploadUUID: uploader.ID,
+ Status: http.StatusAccepted,
+ })
+}
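+
+// UploadBlob validates the Content-Range header, if present, by requiring the
+// range start to match the number of bytes already received; a request without
+// Content-Range is only accepted while nothing has been received yet.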
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+func EndUploadBlob(ctx *context.Context) {
+ image := ctx.Params("image")
+
+ digest := ctx.FormTrim("digest")
+ if digest == "" {
+ apiErrorDefined(ctx, errDigestInvalid)
+ return
+ }
+
+ uploader, err := container_service.NewBlobUploader(ctx, ctx.Params("uuid"))
+ if err != nil {
+ if err == packages_model.ErrPackageBlobUploadNotExist {
+ apiErrorDefined(ctx, errBlobUploadUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ doClose := true
+ defer func() {
+ if doClose {
+ uploader.Close()
+ }
+ }()
+
+ if ctx.Req.Body != nil {
+ if err := uploader.Append(ctx, ctx.Req.Body); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ if digest != digestFromHashSummer(uploader) {
+ apiErrorDefined(ctx, errDigestInvalid)
+ return
+ }
+
+ if _, err := saveAsPackageBlob(ctx,
+ uploader,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ Name: image,
+ },
+ Creator: ctx.Doer,
+ },
+ ); err != nil {
+ switch err {
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := uploader.Close(); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ doClose = false
+
+ if err := container_service.RemoveBlobUploadByID(ctx, uploader.ID); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/blobs/%s", ctx.Package.Owner.LowerName, image, digest),
+ ContentDigest: digest,
+ Status: http.StatusCreated,
+ })
+}
+
+// https://docs.docker.com/registry/spec/api/#delete-blob-upload
+func CancelUploadBlob(ctx *context.Context) {
+ uuid := ctx.Params("uuid")
+
+ _, err := packages_model.GetBlobUploadByID(ctx, uuid)
+ if err != nil {
+ if err == packages_model.ErrPackageBlobUploadNotExist {
+ apiErrorDefined(ctx, errBlobUploadUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := container_service.RemoveBlobUploadByID(ctx, uuid); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: http.StatusNoContent,
+ })
+}
+
+func getBlobFromContext(ctx *context.Context) (*packages_model.PackageFileDescriptor, error) {
+ d := ctx.Params("digest")
+
+ if digest.Digest(d).Validate() != nil {
+ return nil, container_model.ErrContainerBlobNotExist
+ }
+
+ return workaroundGetContainerBlob(ctx, &container_model.BlobSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Image: ctx.Params("image"),
+ Digest: d,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#checking-if-content-exists-in-the-registry
+func HeadBlob(ctx *context.Context) {
+ blob, err := getBlobFromContext(ctx)
+ if err != nil {
+ if err == container_model.ErrContainerBlobNotExist {
+ apiErrorDefined(ctx, errBlobUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ ContentDigest: blob.Properties.GetByName(container_module.PropertyDigest),
+ ContentLength: blob.Blob.Size,
+ Status: http.StatusOK,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-blobs
+func GetBlob(ctx *context.Context) {
+ blob, err := getBlobFromContext(ctx)
+ if err != nil {
+ if err == container_model.ErrContainerBlobNotExist {
+ apiErrorDefined(ctx, errBlobUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ serveBlob(ctx, blob)
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#deleting-blobs
+func DeleteBlob(ctx *context.Context) {
+ d := ctx.Params("digest")
+
+ if digest.Digest(d).Validate() != nil {
+ apiErrorDefined(ctx, errBlobUnknown)
+ return
+ }
+
+ if err := deleteBlob(ctx, ctx.Package.Owner.ID, ctx.Params("image"), d); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: http.StatusAccepted,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-manifests
+func UploadManifest(ctx *context.Context) {
+ reference := ctx.Params("reference")
+
+ mci := &manifestCreationInfo{
+ MediaType: ctx.Req.Header.Get("Content-Type"),
+ Owner: ctx.Package.Owner,
+ Creator: ctx.Doer,
+ Image: ctx.Params("image"),
+ Reference: reference,
+ IsTagged: digest.Digest(reference).Validate() != nil,
+ }
+
+ if mci.IsTagged && !referencePattern.MatchString(reference) {
+ apiErrorDefined(ctx, errManifestInvalid.WithMessage("Tag is invalid"))
+ return
+ }
+
+ maxSize := maxManifestSize + 1
+ buf, err := packages_module.CreateHashedBufferFromReaderWithSize(&io.LimitedReader{R: ctx.Req.Body, N: int64(maxSize)}, maxSize)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ if buf.Size() > maxManifestSize {
+ apiErrorDefined(ctx, errManifestInvalid.WithMessage("Manifest exceeds maximum size").WithStatusCode(http.StatusRequestEntityTooLarge))
+ return
+ }
+
+ digest, err := processManifest(ctx, mci, buf)
+ if err != nil {
+ var namedError *namedError
+ if errors.As(err, &namedError) {
+ apiErrorDefined(ctx, namedError)
+ } else if errors.Is(err, container_model.ErrContainerBlobNotExist) {
+ apiErrorDefined(ctx, errBlobUnknown)
+ } else {
+ switch err {
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ }
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Location: fmt.Sprintf("/v2/%s/%s/manifests/%s", ctx.Package.Owner.LowerName, mci.Image, reference),
+ ContentDigest: digest,
+ Status: http.StatusCreated,
+ })
+}
+
+func getBlobSearchOptionsFromContext(ctx *context.Context) (*container_model.BlobSearchOptions, error) {
+ reference := ctx.Params("reference")
+
+ opts := &container_model.BlobSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Image: ctx.Params("image"),
+ IsManifest: true,
+ }
+
+ if digest.Digest(reference).Validate() == nil {
+ opts.Digest = reference
+ } else if referencePattern.MatchString(reference) {
+ opts.Tag = reference
+ } else {
+ return nil, container_model.ErrContainerBlobNotExist
+ }
+
+ return opts, nil
+}
+
+func getManifestFromContext(ctx *context.Context) (*packages_model.PackageFileDescriptor, error) {
+ opts, err := getBlobSearchOptionsFromContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return workaroundGetContainerBlob(ctx, opts)
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#checking-if-content-exists-in-the-registry
+func HeadManifest(ctx *context.Context) {
+ manifest, err := getManifestFromContext(ctx)
+ if err != nil {
+ if err == container_model.ErrContainerBlobNotExist {
+ apiErrorDefined(ctx, errManifestUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ ContentDigest: manifest.Properties.GetByName(container_module.PropertyDigest),
+ ContentType: manifest.Properties.GetByName(container_module.PropertyMediaType),
+ ContentLength: manifest.Blob.Size,
+ Status: http.StatusOK,
+ })
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
+func GetManifest(ctx *context.Context) {
+ manifest, err := getManifestFromContext(ctx)
+ if err != nil {
+ if err == container_model.ErrContainerBlobNotExist {
+ apiErrorDefined(ctx, errManifestUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ serveBlob(ctx, manifest)
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#deleting-tags
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#deleting-manifests
+func DeleteManifest(ctx *context.Context) {
+ opts, err := getBlobSearchOptionsFromContext(ctx)
+ if err != nil {
+ apiErrorDefined(ctx, errManifestUnknown)
+ return
+ }
+
+ pvs, err := container_model.GetManifestVersions(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) == 0 {
+ apiErrorDefined(ctx, errManifestUnknown)
+ return
+ }
+
+ for _, pv := range pvs {
+ if err := packages_service.RemovePackageVersion(ctx, ctx.Doer, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ setResponseHeaders(ctx.Resp, &containerHeaders{
+ Status: http.StatusAccepted,
+ })
+}
+
+func serveBlob(ctx *context.Context, pfd *packages_model.PackageFileDescriptor) {
+ s, u, _, err := packages_service.GetPackageBlobStream(ctx, pfd.File, pfd.Blob)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ headers := &containerHeaders{
+ ContentDigest: pfd.Properties.GetByName(container_module.PropertyDigest),
+ ContentType: pfd.Properties.GetByName(container_module.PropertyMediaType),
+ ContentLength: pfd.Blob.Size,
+ Status: http.StatusOK,
+ }
+
+ if u != nil {
+ headers.Status = http.StatusTemporaryRedirect
+ headers.Location = u.String()
+
+ setResponseHeaders(ctx.Resp, headers)
+ return
+ }
+
+ defer s.Close()
+
+ setResponseHeaders(ctx.Resp, headers)
+ if _, err := io.Copy(ctx.Resp, s); err != nil {
+ log.Error("Error whilst copying content to response: %v", err)
+ }
+}
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#content-discovery
+func GetTagList(ctx *context.Context) {
+ image := ctx.Params("image")
+
+ if _, err := packages_model.GetPackageByName(ctx, ctx.Package.Owner.ID, packages_model.TypeContainer, image); err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiErrorDefined(ctx, errNameUnknown)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ n := -1
+ if ctx.FormTrim("n") != "" {
+ n = ctx.FormInt("n")
+ }
+ last := ctx.FormTrim("last")
+
+ tags, err := container_model.GetImageTags(ctx, ctx.Package.Owner.ID, image, n, last)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type TagList struct {
+ Name string `json:"name"`
+ Tags []string `json:"tags"`
+ }
+
+ if len(tags) > 0 {
+ v := url.Values{}
+ if n > 0 {
+ v.Add("n", strconv.Itoa(n))
+ }
+ v.Add("last", tags[len(tags)-1])
+
+ ctx.Resp.Header().Set("Link", fmt.Sprintf(`</v2/%s/%s/tags/list?%s>; rel="next"`, ctx.Package.Owner.LowerName, image, v.Encode()))
+ }
+
+ jsonResponse(ctx, http.StatusOK, TagList{
+ Name: strings.ToLower(ctx.Package.Owner.LowerName + "/" + image),
+ Tags: tags,
+ })
+}
+
+// FIXME: Workaround to be removed in v1.20
+// https://github.com/go-gitea/gitea/issues/19586
+func workaroundGetContainerBlob(ctx *context.Context, opts *container_model.BlobSearchOptions) (*packages_model.PackageFileDescriptor, error) {
+ blob, err := container_model.GetContainerBlob(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ err = packages_module.NewContentStore().Has(packages_module.BlobHash256Key(blob.Blob.HashSHA256))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) || errors.Is(err, os.ErrNotExist) {
+ log.Debug("Package registry inconsistent: blob %s does not exist on file system", blob.Blob.HashSHA256)
+ return nil, container_model.ErrContainerBlobNotExist
+ }
+ return nil, err
+ }
+
+ return blob, nil
+}
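
For orientation, below is a minimal client-side sketch of the blob push flow these handlers implement: a POST opens an upload session (handled earlier in this file) and a digest-carrying PUT finishes it in EndUploadBlob. The sketch uses only net/http and the go-digest module; the registry base URL is a placeholder and authentication is omitted, so treat it as an illustration of the request shapes rather than a ready-made client.

    package example

    import (
        "bytes"
        "fmt"
        "net/http"

        "github.com/opencontainers/go-digest"
    )

    // pushBlob uploads one blob: a POST starts an upload session, then a PUT to the
    // returned Location with ?digest=... lets the server verify and store the blob
    // (the server-side counterpart of that PUT is EndUploadBlob above).
    func pushBlob(registry, repository string, data []byte) error {
        startRes, err := http.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, repository), "application/octet-stream", nil)
        if err != nil {
            return err
        }
        defer startRes.Body.Close()
        if startRes.StatusCode != http.StatusAccepted {
            return fmt.Errorf("starting upload: unexpected status %d", startRes.StatusCode)
        }
        location := startRes.Header.Get("Location") // e.g. /v2/<owner>/<image>/blobs/uploads/<uuid>

        dgst := digest.FromBytes(data) // sha256:<hex>, what the server recomputes via digestFromHashSummer
        req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s%s?digest=%s", registry, location, dgst), bytes.NewReader(data))
        if err != nil {
            return err
        }
        req.Header.Set("Content-Type", "application/octet-stream")
        putRes, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer putRes.Body.Close()
        if putRes.StatusCode != http.StatusCreated {
            return fmt.Errorf("finishing upload: unexpected status %d", putRes.StatusCode)
        }
        return nil
    }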
diff --git a/routers/api/packages/container/errors.go b/routers/api/packages/container/errors.go
new file mode 100644
index 0000000..1a9b0f3
--- /dev/null
+++ b/routers/api/packages/container/errors.go
@@ -0,0 +1,52 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "net/http"
+)
+
+// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#error-codes
+var (
+ errBlobUnknown = &namedError{Code: "BLOB_UNKNOWN", StatusCode: http.StatusNotFound}
+ errBlobUploadInvalid = &namedError{Code: "BLOB_UPLOAD_INVALID", StatusCode: http.StatusBadRequest}
+ errBlobUploadUnknown = &namedError{Code: "BLOB_UPLOAD_UNKNOWN", StatusCode: http.StatusNotFound}
+ errDigestInvalid = &namedError{Code: "DIGEST_INVALID", StatusCode: http.StatusBadRequest}
+ errManifestBlobUnknown = &namedError{Code: "MANIFEST_BLOB_UNKNOWN", StatusCode: http.StatusNotFound}
+ errManifestInvalid = &namedError{Code: "MANIFEST_INVALID", StatusCode: http.StatusBadRequest}
+ errManifestUnknown = &namedError{Code: "MANIFEST_UNKNOWN", StatusCode: http.StatusNotFound}
+ errNameInvalid = &namedError{Code: "NAME_INVALID", StatusCode: http.StatusBadRequest}
+ errNameUnknown = &namedError{Code: "NAME_UNKNOWN", StatusCode: http.StatusNotFound}
+ errSizeInvalid = &namedError{Code: "SIZE_INVALID", StatusCode: http.StatusBadRequest}
+ errUnauthorized = &namedError{Code: "UNAUTHORIZED", StatusCode: http.StatusUnauthorized}
+ errUnsupported = &namedError{Code: "UNSUPPORTED", StatusCode: http.StatusNotImplemented}
+)
+
+type namedError struct {
+ Code string
+ StatusCode int
+ Message string
+}
+
+func (e *namedError) Error() string {
+ return e.Message
+}
+
+// WithMessage creates a new instance of the error with a different message
+func (e *namedError) WithMessage(message string) *namedError {
+ return &namedError{
+ Code: e.Code,
+ StatusCode: e.StatusCode,
+ Message: message,
+ }
+}
+
+// WithStatusCode creates a new instance of the error with a different status code
+func (e *namedError) WithStatusCode(statusCode int) *namedError {
+ return &namedError{
+ Code: e.Code,
+ StatusCode: statusCode,
+ Message: e.Message,
+ }
+}
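
The With* helpers return copies, so the shared sentinel values above are never mutated and a call site can specialize an error without affecting concurrent requests. A small illustration (same package, hypothetical function):

    // manifestTooLarge derives a request-specific error from the shared
    // errManifestInvalid sentinel; the sentinel itself keeps its generic
    // message and 400 status.
    func manifestTooLarge() *namedError {
        return errManifestInvalid.
            WithMessage("Manifest exceeds maximum size").
            WithStatusCode(http.StatusRequestEntityTooLarge)
    }

Because every derived value is still a *namedError, the errors.As check in UploadManifest matches both the sentinels and any copy produced this way.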
diff --git a/routers/api/packages/container/manifest.go b/routers/api/packages/container/manifest.go
new file mode 100644
index 0000000..4a79a58
--- /dev/null
+++ b/routers/api/packages/container/manifest.go
@@ -0,0 +1,483 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_model "code.gitea.io/gitea/models/packages/container"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/util"
+ notify_service "code.gitea.io/gitea/services/notify"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ digest "github.com/opencontainers/go-digest"
+ oci "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func isValidMediaType(mt string) bool {
+ return strings.HasPrefix(mt, "application/vnd.docker.") || strings.HasPrefix(mt, "application/vnd.oci.")
+}
+
+func isImageManifestMediaType(mt string) bool {
+ return strings.EqualFold(mt, oci.MediaTypeImageManifest) || strings.EqualFold(mt, "application/vnd.docker.distribution.manifest.v2+json")
+}
+
+func isImageIndexMediaType(mt string) bool {
+ return strings.EqualFold(mt, oci.MediaTypeImageIndex) || strings.EqualFold(mt, "application/vnd.docker.distribution.manifest.list.v2+json")
+}
+
+// manifestCreationInfo describes a manifest to create
+type manifestCreationInfo struct {
+ MediaType string
+ Owner *user_model.User
+ Creator *user_model.User
+ Image string
+ Reference string
+ IsTagged bool
+ Properties map[string]string
+}
+
+func processManifest(ctx context.Context, mci *manifestCreationInfo, buf *packages_module.HashedBuffer) (string, error) {
+ var index oci.Index
+ if err := json.NewDecoder(buf).Decode(&index); err != nil {
+ return "", err
+ }
+
+ if index.SchemaVersion != 2 {
+ return "", errUnsupported.WithMessage("Schema version is not supported")
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ return "", err
+ }
+
+ if !isValidMediaType(mci.MediaType) {
+ mci.MediaType = index.MediaType
+ if !isValidMediaType(mci.MediaType) {
+ return "", errManifestInvalid.WithMessage("MediaType not recognized")
+ }
+ }
+
+ if isImageManifestMediaType(mci.MediaType) {
+ return processImageManifest(ctx, mci, buf)
+ } else if isImageIndexMediaType(mci.MediaType) {
+ return processImageManifestIndex(ctx, mci, buf)
+ }
+ return "", errManifestInvalid
+}
+
+func processImageManifest(ctx context.Context, mci *manifestCreationInfo, buf *packages_module.HashedBuffer) (string, error) {
+ manifestDigest := ""
+
+ err := func() error {
+ var manifest oci.Manifest
+ if err := json.NewDecoder(buf).Decode(&manifest); err != nil {
+ return err
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ configDescriptor, err := container_model.GetContainerBlob(ctx, &container_model.BlobSearchOptions{
+ OwnerID: mci.Owner.ID,
+ Image: mci.Image,
+ Digest: string(manifest.Config.Digest),
+ })
+ if err != nil {
+ return err
+ }
+
+ configReader, err := packages_module.NewContentStore().Get(packages_module.BlobHash256Key(configDescriptor.Blob.HashSHA256))
+ if err != nil {
+ return err
+ }
+ defer configReader.Close()
+
+ metadata, err := container_module.ParseImageConfig(manifest.Config.MediaType, configReader)
+ if err != nil {
+ return err
+ }
+
+ blobReferences := make([]*blobReference, 0, 1+len(manifest.Layers))
+
+ blobReferences = append(blobReferences, &blobReference{
+ Digest: manifest.Config.Digest,
+ MediaType: manifest.Config.MediaType,
+ File: configDescriptor,
+ ExpectedSize: manifest.Config.Size,
+ })
+
+ for _, layer := range manifest.Layers {
+ pfd, err := container_model.GetContainerBlob(ctx, &container_model.BlobSearchOptions{
+ OwnerID: mci.Owner.ID,
+ Image: mci.Image,
+ Digest: string(layer.Digest),
+ })
+ if err != nil {
+ return err
+ }
+
+ blobReferences = append(blobReferences, &blobReference{
+ Digest: layer.Digest,
+ MediaType: layer.MediaType,
+ File: pfd,
+ ExpectedSize: layer.Size,
+ })
+ }
+
+ pv, err := createPackageAndVersion(ctx, mci, metadata)
+ if err != nil {
+ return err
+ }
+
+ uploadVersion, err := packages_model.GetInternalVersionByNameAndVersion(ctx, mci.Owner.ID, packages_model.TypeContainer, mci.Image, container_model.UploadVersion)
+ if err != nil && err != packages_model.ErrPackageNotExist {
+ return err
+ }
+
+ for _, ref := range blobReferences {
+ if err := createFileFromBlobReference(ctx, pv, uploadVersion, ref); err != nil {
+ return err
+ }
+ }
+
+ pb, created, digest, err := createManifestBlob(ctx, mci, pv, buf)
+ removeBlob := false
+ defer func() {
+ if removeBlob {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob from content store: %v", err)
+ }
+ }
+ }()
+ if err != nil {
+ removeBlob = created
+ return err
+ }
+
+ if err := committer.Commit(); err != nil {
+ removeBlob = created
+ return err
+ }
+
+ if err := notifyPackageCreate(ctx, mci.Creator, pv); err != nil {
+ return err
+ }
+
+ manifestDigest = digest
+
+ return nil
+ }()
+ if err != nil {
+ return "", err
+ }
+
+ return manifestDigest, nil
+}
+
+func processImageManifestIndex(ctx context.Context, mci *manifestCreationInfo, buf *packages_module.HashedBuffer) (string, error) {
+ manifestDigest := ""
+
+ err := func() error {
+ var index oci.Index
+ if err := json.NewDecoder(buf).Decode(&index); err != nil {
+ return err
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ metadata := &container_module.Metadata{
+ Type: container_module.TypeOCI,
+ Manifests: make([]*container_module.Manifest, 0, len(index.Manifests)),
+ }
+
+ for _, manifest := range index.Manifests {
+ if !isImageManifestMediaType(manifest.MediaType) {
+ return errManifestInvalid
+ }
+
+ platform := container_module.DefaultPlatform
+ if manifest.Platform != nil {
+ platform = fmt.Sprintf("%s/%s", manifest.Platform.OS, manifest.Platform.Architecture)
+ if manifest.Platform.Variant != "" {
+ platform = fmt.Sprintf("%s/%s", platform, manifest.Platform.Variant)
+ }
+ }
+
+ pfd, err := container_model.GetContainerBlob(ctx, &container_model.BlobSearchOptions{
+ OwnerID: mci.Owner.ID,
+ Image: mci.Image,
+ Digest: string(manifest.Digest),
+ IsManifest: true,
+ })
+ if err != nil {
+ if err == container_model.ErrContainerBlobNotExist {
+ return errManifestBlobUnknown
+ }
+ return err
+ }
+
+ size, err := packages_model.CalculateFileSize(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pfd.File.VersionID,
+ })
+ if err != nil {
+ return err
+ }
+
+ metadata.Manifests = append(metadata.Manifests, &container_module.Manifest{
+ Platform: platform,
+ Digest: string(manifest.Digest),
+ Size: size,
+ })
+ }
+
+ pv, err := createPackageAndVersion(ctx, mci, metadata)
+ if err != nil {
+ return err
+ }
+
+ pb, created, digest, err := createManifestBlob(ctx, mci, pv, buf)
+ removeBlob := false
+ defer func() {
+ if removeBlob {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob from content store: %v", err)
+ }
+ }
+ }()
+ if err != nil {
+ removeBlob = created
+ return err
+ }
+
+ if err := committer.Commit(); err != nil {
+ removeBlob = created
+ return err
+ }
+
+ if err := notifyPackageCreate(ctx, mci.Creator, pv); err != nil {
+ return err
+ }
+
+ manifestDigest = digest
+
+ return nil
+ }()
+ if err != nil {
+ return "", err
+ }
+
+ return manifestDigest, nil
+}
+
+func notifyPackageCreate(ctx context.Context, doer *user_model.User, pv *packages_model.PackageVersion) error {
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return err
+ }
+
+ notify_service.PackageCreate(ctx, doer, pd)
+
+ return nil
+}
+
+func createPackageAndVersion(ctx context.Context, mci *manifestCreationInfo, metadata *container_module.Metadata) (*packages_model.PackageVersion, error) {
+ created := true
+ p := &packages_model.Package{
+ OwnerID: mci.Owner.ID,
+ Type: packages_model.TypeContainer,
+ Name: strings.ToLower(mci.Image),
+ LowerName: strings.ToLower(mci.Image),
+ }
+ var err error
+ if p, err = packages_model.TryInsertPackage(ctx, p); err != nil {
+ if err == packages_model.ErrDuplicatePackage {
+ created = false
+ } else {
+ log.Error("Error inserting package: %v", err)
+ return nil, err
+ }
+ }
+
+ if created {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository, strings.ToLower(mci.Owner.LowerName+"/"+mci.Image)); err != nil {
+ log.Error("Error setting package property: %v", err)
+ return nil, err
+ }
+ }
+
+ metadata.IsTagged = mci.IsTagged
+
+ metadataJSON, err := json.Marshal(metadata)
+ if err != nil {
+ return nil, err
+ }
+
+ _pv := &packages_model.PackageVersion{
+ PackageID: p.ID,
+ CreatorID: mci.Creator.ID,
+ Version: strings.ToLower(mci.Reference),
+ LowerVersion: strings.ToLower(mci.Reference),
+ MetadataJSON: string(metadataJSON),
+ }
+ var pv *packages_model.PackageVersion
+ if pv, err = packages_model.GetOrInsertVersion(ctx, _pv); err != nil {
+ if err == packages_model.ErrDuplicatePackageVersion {
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return nil, err
+ }
+
+ // keep download count on overwrite
+ _pv.DownloadCount = pv.DownloadCount
+
+ if pv, err = packages_model.GetOrInsertVersion(ctx, _pv); err != nil {
+ log.Error("Error inserting package: %v", err)
+ return nil, err
+ }
+ } else {
+ log.Error("Error inserting package: %v", err)
+ return nil, err
+ }
+ }
+
+ if err := packages_service.CheckCountQuotaExceeded(ctx, mci.Creator, mci.Owner); err != nil {
+ return nil, err
+ }
+
+ if mci.IsTagged {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeVersion, pv.ID, container_module.PropertyManifestTagged, ""); err != nil {
+ log.Error("Error setting package version property: %v", err)
+ return nil, err
+ }
+ }
+ for _, manifest := range metadata.Manifests {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeVersion, pv.ID, container_module.PropertyManifestReference, manifest.Digest); err != nil {
+ log.Error("Error setting package version property: %v", err)
+ return nil, err
+ }
+ }
+
+ return pv, nil
+}
+
+type blobReference struct {
+ Digest digest.Digest
+ MediaType string
+ Name string
+ File *packages_model.PackageFileDescriptor
+ ExpectedSize int64
+ IsLead bool
+}
+
+func createFileFromBlobReference(ctx context.Context, pv, uploadVersion *packages_model.PackageVersion, ref *blobReference) error {
+ if ref.File.Blob.Size != ref.ExpectedSize {
+ return errSizeInvalid
+ }
+
+ if ref.Name == "" {
+ ref.Name = strings.ToLower(fmt.Sprintf("sha256_%s", ref.File.Blob.HashSHA256))
+ }
+
+ pf := &packages_model.PackageFile{
+ VersionID: pv.ID,
+ BlobID: ref.File.Blob.ID,
+ Name: ref.Name,
+ LowerName: ref.Name,
+ IsLead: ref.IsLead,
+ }
+ var err error
+ if pf, err = packages_model.TryInsertFile(ctx, pf); err != nil {
+ if err == packages_model.ErrDuplicatePackageFile {
+ // Skip this blob because the manifest contains the same filesystem layer multiple times.
+ return nil
+ }
+ log.Error("Error inserting package file: %v", err)
+ return err
+ }
+
+ props := map[string]string{
+ container_module.PropertyMediaType: ref.MediaType,
+ container_module.PropertyDigest: string(ref.Digest),
+ }
+ for name, value := range props {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeFile, pf.ID, name, value); err != nil {
+ log.Error("Error setting package file property: %v", err)
+ return err
+ }
+ }
+
+ // Remove the file from the blob upload version
+ if uploadVersion != nil && ref.File.File != nil && uploadVersion.ID == ref.File.File.VersionID {
+ if err := packages_service.DeletePackageFile(ctx, ref.File.File); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createManifestBlob(ctx context.Context, mci *manifestCreationInfo, pv *packages_model.PackageVersion, buf *packages_module.HashedBuffer) (*packages_model.PackageBlob, bool, string, error) {
+ pb, exists, err := packages_model.GetOrInsertBlob(ctx, packages_service.NewPackageBlob(buf))
+ if err != nil {
+ log.Error("Error inserting package blob: %v", err)
+ return nil, false, "", err
+ }
+ // FIXME: Workaround to be removed in v1.20
+ // https://github.com/go-gitea/gitea/issues/19586
+ if exists {
+ err = packages_module.NewContentStore().Has(packages_module.BlobHash256Key(pb.HashSHA256))
+ if err != nil && (errors.Is(err, util.ErrNotExist) || errors.Is(err, os.ErrNotExist)) {
+ log.Debug("Package registry inconsistent: blob %s does not exist on file system", pb.HashSHA256)
+ exists = false
+ }
+ }
+ if !exists {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Save(packages_module.BlobHash256Key(pb.HashSHA256), buf, buf.Size()); err != nil {
+ log.Error("Error saving package blob in content store: %v", err)
+ return nil, false, "", err
+ }
+ }
+
+ manifestDigest := digestFromHashSummer(buf)
+ err = createFileFromBlobReference(ctx, pv, nil, &blobReference{
+ Digest: digest.Digest(manifestDigest),
+ MediaType: mci.MediaType,
+ Name: container_model.ManifestFilename,
+ File: &packages_model.PackageFileDescriptor{Blob: pb},
+ ExpectedSize: pb.Size,
+ IsLead: true,
+ })
+
+ return pb, !exists, manifestDigest, err
+}
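
To make the dispatch in processManifest and the platform handling in processImageManifestIndex easier to follow, here is a self-contained sketch that decodes a hand-written OCI image index with the same image-spec types and reproduces the os/arch[/variant] formatting. The index content is invented for illustration; only the type and field names are taken from the code above, and the real code also accepts the equivalent Docker media types.

    package example

    import (
        "encoding/json"
        "fmt"

        oci "github.com/opencontainers/image-spec/specs-go/v1"
    )

    const sampleIndex = `{
      "schemaVersion": 2,
      "mediaType": "application/vnd.oci.image.index.v1+json",
      "manifests": [
        {
          "mediaType": "application/vnd.oci.image.manifest.v1+json",
          "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
          "size": 1234,
          "platform": {"os": "linux", "architecture": "arm", "variant": "v7"}
        }
      ]
    }`

    // describeIndex mirrors the loop in processImageManifestIndex: it rejects
    // entries that are not image manifests and builds the platform string that
    // is stored in the package metadata.
    func describeIndex() error {
        var index oci.Index
        if err := json.Unmarshal([]byte(sampleIndex), &index); err != nil {
            return err
        }
        if index.SchemaVersion != 2 {
            return fmt.Errorf("unsupported schema version %d", index.SchemaVersion)
        }
        for _, m := range index.Manifests {
            if m.MediaType != oci.MediaTypeImageManifest {
                return fmt.Errorf("unexpected media type %q", m.MediaType)
            }
            platform := "unknown" // the real code falls back to container_module.DefaultPlatform
            if m.Platform != nil {
                platform = fmt.Sprintf("%s/%s", m.Platform.OS, m.Platform.Architecture)
                if m.Platform.Variant != "" {
                    platform += "/" + m.Platform.Variant
                }
            }
            fmt.Printf("%s -> %s\n", m.Digest, platform)
        }
        return nil
    }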
diff --git a/routers/api/packages/cran/cran.go b/routers/api/packages/cran/cran.go
new file mode 100644
index 0000000..f1d6167
--- /dev/null
+++ b/routers/api/packages/cran/cran.go
@@ -0,0 +1,264 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package cran
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ cran_model "code.gitea.io/gitea/models/packages/cran"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ cran_module "code.gitea.io/gitea/modules/packages/cran"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+func EnumerateSourcePackages(ctx *context.Context) {
+ enumeratePackages(ctx, ctx.Params("format"), &cran_model.SearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ FileType: cran_module.TypeSource,
+ })
+}
+
+func EnumerateBinaryPackages(ctx *context.Context) {
+ enumeratePackages(ctx, ctx.Params("format"), &cran_model.SearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ FileType: cran_module.TypeBinary,
+ Platform: ctx.Params("platform"),
+ RVersion: ctx.Params("rversion"),
+ })
+}
+
+func enumeratePackages(ctx *context.Context, format string, opts *cran_model.SearchOptions) {
+ if format != "" && format != ".gz" {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pvs, err := cran_model.SearchLatestVersions(ctx, opts)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ var w io.Writer = ctx.Resp
+
+ if format == ".gz" {
+ ctx.Resp.Header().Set("Content-Type", "application/x-gzip")
+
+ gzw := gzip.NewWriter(w)
+ defer gzw.Close()
+
+ w = gzw
+ } else {
+ ctx.Resp.Header().Set("Content-Type", "text/plain;charset=utf-8")
+ }
+ ctx.Resp.WriteHeader(http.StatusOK)
+
+ for i, pd := range pds {
+ if i > 0 {
+ fmt.Fprintln(w)
+ }
+
+ var pfd *packages_model.PackageFileDescriptor
+ for _, d := range pd.Files {
+ if d.Properties.GetByName(cran_module.PropertyType) == opts.FileType &&
+ d.Properties.GetByName(cran_module.PropertyPlatform) == opts.Platform &&
+ d.Properties.GetByName(cran_module.PropertyRVersion) == opts.RVersion {
+ pfd = d
+ break
+ }
+ }
+
+ metadata := pd.Metadata.(*cran_module.Metadata)
+
+ fmt.Fprintln(w, "Package:", pd.Package.Name)
+ fmt.Fprintln(w, "Version:", pd.Version.Version)
+ if metadata.License != "" {
+ fmt.Fprintln(w, "License:", metadata.License)
+ }
+ if len(metadata.Depends) > 0 {
+ fmt.Fprintln(w, "Depends:", strings.Join(metadata.Depends, ", "))
+ }
+ if len(metadata.Imports) > 0 {
+ fmt.Fprintln(w, "Imports:", strings.Join(metadata.Imports, ", "))
+ }
+ if len(metadata.LinkingTo) > 0 {
+ fmt.Fprintln(w, "LinkingTo:", strings.Join(metadata.LinkingTo, ", "))
+ }
+ if len(metadata.Suggests) > 0 {
+ fmt.Fprintln(w, "Suggests:", strings.Join(metadata.Suggests, ", "))
+ }
+ needsCompilation := "no"
+ if metadata.NeedsCompilation {
+ needsCompilation = "yes"
+ }
+ fmt.Fprintln(w, "NeedsCompilation:", needsCompilation)
+ fmt.Fprintln(w, "MD5sum:", pfd.Blob.HashMD5)
+ }
+}
+
+func UploadSourcePackageFile(ctx *context.Context) {
+ uploadPackageFile(
+ ctx,
+ packages_model.EmptyFileKey,
+ map[string]string{
+ cran_module.PropertyType: cran_module.TypeSource,
+ },
+ )
+}
+
+func UploadBinaryPackageFile(ctx *context.Context) {
+ platform, rversion := ctx.FormTrim("platform"), ctx.FormTrim("rversion")
+ if platform == "" || rversion == "" {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ uploadPackageFile(
+ ctx,
+ platform+"|"+rversion,
+ map[string]string{
+ cran_module.PropertyType: cran_module.TypeBinary,
+ cran_module.PropertyPlatform: platform,
+ cran_module.PropertyRVersion: rversion,
+ },
+ )
+}
+
+func uploadPackageFile(ctx *context.Context, compositeKey string, properties map[string]string) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := cran_module.ParsePackage(buf, buf.Size())
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeCran,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ SemverCompatible: false,
+ Creator: ctx.Doer,
+ Metadata: pck.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s_%s%s", pck.Name, pck.Version, pck.FileExtension),
+ CompositeKey: compositeKey,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: properties,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func DownloadSourcePackageFile(ctx *context.Context) {
+ downloadPackageFile(ctx, &cran_model.SearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ FileType: cran_module.TypeSource,
+ Filename: ctx.Params("filename"),
+ })
+}
+
+func DownloadBinaryPackageFile(ctx *context.Context) {
+ downloadPackageFile(ctx, &cran_model.SearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ FileType: cran_module.TypeBinary,
+ Platform: ctx.Params("platform"),
+ RVersion: ctx.Params("rversion"),
+ Filename: ctx.Params("filename"),
+ })
+}
+
+func downloadPackageFile(ctx *context.Context, opts *cran_model.SearchOptions) {
+ pf, err := cran_model.SearchFile(ctx, opts)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
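
For reference, enumeratePackages renders each matching package as one Debian-control-style stanza, with stanzas separated by a blank line and optional fields omitted when empty; a hypothetical entry could look like this (values invented):

    Package: mypkg
    Version: 1.2.3
    License: MIT
    Depends: R (>= 3.5.0), methods
    Imports: jsonlite
    NeedsCompilation: no
    MD5sum: 0123456789abcdef0123456789abcdef

When the .gz variant of the index is requested, the same text is written through a gzip.Writer instead of directly to the response.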
diff --git a/routers/api/packages/debian/debian.go b/routers/api/packages/debian/debian.go
new file mode 100644
index 0000000..8c05476
--- /dev/null
+++ b/routers/api/packages/debian/debian.go
@@ -0,0 +1,309 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package debian
+
+import (
+ stdctx "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ debian_module "code.gitea.io/gitea/modules/packages/debian"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ notify_service "code.gitea.io/gitea/services/notify"
+ packages_service "code.gitea.io/gitea/services/packages"
+ debian_service "code.gitea.io/gitea/services/packages/debian"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+func GetRepositoryKey(ctx *context.Context) {
+ _, pub, err := debian_service.GetOrCreateKeyPair(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.ServeContent(strings.NewReader(pub), &context.ServeHeaderOptions{
+ ContentType: "application/pgp-keys",
+ Filename: "repository.key",
+ })
+}
+
+// https://wiki.debian.org/DebianRepository/Format#A.22Release.22_files
+// https://wiki.debian.org/DebianRepository/Format#A.22Packages.22_Indices
+func GetRepositoryFile(ctx *context.Context) {
+ pv, err := debian_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ key := ctx.Params("distribution")
+
+ component := ctx.Params("component")
+ architecture := strings.TrimPrefix(ctx.Params("architecture"), "binary-")
+ if component != "" && architecture != "" {
+ key += "|" + component + "|" + architecture
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pv,
+ &packages_service.PackageFileInfo{
+ Filename: ctx.Params("filename"),
+ CompositeKey: key,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// https://wiki.debian.org/DebianRepository/Format#indices_acquisition_via_hashsums_.28by-hash.29
+func GetRepositoryFileByHash(ctx *context.Context) {
+ pv, err := debian_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ algorithm := strings.ToLower(ctx.Params("algorithm"))
+ if algorithm == "md5sum" {
+ algorithm = "md5"
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: pv.ID,
+ Hash: strings.ToLower(ctx.Params("hash")),
+ HashAlgorithm: algorithm,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetPackageFileStream(ctx, pfs[0])
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func UploadPackageFile(ctx *context.Context) {
+ distribution := strings.TrimSpace(ctx.Params("distribution"))
+ component := strings.TrimSpace(ctx.Params("component"))
+ if distribution == "" || component == "" {
+ apiError(ctx, http.StatusBadRequest, "invalid distribution or component")
+ return
+ }
+
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := debian_module.ParsePackage(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeDebian,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ Creator: ctx.Doer,
+ Metadata: pck.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s_%s_%s.deb", pck.Name, pck.Version, pck.Architecture),
+ CompositeKey: fmt.Sprintf("%s|%s", distribution, component),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: map[string]string{
+ debian_module.PropertyDistribution: distribution,
+ debian_module.PropertyComponent: component,
+ debian_module.PropertyArchitecture: pck.Architecture,
+ debian_module.PropertyControl: pck.Control,
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion, packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := debian_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, distribution, component, pck.Architecture); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+ name := ctx.Params("name")
+ version := ctx.Params("version")
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeDebian,
+ Name: name,
+ Version: version,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s_%s_%s.deb", name, version, ctx.Params("architecture")),
+ CompositeKey: fmt.Sprintf("%s|%s", ctx.Params("distribution"), ctx.Params("component")),
+ },
+ )
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf, &context.ServeHeaderOptions{
+ ContentType: "application/vnd.debian.binary-package",
+ Filename: pf.Name,
+ LastModified: pf.CreatedUnix.AsLocalTime(),
+ })
+}
+
+func DeletePackageFile(ctx *context.Context) {
+ distribution := ctx.Params("distribution")
+ component := ctx.Params("component")
+ name := ctx.Params("name")
+ version := ctx.Params("version")
+ architecture := ctx.Params("architecture")
+
+ owner := ctx.Package.Owner
+
+ var pd *packages_model.PackageDescriptor
+
+ err := db.WithTx(ctx, func(ctx stdctx.Context) error {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, owner.ID, packages_model.TypeDebian, name, version)
+ if err != nil {
+ return err
+ }
+
+ pf, err := packages_model.GetFileForVersionByName(
+ ctx,
+ pv.ID,
+ fmt.Sprintf("%s_%s_%s.deb", name, version, architecture),
+ fmt.Sprintf("%s|%s", distribution, component),
+ )
+ if err != nil {
+ return err
+ }
+
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+
+ has, err := packages_model.HasVersionFileReferences(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return err
+ }
+
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if pd != nil {
+ notify_service.PackageDelete(ctx, ctx.Doer, pd)
+ }
+
+ if err := debian_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, distribution, component, architecture); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
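
As a quick reference for the conventions used throughout this file: uploaded .deb files are stored under a "distribution|component" composite key and named name_version_architecture.deb, while repository index files additionally carry the architecture in their key. Two tiny helpers sketching those conventions (illustrative only, not part of the package):

    package example

    import "fmt"

    // debianCompositeKey matches the CompositeKey built in UploadPackageFile,
    // DownloadPackageFile and DeletePackageFile above.
    func debianCompositeKey(distribution, component string) string {
        return fmt.Sprintf("%s|%s", distribution, component)
    }

    // debianFilename matches the Filename built for package files above.
    func debianFilename(name, version, architecture string) string {
        return fmt.Sprintf("%s_%s_%s.deb", name, version, architecture)
    }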
diff --git a/routers/api/packages/generic/generic.go b/routers/api/packages/generic/generic.go
new file mode 100644
index 0000000..e66f3ee
--- /dev/null
+++ b/routers/api/packages/generic/generic.go
@@ -0,0 +1,212 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package generic
+
+import (
+ "errors"
+ "net/http"
+ "regexp"
+ "strings"
+ "unicode"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+var (
+ packageNameRegex = regexp.MustCompile(`\A[-_+.\w]+\z`)
+ filenameRegex = regexp.MustCompile(`\A[-_+=:;.()\[\]{}~!@#$%^& \w]+\z`)
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+// DownloadPackageFile serves the specific generic package.
+func DownloadPackageFile(ctx *context.Context) {
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeGeneric,
+ Name: ctx.Params("packagename"),
+ Version: ctx.Params("packageversion"),
+ },
+ &packages_service.PackageFileInfo{
+ Filename: ctx.Params("filename"),
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func isValidPackageName(packageName string) bool {
+ if len(packageName) == 1 && !unicode.IsLetter(rune(packageName[0])) && !unicode.IsNumber(rune(packageName[0])) {
+ return false
+ }
+ return packageNameRegex.MatchString(packageName) && packageName != ".."
+}
+
+func isValidFileName(filename string) bool {
+ return filenameRegex.MatchString(filename) &&
+ strings.TrimSpace(filename) == filename &&
+ filename != "." && filename != ".."
+}
+
+// UploadPackage uploads the specific generic package.
+// Duplicated packages get rejected.
+func UploadPackage(ctx *context.Context) {
+ packageName := ctx.Params("packagename")
+ filename := ctx.Params("filename")
+
+ if !isValidPackageName(packageName) {
+ apiError(ctx, http.StatusBadRequest, errors.New("invalid package name"))
+ return
+ }
+
+ if !isValidFileName(filename) {
+ apiError(ctx, http.StatusBadRequest, errors.New("invalid filename"))
+ return
+ }
+
+ packageVersion := ctx.Params("packageversion")
+ if packageVersion != strings.TrimSpace(packageVersion) {
+ apiError(ctx, http.StatusBadRequest, errors.New("invalid package version"))
+ return
+ }
+
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ log.Error("Error creating hashed buffer: %v", err)
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeGeneric,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ Creator: ctx.Doer,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// DeletePackage deletes the specific generic package.
+func DeletePackage(ctx *context.Context) {
+ err := packages_service.RemovePackageVersionByNameAndVersion(
+ ctx,
+ ctx.Doer,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeGeneric,
+ Name: ctx.Params("packagename"),
+ Version: ctx.Params("packageversion"),
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeletePackageFile deletes the specific file of a generic package.
+func DeletePackageFile(ctx *context.Context) {
+ pv, pf, err := func() (*packages_model.PackageVersion, *packages_model.PackageFile, error) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeGeneric, ctx.Params("packagename"), ctx.Params("packageversion"))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, ctx.Params("filename"), packages_model.EmptyFileKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return pv, pf, nil
+ }()
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pfs) == 1 {
+ if err := packages_service.RemovePackageVersion(ctx, ctx.Doer, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ } else {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/packages/generic/generic_test.go b/routers/api/packages/generic/generic_test.go
new file mode 100644
index 0000000..1acaafe
--- /dev/null
+++ b/routers/api/packages/generic/generic_test.go
@@ -0,0 +1,65 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package generic
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidatePackageName(t *testing.T) {
+ bad := []string{
+ "",
+ ".",
+ "..",
+ "-",
+ "a?b",
+ "a b",
+ "a/b",
+ }
+ for _, name := range bad {
+ assert.False(t, isValidPackageName(name), "bad=%q", name)
+ }
+
+ good := []string{
+ "a",
+ "1",
+ "a-",
+ "a_b",
+ "c.d+",
+ }
+ for _, name := range good {
+ assert.True(t, isValidPackageName(name), "good=%q", name)
+ }
+}
+
+func TestValidateFileName(t *testing.T) {
+ bad := []string{
+ "",
+ ".",
+ "..",
+ "a?b",
+ "a/b",
+ " a",
+ "a ",
+ }
+ for _, name := range bad {
+ assert.False(t, isValidFileName(name), "bad=%q", name)
+ }
+
+ good := []string{
+ "-",
+ "a",
+ "1",
+ "a-",
+ "a_b",
+ "a b",
+ "c.d+",
+ `-_+=:;.()[]{}~!@#$%^& aA1`,
+ }
+ for _, name := range good {
+ assert.True(t, isValidFileName(name), "good=%q", name)
+ }
+}
diff --git a/routers/api/packages/goproxy/goproxy.go b/routers/api/packages/goproxy/goproxy.go
new file mode 100644
index 0000000..56a07db
--- /dev/null
+++ b/routers/api/packages/goproxy/goproxy.go
@@ -0,0 +1,224 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package goproxy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ goproxy_module "code.gitea.io/gitea/modules/packages/goproxy"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+func EnumeratePackageVersions(ctx *context.Context) {
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeGo, ctx.Params("name"))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ sort.Slice(pvs, func(i, j int) bool {
+ return pvs[i].CreatedUnix < pvs[j].CreatedUnix
+ })
+
+ ctx.Resp.Header().Set("Content-Type", "text/plain;charset=utf-8")
+
+ for _, pv := range pvs {
+ fmt.Fprintln(ctx.Resp, pv.Version)
+ }
+}
+
+func PackageVersionMetadata(ctx *context.Context) {
+ pv, err := resolvePackage(ctx, ctx.Package.Owner.ID, ctx.Params("name"), ctx.Params("version"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusOK, struct {
+ Version string `json:"Version"`
+ Time time.Time `json:"Time"`
+ }{
+ Version: pv.Version,
+ Time: pv.CreatedUnix.AsLocalTime(),
+ })
+}
+
+func PackageVersionGoModContent(ctx *context.Context) {
+ pv, err := resolvePackage(ctx, ctx.Package.Owner.ID, ctx.Params("name"), ctx.Params("version"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeVersion, pv.ID, goproxy_module.PropertyGoMod)
+ if err != nil || len(pps) != 1 {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.PlainText(http.StatusOK, pps[0].Value)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+ pv, err := resolvePackage(ctx, ctx.Package.Owner.ID, ctx.Params("name"), ctx.Params("version"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil || len(pfs) != 1 {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pfs[0])
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pfs[0])
+}
+
+func resolvePackage(ctx *context.Context, ownerID int64, name, version string) (*packages_model.PackageVersion, error) {
+ var pv *packages_model.PackageVersion
+
+ if version == "latest" {
+ pvs, _, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ownerID,
+ Type: packages_model.TypeGo,
+ Name: packages_model.SearchValue{
+ Value: name,
+ ExactMatch: true,
+ },
+ IsInternal: optional.Some(false),
+ Sort: packages_model.SortCreatedDesc,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(pvs) != 1 {
+ return nil, packages_model.ErrPackageNotExist
+ }
+
+ pv = pvs[0]
+ } else {
+ var err error
+ pv, err = packages_model.GetVersionByNameAndVersion(ctx, ownerID, packages_model.TypeGo, name, version)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return pv, nil
+}
+
+func UploadPackage(ctx *context.Context) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := goproxy_module.ParsePackage(buf, buf.Size())
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeGo,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ Creator: ctx.Doer,
+ VersionProperties: map[string]string{
+ goproxy_module.PropertyGoMod: pck.GoMod,
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%v.zip", pck.Version),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
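
These handlers implement the standard Go module proxy protocol (list, .info, .mod and .zip endpoints). As an illustration, the sketch below fetches and decodes the .info document whose shape matches the anonymous struct returned by PackageVersionMetadata; the base URL is an assumption to be taken from the registry documentation, and case-encoding of module paths is not handled.

    package example

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "time"
    )

    // versionInfo mirrors the JSON written by PackageVersionMetadata above.
    type versionInfo struct {
        Version string    `json:"Version"`
        Time    time.Time `json:"Time"`
    }

    // fetchInfo asks a module proxy for one version's .info document, e.g.
    // fetchInfo("https://forge.example/api/packages/owner/go", "example.com/mod", "v1.0.0").
    func fetchInfo(base, module, version string) (*versionInfo, error) {
        res, err := http.Get(fmt.Sprintf("%s/%s/@v/%s.info", base, module, version))
        if err != nil {
            return nil, err
        }
        defer res.Body.Close()
        if res.StatusCode != http.StatusOK {
            return nil, fmt.Errorf("unexpected status %d", res.StatusCode)
        }
        info := &versionInfo{}
        if err := json.NewDecoder(res.Body).Decode(info); err != nil {
            return nil, err
        }
        return info, nil
    }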
diff --git a/routers/api/packages/helm/helm.go b/routers/api/packages/helm/helm.go
new file mode 100644
index 0000000..efdb83e
--- /dev/null
+++ b/routers/api/packages/helm/helm.go
@@ -0,0 +1,217 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package helm
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ helm_module "code.gitea.io/gitea/modules/packages/helm"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "gopkg.in/yaml.v3"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ type Error struct {
+ Error string `json:"error"`
+ }
+ ctx.JSON(status, Error{
+ Error: message,
+ })
+ })
+}
+
+// Index generates the Helm charts index
+func Index(ctx *context.Context) {
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeHelm,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ baseURL := setting.AppURL + "api/packages/" + url.PathEscape(ctx.Package.Owner.Name) + "/helm"
+
+ type ChartVersion struct {
+ helm_module.Metadata `yaml:",inline"`
+ URLs []string `yaml:"urls"`
+ Created time.Time `yaml:"created,omitempty"`
+ Removed bool `yaml:"removed,omitempty"`
+ Digest string `yaml:"digest,omitempty"`
+ }
+
+ type ServerInfo struct {
+ ContextPath string `yaml:"contextPath,omitempty"`
+ }
+
+ type Index struct {
+ APIVersion string `yaml:"apiVersion"`
+ Entries map[string][]*ChartVersion `yaml:"entries"`
+ Generated time.Time `yaml:"generated,omitempty"`
+ ServerInfo *ServerInfo `yaml:"serverInfo,omitempty"`
+ }
+
+ entries := make(map[string][]*ChartVersion)
+ for _, pv := range pvs {
+ metadata := &helm_module.Metadata{}
+ if err := json.Unmarshal([]byte(pv.MetadataJSON), &metadata); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ entries[metadata.Name] = append(entries[metadata.Name], &ChartVersion{
+ Metadata: *metadata,
+ Created: pv.CreatedUnix.AsTime(),
+ URLs: []string{fmt.Sprintf("%s/%s", baseURL, url.PathEscape(createFilename(metadata)))},
+ })
+ }
+
+ ctx.Resp.WriteHeader(http.StatusOK)
+ if err := yaml.NewEncoder(ctx.Resp).Encode(&Index{
+ APIVersion: "v1",
+ Entries: entries,
+ Generated: time.Now(),
+ ServerInfo: &ServerInfo{
+ ContextPath: setting.AppSubURL + "/api/packages/" + url.PathEscape(ctx.Package.Owner.Name) + "/helm",
+ },
+ }); err != nil {
+ log.Error("YAML encode failed: %v", err)
+ }
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ filename := ctx.Params("filename")
+
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeHelm,
+ Name: packages_model.SearchValue{
+ ExactMatch: true,
+ Value: ctx.Params("package"),
+ },
+ HasFileWithName: filename,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pvs[0],
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackage creates a new package
+func UploadPackage(ctx *context.Context) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ metadata, err := helm_module.ParseChartArchive(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeHelm,
+ Name: metadata.Name,
+ Version: metadata.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: createFilename(metadata),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ OverwriteExisting: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
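+// createFilename builds the lowercase chart archive filename "<name>-<version>.tgz"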
+func createFilename(metadata *helm_module.Metadata) string {
+ return strings.ToLower(fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version))
+}
diff --git a/routers/api/packages/helper/helper.go b/routers/api/packages/helper/helper.go
new file mode 100644
index 0000000..cdb6410
--- /dev/null
+++ b/routers/api/packages/helper/helper.go
@@ -0,0 +1,63 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package helper
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/context"
+)
+
+// LogAndProcessError logs an error and calls a custom callback with the processed error message.
+// If the error is an InternalServerError the message is stripped if the user is not an admin.
+func LogAndProcessError(ctx *context.Context, status int, obj any, cb func(string)) {
+ var message string
+ if err, ok := obj.(error); ok {
+ message = err.Error()
+ } else if obj != nil {
+ message = fmt.Sprintf("%s", obj)
+ }
+ if status == http.StatusInternalServerError {
+ log.ErrorWithSkip(1, message)
+
+ if setting.IsProd && (ctx.Doer == nil || !ctx.Doer.IsAdmin) {
+ message = ""
+ }
+ } else {
+ log.Debug(message)
+ }
+
+ if cb != nil {
+ cb(message)
+ }
+}
+
+// ServePackageFile serves the content of the package file.
+// If the URL is set, the request is redirected to it; otherwise the content is copied to the response.
+func ServePackageFile(ctx *context.Context, s io.ReadSeekCloser, u *url.URL, pf *packages_model.PackageFile, forceOpts ...*context.ServeHeaderOptions) {
+ if u != nil {
+ ctx.Redirect(u.String())
+ return
+ }
+
+ defer s.Close()
+
+ var opts *context.ServeHeaderOptions
+ if len(forceOpts) > 0 {
+ opts = forceOpts[0]
+ } else {
+ opts = &context.ServeHeaderOptions{
+ Filename: pf.Name,
+ LastModified: pf.CreatedUnix.AsLocalTime(),
+ }
+ }
+
+ ctx.ServeContent(s, opts)
+}
diff --git a/routers/api/packages/maven/api.go b/routers/api/packages/maven/api.go
new file mode 100644
index 0000000..167fe42
--- /dev/null
+++ b/routers/api/packages/maven/api.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package maven
+
+import (
+ "encoding/xml"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ maven_module "code.gitea.io/gitea/modules/packages/maven"
+)
+
+// MetadataResponse https://maven.apache.org/ref/3.2.5/maven-repository-metadata/repository-metadata.html
+type MetadataResponse struct {
+ XMLName xml.Name `xml:"metadata"`
+ GroupID string `xml:"groupId"`
+ ArtifactID string `xml:"artifactId"`
+ Release string `xml:"versioning>release,omitempty"`
+ Latest string `xml:"versioning>latest"`
+ Version []string `xml:"versioning>versions>version"`
+}
+
+// pds is expected to be sorted ascending by CreatedUnix
+func createMetadataResponse(pds []*packages_model.PackageDescriptor) *MetadataResponse {
+ var release *packages_model.PackageDescriptor
+
+ versions := make([]string, 0, len(pds))
+ for _, pd := range pds {
+ if !strings.HasSuffix(pd.Version.Version, "-SNAPSHOT") {
+ release = pd
+ }
+ versions = append(versions, pd.Version.Version)
+ }
+
+ latest := pds[len(pds)-1]
+
+ metadata := latest.Metadata.(*maven_module.Metadata)
+
+ resp := &MetadataResponse{
+ GroupID: metadata.GroupID,
+ ArtifactID: metadata.ArtifactID,
+ Latest: latest.Version.Version,
+ Version: versions,
+ }
+ if release != nil {
+ resp.Release = release.Version.Version
+ }
+ return resp
+}
diff --git a/routers/api/packages/maven/maven.go b/routers/api/packages/maven/maven.go
new file mode 100644
index 0000000..4181577
--- /dev/null
+++ b/routers/api/packages/maven/maven.go
@@ -0,0 +1,433 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package maven
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/http"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ maven_module "code.gitea.io/gitea/modules/packages/maven"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+const (
+ mavenMetadataFile = "maven-metadata.xml"
+ extensionMD5 = ".md5"
+ extensionSHA1 = ".sha1"
+ extensionSHA256 = ".sha256"
+ extensionSHA512 = ".sha512"
+ extensionPom = ".pom"
+ extensionJar = ".jar"
+ contentTypeJar = "application/java-archive"
+ contentTypeXML = "text/xml"
+)
+
+var (
+ errInvalidParameters = errors.New("request parameters are invalid")
+ illegalCharacters = regexp.MustCompile(`[\\/:"<>|?\*]`)
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ // The Maven client does not present the error message to the user. Log it for users with access to the server logs.
+ switch status {
+ case http.StatusBadRequest:
+ log.Warn(message)
+ case http.StatusInternalServerError:
+ log.Error(message)
+ }
+
+ ctx.PlainText(status, message)
+ })
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ handlePackageFile(ctx, true)
+}
+
+// ProvidePackageFileHeader provides only the headers describing a package
+func ProvidePackageFileHeader(ctx *context.Context) {
+ handlePackageFile(ctx, false)
+}
+
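+// handlePackageFile routes the request either to the generated maven-metadata.xml or to a stored package file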
+func handlePackageFile(ctx *context.Context, serveContent bool) {
+ params, err := extractPathParameters(ctx)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ if params.IsMeta && params.Version == "" {
+ serveMavenMetadata(ctx, params)
+ } else {
+ servePackageFile(ctx, params, serveContent)
+ }
+}
+
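+// serveMavenMetadata generates maven-metadata.xml for all versions of the package and serves it or one of its checksums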
+func serveMavenMetadata(ctx *context.Context, params parameters) {
+ // /com/foo/project/maven-metadata.xml[.md5/.sha1/.sha256/.sha512]
+
+ packageName := params.GroupID + "-" + params.ArtifactID
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeMaven, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, packages_model.ErrPackageNotExist)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ sort.Slice(pds, func(i, j int) bool {
+ // Maven and Gradle order packages by their creation timestamp and not by their version string
+ return pds[i].Version.CreatedUnix < pds[j].Version.CreatedUnix
+ })
+
+ xmlMetadata, err := xml.Marshal(createMetadataResponse(pds))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ xmlMetadataWithHeader := append([]byte(xml.Header), xmlMetadata...)
+
+ latest := pds[len(pds)-1]
+ // http.TimeFormat requires a UTC time, see https://pkg.go.dev/net/http#TimeFormat
+ lastModified := latest.Version.CreatedUnix.AsTime().UTC().Format(http.TimeFormat)
+ ctx.Resp.Header().Set("Last-Modified", lastModified)
+
+ ext := strings.ToLower(filepath.Ext(params.Filename))
+ if isChecksumExtension(ext) {
+ var hash []byte
+ switch ext {
+ case extensionMD5:
+ tmp := md5.Sum(xmlMetadataWithHeader)
+ hash = tmp[:]
+ case extensionSHA1:
+ tmp := sha1.Sum(xmlMetadataWithHeader)
+ hash = tmp[:]
+ case extensionSHA256:
+ tmp := sha256.Sum256(xmlMetadataWithHeader)
+ hash = tmp[:]
+ case extensionSHA512:
+ tmp := sha512.Sum512(xmlMetadataWithHeader)
+ hash = tmp[:]
+ }
+ ctx.PlainText(http.StatusOK, hex.EncodeToString(hash))
+ return
+ }
+
+ ctx.Resp.Header().Set("Content-Length", strconv.Itoa(len(xmlMetadataWithHeader)))
+ ctx.Resp.Header().Set("Content-Type", contentTypeXML)
+
+ _, _ = ctx.Resp.Write(xmlMetadataWithHeader)
+}
+
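+// servePackageFile serves a stored package file, the matching blob hash for checksum requests, or only the headers if serveContent is false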
+func servePackageFile(ctx *context.Context, params parameters, serveContent bool) {
+ packageName := params.GroupID + "-" + params.ArtifactID
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeMaven, packageName, params.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ filename := params.Filename
+
+ ext := strings.ToLower(filepath.Ext(filename))
+ if isChecksumExtension(ext) {
+ filename = filename[:len(filename)-len(ext)]
+ }
+
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, filename, packages_model.EmptyFileKey)
+ if err != nil {
+ if err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if isChecksumExtension(ext) {
+ var hash string
+ switch ext {
+ case extensionMD5:
+ hash = pb.HashMD5
+ case extensionSHA1:
+ hash = pb.HashSHA1
+ case extensionSHA256:
+ hash = pb.HashSHA256
+ case extensionSHA512:
+ hash = pb.HashSHA512
+ }
+ ctx.PlainText(http.StatusOK, hash)
+ return
+ }
+
+ opts := &context.ServeHeaderOptions{
+ ContentLength: &pb.Size,
+ LastModified: pf.CreatedUnix.AsLocalTime(),
+ }
+ switch ext {
+ case extensionJar:
+ opts.ContentType = contentTypeJar
+ case extensionPom:
+ opts.ContentType = contentTypeXML
+ }
+
+ if !serveContent {
+ ctx.SetServeHeaders(opts)
+ ctx.Status(http.StatusOK)
+ return
+ }
+
+ s, u, _, err := packages_service.GetPackageBlobStream(ctx, pf, pb)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ opts.Filename = pf.Name
+
+ helper.ServePackageFile(ctx, s, u, pf, opts)
+}
+
+// UploadPackageFile adds a file to the package. If the package does not exist, it gets created.
+func UploadPackageFile(ctx *context.Context) {
+ params, err := extractPathParameters(ctx)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ log.Trace("Parameters: %+v", params)
+
+ // Ignore the package index /<name>/maven-metadata.xml
+ if params.IsMeta && params.Version == "" {
+ ctx.Status(http.StatusOK)
+ return
+ }
+
+ packageName := params.GroupID + "-" + params.ArtifactID
+
+ buf, err := packages_module.CreateHashedBufferFromReader(ctx.Req.Body)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pvci := &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeMaven,
+ Name: packageName,
+ Version: params.Version,
+ },
+ SemverCompatible: false,
+ Creator: ctx.Doer,
+ }
+
+ ext := filepath.Ext(params.Filename)
+
+ // Do not upload checksum files but compare the hashes.
+ if isChecksumExtension(ext) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvci.Owner.ID, pvci.PackageType, pvci.Name, pvci.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, params.Filename[:len(params.Filename)-len(ext)], packages_model.EmptyFileKey)
+ if err != nil {
+ if err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ hash, err := io.ReadAll(buf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if (ext == extensionMD5 && pb.HashMD5 != string(hash)) ||
+ (ext == extensionSHA1 && pb.HashSHA1 != string(hash)) ||
+ (ext == extensionSHA256 && pb.HashSHA256 != string(hash)) ||
+ (ext == extensionSHA512 && pb.HashSHA512 != string(hash)) {
+ apiError(ctx, http.StatusBadRequest, "hash mismatch")
+ return
+ }
+
+ ctx.Status(http.StatusOK)
+ return
+ }
+
+ pfci := &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: params.Filename,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: false,
+ OverwriteExisting: params.IsMeta,
+ }
+
+ // If it's the package pom file, extract the metadata
+ if ext == extensionPom {
+ pfci.IsLead = true
+
+ var err error
+ pvci.Metadata, err = maven_module.ParsePackageMetaData(buf)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ if pvci.Metadata != nil {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvci.Owner.ID, pvci.PackageType, pvci.Name, pvci.Version)
+ if err != nil && err != packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if pv != nil {
+ raw, err := json.Marshal(pvci.Metadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ pv.MetadataJSON = string(raw)
+ if err := packages_model.UpdateVersion(ctx, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ pvci,
+ pfci,
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
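+// isChecksumExtension reports whether the extension belongs to a checksum file (.md5, .sha1, .sha256, .sha512)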
+func isChecksumExtension(ext string) bool {
+ return ext == extensionMD5 || ext == extensionSHA1 || ext == extensionSHA256 || ext == extensionSHA512
+}
+
+type parameters struct {
+ GroupID string
+ ArtifactID string
+ Version string
+ Filename string
+ IsMeta bool
+}
+
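+// extractPathParameters parses the request path into group ID, artifact ID, version and filename; for the top-level maven-metadata.xml the version is left empty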
+func extractPathParameters(ctx *context.Context) (parameters, error) {
+ parts := strings.Split(ctx.Params("*"), "/")
+
+ p := parameters{
+ Filename: parts[len(parts)-1],
+ }
+
+ p.IsMeta = p.Filename == mavenMetadataFile ||
+ p.Filename == mavenMetadataFile+extensionMD5 ||
+ p.Filename == mavenMetadataFile+extensionSHA1 ||
+ p.Filename == mavenMetadataFile+extensionSHA256 ||
+ p.Filename == mavenMetadataFile+extensionSHA512
+
+ parts = parts[:len(parts)-1]
+ if len(parts) == 0 {
+ return p, errInvalidParameters
+ }
+
+ p.Version = parts[len(parts)-1]
+ if p.IsMeta && !strings.HasSuffix(p.Version, "-SNAPSHOT") {
+ p.Version = ""
+ } else {
+ parts = parts[:len(parts)-1]
+ }
+
+ if illegalCharacters.MatchString(p.Version) {
+ return p, errInvalidParameters
+ }
+
+ if len(parts) < 2 {
+ return p, errInvalidParameters
+ }
+
+ p.ArtifactID = parts[len(parts)-1]
+ p.GroupID = strings.Join(parts[:len(parts)-1], ".")
+
+ if illegalCharacters.MatchString(p.GroupID) || illegalCharacters.MatchString(p.ArtifactID) {
+ return p, errInvalidParameters
+ }
+
+ return p, nil
+}
diff --git a/routers/api/packages/npm/api.go b/routers/api/packages/npm/api.go
new file mode 100644
index 0000000..b4379f3
--- /dev/null
+++ b/routers/api/packages/npm/api.go
@@ -0,0 +1,114 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package npm
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "net/url"
+ "sort"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ npm_module "code.gitea.io/gitea/modules/packages/npm"
+ "code.gitea.io/gitea/modules/setting"
+)
+
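+// createPackageMetadataResponse builds the npm metadata document for all package versions, sorted by SemVer, with the latest version providing the top-level fields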
+func createPackageMetadataResponse(registryURL string, pds []*packages_model.PackageDescriptor) *npm_module.PackageMetadata {
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ versions := make(map[string]*npm_module.PackageMetadataVersion)
+ distTags := make(map[string]string)
+ for _, pd := range pds {
+ versions[pd.SemVer.String()] = createPackageMetadataVersion(registryURL, pd)
+
+ for _, pvp := range pd.VersionProperties {
+ if pvp.Name == npm_module.TagProperty {
+ distTags[pvp.Value] = pd.Version.Version
+ }
+ }
+ }
+
+ latest := pds[len(pds)-1]
+
+ metadata := latest.Metadata.(*npm_module.Metadata)
+
+ return &npm_module.PackageMetadata{
+ ID: latest.Package.Name,
+ Name: latest.Package.Name,
+ DistTags: distTags,
+ Description: metadata.Description,
+ Readme: metadata.Readme,
+ Homepage: metadata.ProjectURL,
+ Author: npm_module.User{Name: metadata.Author},
+ License: metadata.License,
+ Versions: versions,
+ Repository: metadata.Repository,
+ }
+}
+
+func createPackageMetadataVersion(registryURL string, pd *packages_model.PackageDescriptor) *npm_module.PackageMetadataVersion {
+ hashBytes, _ := hex.DecodeString(pd.Files[0].Blob.HashSHA512)
+
+ metadata := pd.Metadata.(*npm_module.Metadata)
+
+ return &npm_module.PackageMetadataVersion{
+ ID: fmt.Sprintf("%s@%s", pd.Package.Name, pd.Version.Version),
+ Name: pd.Package.Name,
+ Version: pd.Version.Version,
+ Description: metadata.Description,
+ Author: npm_module.User{Name: metadata.Author},
+ Homepage: metadata.ProjectURL,
+ License: metadata.License,
+ Dependencies: metadata.Dependencies,
+ BundleDependencies: metadata.BundleDependencies,
+ DevDependencies: metadata.DevelopmentDependencies,
+ PeerDependencies: metadata.PeerDependencies,
+ OptionalDependencies: metadata.OptionalDependencies,
+ Readme: metadata.Readme,
+ Bin: metadata.Bin,
+ Dist: npm_module.PackageDistribution{
+ Shasum: pd.Files[0].Blob.HashSHA1,
+ Integrity: "sha512-" + base64.StdEncoding.EncodeToString(hashBytes),
+ Tarball: fmt.Sprintf("%s/%s/-/%s/%s", registryURL, url.QueryEscape(pd.Package.Name), url.PathEscape(pd.Version.Version), url.PathEscape(pd.Files[0].File.LowerName)),
+ },
+ }
+}
+
+func createPackageSearchResponse(pds []*packages_model.PackageDescriptor, total int64) *npm_module.PackageSearch {
+ objects := make([]*npm_module.PackageSearchObject, 0, len(pds))
+ for _, pd := range pds {
+ metadata := pd.Metadata.(*npm_module.Metadata)
+
+ scope := metadata.Scope
+ if scope == "" {
+ scope = "unscoped"
+ }
+
+ objects = append(objects, &npm_module.PackageSearchObject{
+ Package: &npm_module.PackageSearchPackage{
+ Scope: scope,
+ Name: metadata.Name,
+ Version: pd.Version.Version,
+ Date: pd.Version.CreatedUnix.AsLocalTime(),
+ Description: metadata.Description,
+ Author: npm_module.User{Name: metadata.Author},
+ Publisher: npm_module.User{Name: pd.Owner.Name},
+ Maintainers: []npm_module.User{}, // npm cli needs this field
+ Keywords: metadata.Keywords,
+ Links: &npm_module.PackageSearchPackageLinks{
+ Registry: setting.AppURL + "api/packages/" + pd.Owner.Name + "/npm",
+ Homepage: metadata.ProjectURL,
+ },
+ },
+ })
+ }
+
+ return &npm_module.PackageSearch{
+ Objects: objects,
+ Total: total,
+ }
+}
diff --git a/routers/api/packages/npm/npm.go b/routers/api/packages/npm/npm.go
new file mode 100644
index 0000000..84acfff
--- /dev/null
+++ b/routers/api/packages/npm/npm.go
@@ -0,0 +1,462 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package npm
+
+import (
+ "bytes"
+ std_ctx "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ npm_module "code.gitea.io/gitea/modules/packages/npm"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/hashicorp/go-version"
+)
+
+// errInvalidTagName indicates an invalid tag name
+var errInvalidTagName = errors.New("the tag name is invalid")
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, map[string]string{
+ "error": message,
+ })
+ })
+}
+
+// packageNameFromParams gets the package name from the url parameters
+// Variations: /name/, /@scope/name/, /@scope%2Fname/
+func packageNameFromParams(ctx *context.Context) string {
+ scope := ctx.Params("scope")
+ id := ctx.Params("id")
+ if scope != "" {
+ return fmt.Sprintf("@%s/%s", scope, id)
+ }
+ return id
+}
+
+// PackageMetadata returns the metadata for a single package
+func PackageMetadata(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNpm, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createPackageMetadataResponse(
+ setting.AppURL+"api/packages/"+ctx.Package.Owner.Name+"/npm",
+ pds,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+ packageVersion := ctx.Params("version")
+ filename := ctx.Params("filename")
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNpm,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// DownloadPackageFileByName finds the version and serves the contents of a package
+func DownloadPackageFileByName(ctx *context.Context) {
+ filename := ctx.Params("filename")
+
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeNpm,
+ Name: packages_model.SearchValue{
+ ExactMatch: true,
+ Value: packageNameFromParams(ctx),
+ },
+ HasFileWithName: filename,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pvs[0],
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackage creates a new package
+func UploadPackage(ctx *context.Context) {
+ npmPackage, err := npm_module.ParsePackage(ctx.Req.Body)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ repo, err := repo_model.GetRepositoryByURL(ctx, npmPackage.Metadata.Repository.URL)
+ if err == nil {
+ canWrite := repo.OwnerID == ctx.Doer.ID
+
+ if !canWrite {
+ perms, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ canWrite = perms.CanWrite(unit.TypePackages)
+ }
+
+ if !canWrite {
+ apiError(ctx, http.StatusForbidden, "no permission to upload this package")
+ return
+ }
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(bytes.NewReader(npmPackage.Data))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pv, _, err := packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNpm,
+ Name: npmPackage.Name,
+ Version: npmPackage.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: npmPackage.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: npmPackage.Filename,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ for _, tag := range npmPackage.DistTags {
+ if err := setPackageTag(ctx, tag, pv, false); err != nil {
+ if err == errInvalidTagName {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ if repo != nil {
+ if err := packages_model.SetRepositoryLink(ctx, pv.PackageID, repo.ID); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// DeletePreview does nothing.
+// After deleting a version the client tells the server which package versions it still knows about.
+func DeletePreview(ctx *context.Context) {
+ ctx.Status(http.StatusOK)
+}
+
+// DeletePackageVersion deletes the package version
+func DeletePackageVersion(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+ packageVersion := ctx.Params("version")
+
+ err := packages_service.RemovePackageVersionByNameAndVersion(
+ ctx,
+ ctx.Doer,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNpm,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
+// DeletePackage deletes the package and all versions
+func DeletePackage(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNpm, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ for _, pv := range pvs {
+ if err := packages_service.RemovePackageVersion(ctx, ctx.Doer, pv); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
+// ListPackageTags returns all tags for a package
+func ListPackageTags(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNpm, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ tags := make(map[string]string)
+ for _, pv := range pvs {
+ pvps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeVersion, pv.ID, npm_module.TagProperty)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ for _, pvp := range pvps {
+ tags[pvp.Value] = pv.Version
+ }
+ }
+
+ ctx.JSON(http.StatusOK, tags)
+}
+
+// AddPackageTag adds a tag to the package
+func AddPackageTag(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+
+ body, err := io.ReadAll(ctx.Req.Body)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ version := strings.Trim(string(body), "\"") // the body contains the version as a quoted JSON string
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeNpm, packageName, version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if err := setPackageTag(ctx, ctx.Params("tag"), pv, false); err != nil {
+ if err == errInvalidTagName {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+}
+
+// DeletePackageTag deletes a package tag
+func DeletePackageTag(ctx *context.Context) {
+ packageName := packageNameFromParams(ctx)
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNpm, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) != 0 {
+ if err := setPackageTag(ctx, ctx.Params("tag"), pvs[0], true); err != nil {
+ if err == errInvalidTagName {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ }
+}
+
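+// setPackageTag moves the dist-tag to the given package version or, if deleteOnly is set, only removes it; empty tags and tags that parse as a version are rejected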
+func setPackageTag(ctx std_ctx.Context, tag string, pv *packages_model.PackageVersion, deleteOnly bool) error {
+ if tag == "" {
+ return errInvalidTagName
+ }
+ _, err := version.NewVersion(tag)
+ if err == nil {
+ return errInvalidTagName
+ }
+
+ return db.WithTx(ctx, func(ctx std_ctx.Context) error {
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ PackageID: pv.PackageID,
+ Properties: map[string]string{
+ npm_module.TagProperty: tag,
+ },
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ return err
+ }
+
+ if len(pvs) == 1 {
+ pvps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeVersion, pvs[0].ID, npm_module.TagProperty)
+ if err != nil {
+ return err
+ }
+
+ for _, pvp := range pvps {
+ if pvp.Value == tag {
+ if err := packages_model.DeletePropertyByID(ctx, pvp.ID); err != nil {
+ return err
+ }
+ break
+ }
+ }
+ }
+
+ if !deleteOnly {
+ _, err = packages_model.InsertProperty(ctx, packages_model.PropertyTypeVersion, pv.ID, npm_module.TagProperty, tag)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
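+// PackageSearch searches the latest versions of the owner's npm packages matching the query text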
+func PackageSearch(ctx *context.Context) {
+ pvs, total, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeNpm,
+ IsInternal: optional.Some(false),
+ Name: packages_model.SearchValue{
+ ExactMatch: false,
+ Value: ctx.FormTrim("text"),
+ },
+ Paginator: db.NewAbsoluteListOptions(
+ ctx.FormInt("from"),
+ ctx.FormInt("size"),
+ ),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createPackageSearchResponse(
+ pds,
+ total,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
diff --git a/routers/api/packages/nuget/api_v2.go b/routers/api/packages/nuget/api_v2.go
new file mode 100644
index 0000000..a726065
--- /dev/null
+++ b/routers/api/packages/nuget/api_v2.go
@@ -0,0 +1,402 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "encoding/xml"
+ "strings"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ nuget_module "code.gitea.io/gitea/modules/packages/nuget"
+)
+
+type AtomTitle struct {
+ Type string `xml:"type,attr"`
+ Text string `xml:",chardata"`
+}
+
+type ServiceCollection struct {
+ Href string `xml:"href,attr"`
+ Title AtomTitle `xml:"atom:title"`
+}
+
+type ServiceWorkspace struct {
+ Title AtomTitle `xml:"atom:title"`
+ Collection ServiceCollection `xml:"collection"`
+}
+
+type ServiceIndexResponseV2 struct {
+ XMLName xml.Name `xml:"service"`
+ Base string `xml:"base,attr"`
+ Xmlns string `xml:"xmlns,attr"`
+ XmlnsAtom string `xml:"xmlns:atom,attr"`
+ Workspace ServiceWorkspace `xml:"workspace"`
+}
+
+type EdmxPropertyRef struct {
+ Name string `xml:"Name,attr"`
+}
+
+type EdmxProperty struct {
+ Name string `xml:"Name,attr"`
+ Type string `xml:"Type,attr"`
+ Nullable bool `xml:"Nullable,attr"`
+}
+
+type EdmxEntityType struct {
+ Name string `xml:"Name,attr"`
+ HasStream bool `xml:"m:HasStream,attr"`
+ Keys []EdmxPropertyRef `xml:"Key>PropertyRef"`
+ Properties []EdmxProperty `xml:"Property"`
+}
+
+type EdmxFunctionParameter struct {
+ Name string `xml:"Name,attr"`
+ Type string `xml:"Type,attr"`
+}
+
+type EdmxFunctionImport struct {
+ Name string `xml:"Name,attr"`
+ ReturnType string `xml:"ReturnType,attr"`
+ EntitySet string `xml:"EntitySet,attr"`
+ Parameter []EdmxFunctionParameter `xml:"Parameter"`
+}
+
+type EdmxEntitySet struct {
+ Name string `xml:"Name,attr"`
+ EntityType string `xml:"EntityType,attr"`
+}
+
+type EdmxEntityContainer struct {
+ Name string `xml:"Name,attr"`
+ IsDefaultEntityContainer bool `xml:"m:IsDefaultEntityContainer,attr"`
+ EntitySet EdmxEntitySet `xml:"EntitySet"`
+ FunctionImports []EdmxFunctionImport `xml:"FunctionImport"`
+}
+
+type EdmxSchema struct {
+ Xmlns string `xml:"xmlns,attr"`
+ Namespace string `xml:"Namespace,attr"`
+ EntityType *EdmxEntityType `xml:"EntityType,omitempty"`
+ EntityContainer *EdmxEntityContainer `xml:"EntityContainer,omitempty"`
+}
+
+type EdmxDataServices struct {
+ XmlnsM string `xml:"xmlns:m,attr"`
+ DataServiceVersion string `xml:"m:DataServiceVersion,attr"`
+ MaxDataServiceVersion string `xml:"m:MaxDataServiceVersion,attr"`
+ Schema []EdmxSchema `xml:"Schema"`
+}
+
+type EdmxMetadata struct {
+ XMLName xml.Name `xml:"edmx:Edmx"`
+ XmlnsEdmx string `xml:"xmlns:edmx,attr"`
+ Version string `xml:"Version,attr"`
+ DataServices EdmxDataServices `xml:"edmx:DataServices"`
+}
+
+var Metadata = &EdmxMetadata{
+ XmlnsEdmx: "http://schemas.microsoft.com/ado/2007/06/edmx",
+ Version: "1.0",
+ DataServices: EdmxDataServices{
+ XmlnsM: "http://schemas.microsoft.com/ado/2007/08/dataservices/metadata",
+ DataServiceVersion: "2.0",
+ MaxDataServiceVersion: "2.0",
+ Schema: []EdmxSchema{
+ {
+ Xmlns: "http://schemas.microsoft.com/ado/2006/04/edm",
+ Namespace: "NuGetGallery.OData",
+ EntityType: &EdmxEntityType{
+ Name: "V2FeedPackage",
+ HasStream: true,
+ Keys: []EdmxPropertyRef{
+ {Name: "Id"},
+ {Name: "Version"},
+ },
+ Properties: []EdmxProperty{
+ {
+ Name: "Id",
+ Type: "Edm.String",
+ },
+ {
+ Name: "Version",
+ Type: "Edm.String",
+ },
+ {
+ Name: "NormalizedVersion",
+ Type: "Edm.String",
+ Nullable: true,
+ },
+ {
+ Name: "Authors",
+ Type: "Edm.String",
+ Nullable: true,
+ },
+ {
+ Name: "Created",
+ Type: "Edm.DateTime",
+ },
+ {
+ Name: "Dependencies",
+ Type: "Edm.String",
+ },
+ {
+ Name: "Description",
+ Type: "Edm.String",
+ },
+ {
+ Name: "DownloadCount",
+ Type: "Edm.Int64",
+ },
+ {
+ Name: "LastUpdated",
+ Type: "Edm.DateTime",
+ },
+ {
+ Name: "Published",
+ Type: "Edm.DateTime",
+ },
+ {
+ Name: "PackageSize",
+ Type: "Edm.Int64",
+ },
+ {
+ Name: "ProjectUrl",
+ Type: "Edm.String",
+ Nullable: true,
+ },
+ {
+ Name: "ReleaseNotes",
+ Type: "Edm.String",
+ Nullable: true,
+ },
+ {
+ Name: "RequireLicenseAcceptance",
+ Type: "Edm.Boolean",
+ Nullable: false,
+ },
+ {
+ Name: "Title",
+ Type: "Edm.String",
+ Nullable: true,
+ },
+ {
+ Name: "VersionDownloadCount",
+ Type: "Edm.Int64",
+ Nullable: false,
+ },
+ },
+ },
+ },
+ {
+ Xmlns: "http://schemas.microsoft.com/ado/2006/04/edm",
+ Namespace: "NuGetGallery",
+ EntityContainer: &EdmxEntityContainer{
+ Name: "V2FeedContext",
+ IsDefaultEntityContainer: true,
+ EntitySet: EdmxEntitySet{
+ Name: "Packages",
+ EntityType: "NuGetGallery.OData.V2FeedPackage",
+ },
+ FunctionImports: []EdmxFunctionImport{
+ {
+ Name: "Search",
+ ReturnType: "Collection(NuGetGallery.OData.V2FeedPackage)",
+ EntitySet: "Packages",
+ Parameter: []EdmxFunctionParameter{
+ {
+ Name: "searchTerm",
+ Type: "Edm.String",
+ },
+ },
+ },
+ {
+ Name: "FindPackagesById",
+ ReturnType: "Collection(NuGetGallery.OData.V2FeedPackage)",
+ EntitySet: "Packages",
+ Parameter: []EdmxFunctionParameter{
+ {
+ Name: "id",
+ Type: "Edm.String",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+type FeedEntryCategory struct {
+ Term string `xml:"term,attr"`
+ Scheme string `xml:"scheme,attr"`
+}
+
+type FeedEntryLink struct {
+ Rel string `xml:"rel,attr"`
+ Href string `xml:"href,attr"`
+}
+
+type TypedValue[T any] struct {
+ Type string `xml:"type,attr,omitempty"`
+ Value T `xml:",chardata"`
+}
+
+type FeedEntryProperties struct {
+ Version string `xml:"d:Version"`
+ NormalizedVersion string `xml:"d:NormalizedVersion"`
+ Authors string `xml:"d:Authors"`
+ Dependencies string `xml:"d:Dependencies"`
+ Description string `xml:"d:Description"`
+ VersionDownloadCount TypedValue[int64] `xml:"d:VersionDownloadCount"`
+ DownloadCount TypedValue[int64] `xml:"d:DownloadCount"`
+ PackageSize TypedValue[int64] `xml:"d:PackageSize"`
+ Created TypedValue[time.Time] `xml:"d:Created"`
+ LastUpdated TypedValue[time.Time] `xml:"d:LastUpdated"`
+ Published TypedValue[time.Time] `xml:"d:Published"`
+ ProjectURL string `xml:"d:ProjectUrl,omitempty"`
+ ReleaseNotes string `xml:"d:ReleaseNotes,omitempty"`
+ RequireLicenseAcceptance TypedValue[bool] `xml:"d:RequireLicenseAcceptance"`
+ Title string `xml:"d:Title"`
+}
+
+type FeedEntry struct {
+ XMLName xml.Name `xml:"entry"`
+ Xmlns string `xml:"xmlns,attr,omitempty"`
+ XmlnsD string `xml:"xmlns:d,attr,omitempty"`
+ XmlnsM string `xml:"xmlns:m,attr,omitempty"`
+ Base string `xml:"xml:base,attr,omitempty"`
+ ID string `xml:"id"`
+ Category FeedEntryCategory `xml:"category"`
+ Links []FeedEntryLink `xml:"link"`
+ Title TypedValue[string] `xml:"title"`
+ Updated time.Time `xml:"updated"`
+ Author string `xml:"author>name"`
+ Summary string `xml:"summary"`
+ Properties *FeedEntryProperties `xml:"m:properties"`
+ Content string `xml:",innerxml"`
+}
+
+type FeedResponse struct {
+ XMLName xml.Name `xml:"feed"`
+ Xmlns string `xml:"xmlns,attr,omitempty"`
+ XmlnsD string `xml:"xmlns:d,attr,omitempty"`
+ XmlnsM string `xml:"xmlns:m,attr,omitempty"`
+ Base string `xml:"xml:base,attr,omitempty"`
+ ID string `xml:"id"`
+ Title TypedValue[string] `xml:"title"`
+ Updated time.Time `xml:"updated"`
+ Links []FeedEntryLink `xml:"link"`
+ Entries []*FeedEntry `xml:"entry"`
+ Count int64 `xml:"m:count"`
+}
+
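+// createFeedResponse builds the V2 Atom feed for the given packages, including a "next" link if more results are available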
+func createFeedResponse(l *linkBuilder, totalEntries int64, pds []*packages_model.PackageDescriptor) *FeedResponse {
+ entries := make([]*FeedEntry, 0, len(pds))
+ for _, pd := range pds {
+ entries = append(entries, createEntry(l, pd, false))
+ }
+
+ links := []FeedEntryLink{
+ {Rel: "self", Href: l.Base},
+ }
+ if l.Next != nil {
+ links = append(links, FeedEntryLink{
+ Rel: "next",
+ Href: l.GetNextURL(),
+ })
+ }
+
+ return &FeedResponse{
+ Xmlns: "http://www.w3.org/2005/Atom",
+ Base: l.Base,
+ XmlnsD: "http://schemas.microsoft.com/ado/2007/08/dataservices",
+ XmlnsM: "http://schemas.microsoft.com/ado/2007/08/dataservices/metadata",
+ ID: "http://schemas.datacontract.org/2004/07/",
+ Updated: time.Now(),
+ Links: links,
+ Count: totalEntries,
+ Entries: entries,
+ }
+}
+
+func createEntryResponse(l *linkBuilder, pd *packages_model.PackageDescriptor) *FeedEntry {
+ return createEntry(l, pd, true)
+}
+
+func createEntry(l *linkBuilder, pd *packages_model.PackageDescriptor, withNamespace bool) *FeedEntry {
+ metadata := pd.Metadata.(*nuget_module.Metadata)
+
+ id := l.GetPackageMetadataURL(pd.Package.Name, pd.Version.Version)
+
+ // Workaround to force a self-closing tag to satisfy XmlReader.IsEmptyElement used by the NuGet client.
+ // https://learn.microsoft.com/en-us/dotnet/api/system.xml.xmlreader.isemptyelement
+ content := `<content type="application/zip" src="` + l.GetPackageDownloadURL(pd.Package.Name, pd.Version.Version) + `"/>`
+
+ createdValue := TypedValue[time.Time]{
+ Type: "Edm.DateTime",
+ Value: pd.Version.CreatedUnix.AsLocalTime(),
+ }
+
+ entry := &FeedEntry{
+ ID: id,
+ Category: FeedEntryCategory{Term: "NuGetGallery.OData.V2FeedPackage", Scheme: "http://schemas.microsoft.com/ado/2007/08/dataservices/scheme"},
+ Links: []FeedEntryLink{
+ {Rel: "self", Href: id},
+ {Rel: "edit", Href: id},
+ },
+ Title: TypedValue[string]{Type: "text", Value: pd.Package.Name},
+ Updated: pd.Version.CreatedUnix.AsLocalTime(),
+ Author: metadata.Authors,
+ Content: content,
+ Properties: &FeedEntryProperties{
+ Version: pd.Version.Version,
+ NormalizedVersion: pd.Version.Version,
+ Authors: metadata.Authors,
+ Dependencies: buildDependencyString(metadata),
+ Description: metadata.Description,
+ VersionDownloadCount: TypedValue[int64]{Type: "Edm.Int64", Value: pd.Version.DownloadCount},
+ DownloadCount: TypedValue[int64]{Type: "Edm.Int64", Value: pd.Version.DownloadCount},
+ PackageSize: TypedValue[int64]{Type: "Edm.Int64", Value: pd.CalculateBlobSize()},
+ Created: createdValue,
+ LastUpdated: createdValue,
+ Published: createdValue,
+ ProjectURL: metadata.ProjectURL,
+ ReleaseNotes: metadata.ReleaseNotes,
+ RequireLicenseAcceptance: TypedValue[bool]{Type: "Edm.Boolean", Value: metadata.RequireLicenseAcceptance},
+ Title: pd.Package.Name,
+ },
+ }
+
+ if withNamespace {
+ entry.Xmlns = "http://www.w3.org/2005/Atom"
+ entry.Base = l.Base
+ entry.XmlnsD = "http://schemas.microsoft.com/ado/2007/08/dataservices"
+ entry.XmlnsM = "http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"
+ }
+
+ return entry
+}
+
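+// buildDependencyString flattens the dependency groups into the "id:version:group|..." format used by the V2 feed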
+func buildDependencyString(metadata *nuget_module.Metadata) string {
+ var b strings.Builder
+ first := true
+ for group, deps := range metadata.Dependencies {
+ for _, dep := range deps {
+ if !first {
+ b.WriteByte('|')
+ }
+ first = false
+
+ b.WriteString(dep.ID)
+ b.WriteByte(':')
+ b.WriteString(dep.Version)
+ b.WriteByte(':')
+ b.WriteString(group)
+ }
+ }
+ return b.String()
+}
diff --git a/routers/api/packages/nuget/api_v3.go b/routers/api/packages/nuget/api_v3.go
new file mode 100644
index 0000000..2fe25dc
--- /dev/null
+++ b/routers/api/packages/nuget/api_v3.go
@@ -0,0 +1,255 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "sort"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ nuget_module "code.gitea.io/gitea/modules/packages/nuget"
+
+ "golang.org/x/text/collate"
+ "golang.org/x/text/language"
+)
+
+// https://docs.microsoft.com/en-us/nuget/api/service-index#resources
+type ServiceIndexResponseV3 struct {
+ Version string `json:"version"`
+ Resources []ServiceResource `json:"resources"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/service-index#resource
+type ServiceResource struct {
+ ID string `json:"@id"`
+ Type string `json:"@type"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#response
+type RegistrationIndexResponse struct {
+ RegistrationIndexURL string `json:"@id"`
+ Type []string `json:"@type"`
+ Count int `json:"count"`
+ Pages []*RegistrationIndexPage `json:"items"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#registration-page-object
+type RegistrationIndexPage struct {
+ RegistrationPageURL string `json:"@id"`
+ Lower string `json:"lower"`
+ Upper string `json:"upper"`
+ Count int `json:"count"`
+ Items []*RegistrationIndexPageItem `json:"items"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#registration-leaf-object-in-a-page
+type RegistrationIndexPageItem struct {
+ RegistrationLeafURL string `json:"@id"`
+ PackageContentURL string `json:"packageContent"`
+ CatalogEntry *CatalogEntry `json:"catalogEntry"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#catalog-entry
+type CatalogEntry struct {
+ CatalogLeafURL string `json:"@id"`
+ PackageContentURL string `json:"packageContent"`
+ ID string `json:"id"`
+ Version string `json:"version"`
+ Description string `json:"description"`
+ ReleaseNotes string `json:"releaseNotes"`
+ Authors string `json:"authors"`
+ RequireLicenseAcceptance bool `json:"requireLicenseAcceptance"`
+ ProjectURL string `json:"projectURL"`
+ DependencyGroups []*PackageDependencyGroup `json:"dependencyGroups"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#package-dependency-group
+type PackageDependencyGroup struct {
+ TargetFramework string `json:"targetFramework"`
+ Dependencies []*PackageDependency `json:"dependencies"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#package-dependency
+type PackageDependency struct {
+ ID string `json:"id"`
+ Range string `json:"range"`
+}
+
+func createRegistrationIndexResponse(l *linkBuilder, pds []*packages_model.PackageDescriptor) *RegistrationIndexResponse {
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ items := make([]*RegistrationIndexPageItem, 0, len(pds))
+ for _, p := range pds {
+ items = append(items, createRegistrationIndexPageItem(l, p))
+ }
+
+ return &RegistrationIndexResponse{
+ RegistrationIndexURL: l.GetRegistrationIndexURL(pds[0].Package.Name),
+ Type: []string{"catalog:CatalogRoot", "PackageRegistration", "catalog:Permalink"},
+ Count: 1,
+ Pages: []*RegistrationIndexPage{
+ {
+ RegistrationPageURL: l.GetRegistrationIndexURL(pds[0].Package.Name),
+ Count: len(pds),
+ Lower: pds[0].Version.Version,
+ Upper: pds[len(pds)-1].Version.Version,
+ Items: items,
+ },
+ },
+ }
+}
+
+func createRegistrationIndexPageItem(l *linkBuilder, pd *packages_model.PackageDescriptor) *RegistrationIndexPageItem {
+ metadata := pd.Metadata.(*nuget_module.Metadata)
+
+ return &RegistrationIndexPageItem{
+ RegistrationLeafURL: l.GetRegistrationLeafURL(pd.Package.Name, pd.Version.Version),
+ PackageContentURL: l.GetPackageDownloadURL(pd.Package.Name, pd.Version.Version),
+ CatalogEntry: &CatalogEntry{
+ CatalogLeafURL: l.GetRegistrationLeafURL(pd.Package.Name, pd.Version.Version),
+ PackageContentURL: l.GetPackageDownloadURL(pd.Package.Name, pd.Version.Version),
+ ID: pd.Package.Name,
+ Version: pd.Version.Version,
+ Description: metadata.Description,
+ ReleaseNotes: metadata.ReleaseNotes,
+ Authors: metadata.Authors,
+ ProjectURL: metadata.ProjectURL,
+ DependencyGroups: createDependencyGroups(pd),
+ },
+ }
+}
+
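+// createDependencyGroups converts the package dependency metadata into registration dependency groups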
+func createDependencyGroups(pd *packages_model.PackageDescriptor) []*PackageDependencyGroup {
+ metadata := pd.Metadata.(*nuget_module.Metadata)
+
+ dependencyGroups := make([]*PackageDependencyGroup, 0, len(metadata.Dependencies))
+ for k, v := range metadata.Dependencies {
+ dependencies := make([]*PackageDependency, 0, len(v))
+ for _, dep := range v {
+ dependencies = append(dependencies, &PackageDependency{
+ ID: dep.ID,
+ Range: dep.Version,
+ })
+ }
+
+ dependencyGroups = append(dependencyGroups, &PackageDependencyGroup{
+ TargetFramework: k,
+ Dependencies: dependencies,
+ })
+ }
+ return dependencyGroups
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#registration-leaf
+type RegistrationLeafResponse struct {
+ RegistrationLeafURL string `json:"@id"`
+ Type []string `json:"@type"`
+ Listed bool `json:"listed"`
+ PackageContentURL string `json:"packageContent"`
+ Published time.Time `json:"published"`
+ RegistrationIndexURL string `json:"registration"`
+}
+
+func createRegistrationLeafResponse(l *linkBuilder, pd *packages_model.PackageDescriptor) *RegistrationLeafResponse {
+ return &RegistrationLeafResponse{
+ Type: []string{"Package", "http://schema.nuget.org/catalog#Permalink"},
+ Listed: true,
+ Published: pd.Version.CreatedUnix.AsLocalTime(),
+ RegistrationLeafURL: l.GetRegistrationLeafURL(pd.Package.Name, pd.Version.Version),
+ PackageContentURL: l.GetPackageDownloadURL(pd.Package.Name, pd.Version.Version),
+ RegistrationIndexURL: l.GetRegistrationIndexURL(pd.Package.Name),
+ }
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/package-base-address-resource#response
+type PackageVersionsResponse struct {
+ Versions []string `json:"versions"`
+}
+
+func createPackageVersionsResponse(pvs []*packages_model.PackageVersion) *PackageVersionsResponse {
+ versions := make([]string, 0, len(pvs))
+ for _, pv := range pvs {
+ versions = append(versions, pv.Version)
+ }
+
+ return &PackageVersionsResponse{
+ Versions: versions,
+ }
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/search-query-service-resource#response
+type SearchResultResponse struct {
+ TotalHits int64 `json:"totalHits"`
+ Data []*SearchResult `json:"data"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/search-query-service-resource#search-result
+type SearchResult struct {
+ ID string `json:"id"`
+ Version string `json:"version"`
+ Versions []*SearchResultVersion `json:"versions"`
+ Description string `json:"description"`
+ Authors string `json:"authors"`
+ ProjectURL string `json:"projectURL"`
+ RegistrationIndexURL string `json:"registration"`
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/search-query-service-resource#search-result
+type SearchResultVersion struct {
+ RegistrationLeafURL string `json:"@id"`
+ Version string `json:"version"`
+ Downloads int64 `json:"downloads"`
+}
+
+func createSearchResultResponse(l *linkBuilder, totalHits int64, pds []*packages_model.PackageDescriptor) *SearchResultResponse {
+ grouped := make(map[string][]*packages_model.PackageDescriptor)
+ for _, pd := range pds {
+ grouped[pd.Package.Name] = append(grouped[pd.Package.Name], pd)
+ }
+
+ keys := make([]string, 0, len(grouped))
+ for key := range grouped {
+ keys = append(keys, key)
+ }
+ collate.New(language.English, collate.IgnoreCase).SortStrings(keys)
+
+ data := make([]*SearchResult, 0, len(pds))
+ for _, key := range keys {
+ data = append(data, createSearchResult(l, grouped[key]))
+ }
+
+ return &SearchResultResponse{
+ TotalHits: totalHits,
+ Data: data,
+ }
+}
+
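+// createSearchResult combines all versions of a package into a single search result, using the highest SemVer as the latest version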
+func createSearchResult(l *linkBuilder, pds []*packages_model.PackageDescriptor) *SearchResult {
+ latest := pds[0]
+ versions := make([]*SearchResultVersion, 0, len(pds))
+ for _, pd := range pds {
+ if latest.SemVer.LessThan(pd.SemVer) {
+ latest = pd
+ }
+
+ versions = append(versions, &SearchResultVersion{
+ RegistrationLeafURL: l.GetRegistrationLeafURL(pd.Package.Name, pd.Version.Version),
+ Version: pd.Version.Version,
+ })
+ }
+
+ metadata := latest.Metadata.(*nuget_module.Metadata)
+
+ return &SearchResult{
+ ID: latest.Package.Name,
+ Version: latest.Version.Version,
+ Versions: versions,
+ Description: metadata.Description,
+ Authors: metadata.Authors,
+ ProjectURL: metadata.ProjectURL,
+ RegistrationIndexURL: l.GetRegistrationIndexURL(latest.Package.Name),
+ }
+}
diff --git a/routers/api/packages/nuget/auth.go b/routers/api/packages/nuget/auth.go
new file mode 100644
index 0000000..1bb68d0
--- /dev/null
+++ b/routers/api/packages/nuget/auth.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "net/http"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/services/auth"
+)
+
+var _ auth.Method = &Auth{}
+
+type Auth struct{}
+
+func (a *Auth) Name() string {
+ return "nuget"
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/package-publish-resource#request-parameters
+func (a *Auth) Verify(req *http.Request, w http.ResponseWriter, store auth.DataStore, sess auth.SessionStore) (*user_model.User, error) {
+ token, err := auth_model.GetAccessTokenBySHA(req.Context(), req.Header.Get("X-NuGet-ApiKey"))
+ if err != nil {
+ if !(auth_model.IsErrAccessTokenNotExist(err) || auth_model.IsErrAccessTokenEmpty(err)) {
+ log.Error("GetAccessTokenBySHA: %v", err)
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ u, err := user_model.GetUserByID(req.Context(), token.UID)
+ if err != nil {
+ log.Error("GetUserByID: %v", err)
+ return nil, err
+ }
+
+ token.UpdatedUnix = timeutil.TimeStampNow()
+ if err := auth_model.UpdateAccessToken(req.Context(), token); err != nil {
+ log.Error("UpdateAccessToken: %v", err)
+ }
+
+ return u, nil
+}
diff --git a/routers/api/packages/nuget/links.go b/routers/api/packages/nuget/links.go
new file mode 100644
index 0000000..4c573fe
--- /dev/null
+++ b/routers/api/packages/nuget/links.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "fmt"
+ "net/url"
+)
+
+type nextOptions struct {
+ Path string
+ Query url.Values
+}
+
+type linkBuilder struct {
+ Base string
+ Next *nextOptions
+}
+
+// GetRegistrationIndexURL builds the registration index url
+func (l *linkBuilder) GetRegistrationIndexURL(id string) string {
+ return fmt.Sprintf("%s/registration/%s/index.json", l.Base, id)
+}
+
+// GetRegistrationLeafURL builds the registration leaf url
+func (l *linkBuilder) GetRegistrationLeafURL(id, version string) string {
+ return fmt.Sprintf("%s/registration/%s/%s.json", l.Base, id, version)
+}
+
+// GetPackageDownloadURL builds the download url
+func (l *linkBuilder) GetPackageDownloadURL(id, version string) string {
+ return fmt.Sprintf("%s/package/%s/%s/%s.%s.nupkg", l.Base, id, version, id, version)
+}
+
+// GetPackageMetadataURL builds the package metadata url
+func (l *linkBuilder) GetPackageMetadataURL(id, version string) string {
+ return fmt.Sprintf("%s/Packages(Id='%s',Version='%s')", l.Base, id, version)
+}
+
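+// GetNextURL builds the url of the next results page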
+func (l *linkBuilder) GetNextURL() string {
+ u, _ := url.Parse(l.Base)
+ u = u.JoinPath(l.Next.Path)
+ q := u.Query()
+ for k, vs := range l.Next.Query {
+ for _, v := range vs {
+ q.Add(k, v)
+ }
+ }
+ u.RawQuery = q.Encode()
+ return u.String()
+}
diff --git a/routers/api/packages/nuget/nuget.go b/routers/api/packages/nuget/nuget.go
new file mode 100644
index 0000000..0d7212d
--- /dev/null
+++ b/routers/api/packages/nuget/nuget.go
@@ -0,0 +1,710 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package nuget
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ nuget_model "code.gitea.io/gitea/models/packages/nuget"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ nuget_module "code.gitea.io/gitea/modules/packages/nuget"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, map[string]string{
+ "Message": message,
+ })
+ })
+}
+
+func xmlResponse(ctx *context.Context, status int, obj any) { //nolint:unparam
+ ctx.Resp.Header().Set("Content-Type", "application/atom+xml; charset=utf-8")
+ ctx.Resp.WriteHeader(status)
+ if _, err := ctx.Resp.Write([]byte(xml.Header)); err != nil {
+ log.Error("Write failed: %v", err)
+ }
+ if err := xml.NewEncoder(ctx.Resp).Encode(obj); err != nil {
+ log.Error("XML encode failed: %v", err)
+ }
+}
+
+// https://github.com/NuGet/NuGet.Client/blob/dev/src/NuGet.Core/NuGet.Protocol/LegacyFeed/V2FeedQueryBuilder.cs
+func ServiceIndexV2(ctx *context.Context) {
+ base := setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"
+
+ xmlResponse(ctx, http.StatusOK, &ServiceIndexResponseV2{
+ Base: base,
+ Xmlns: "http://www.w3.org/2007/app",
+ XmlnsAtom: "http://www.w3.org/2005/Atom",
+ Workspace: ServiceWorkspace{
+ Title: AtomTitle{
+ Type: "text",
+ Text: "Default",
+ },
+ Collection: ServiceCollection{
+ Href: "Packages",
+ Title: AtomTitle{
+ Type: "text",
+ Text: "Packages",
+ },
+ },
+ },
+ })
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/service-index
+func ServiceIndexV3(ctx *context.Context) {
+ root := setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"
+
+ ctx.JSON(http.StatusOK, &ServiceIndexResponseV3{
+ Version: "3.0.0",
+ Resources: []ServiceResource{
+ {ID: root + "/query", Type: "SearchQueryService"},
+ {ID: root + "/query", Type: "SearchQueryService/3.0.0-beta"},
+ {ID: root + "/query", Type: "SearchQueryService/3.0.0-rc"},
+ {ID: root + "/registration", Type: "RegistrationsBaseUrl"},
+ {ID: root + "/registration", Type: "RegistrationsBaseUrl/3.0.0-beta"},
+ {ID: root + "/registration", Type: "RegistrationsBaseUrl/3.0.0-rc"},
+ {ID: root + "/package", Type: "PackageBaseAddress/3.0.0"},
+ {ID: root, Type: "PackagePublish/2.0.0"},
+ {ID: root + "/symbolpackage", Type: "SymbolPackagePublish/4.9.0"},
+ },
+ })
+}
+
+// https://github.com/NuGet/NuGet.Client/blob/dev/src/NuGet.Core/NuGet.Protocol/LegacyFeed/LegacyFeedCapabilityResourceV2Feed.cs
+func FeedCapabilityResource(ctx *context.Context) {
+ xmlResponse(ctx, http.StatusOK, Metadata)
+}
+
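+// regular expressions used by getSearchTerm to extract the quoted search term from an OData $filter expression and to detect an exact match ("eq") filter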
+var (
+ searchTermExtract = regexp.MustCompile(`'([^']+)'`)
+ searchTermExact = regexp.MustCompile(`\s+eq\s+'`)
+)
+
+func getSearchTerm(ctx *context.Context) packages_model.SearchValue {
+ searchTerm := strings.Trim(ctx.FormTrim("searchTerm"), "'")
+ if searchTerm != "" {
+ return packages_model.SearchValue{
+ Value: searchTerm,
+ ExactMatch: false,
+ }
+ }
+
+ // $filter contains a query like:
+ // (((Id ne null) and substringof('microsoft',tolower(Id)))
+ // https://www.odata.org/documentation/odata-version-2-0/uri-conventions/ section 4.5
+ // We don't support these queries, just extract the search term.
+ filter := ctx.FormTrim("$filter")
+ match := searchTermExtract.FindStringSubmatch(filter)
+ if len(match) == 2 {
+ return packages_model.SearchValue{
+ Value: strings.TrimSpace(match[1]),
+ ExactMatch: searchTermExact.MatchString(filter),
+ }
+ }
+
+ return packages_model.SearchValue{}
+}
+
+// https://github.com/NuGet/NuGet.Client/blob/dev/src/NuGet.Core/NuGet.Protocol/LegacyFeed/V2FeedQueryBuilder.cs
+func SearchServiceV2(ctx *context.Context) {
+ skip, take := ctx.FormInt("$skip"), ctx.FormInt("$top")
+ paginator := db.NewAbsoluteListOptions(skip, take)
+
+ pvs, total, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeNuGet,
+ Name: getSearchTerm(ctx),
+ IsInternal: optional.Some(false),
+ Paginator: paginator,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ skip, take = paginator.GetSkipTake()
+
+ var next *nextOptions
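+ // if the current page is full, more results may exist, so provide a link to the next page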
+ if len(pvs) == take {
+ next = &nextOptions{
+ Path: "Search()",
+ Query: url.Values{},
+ }
+ searchTerm := ctx.FormTrim("searchTerm")
+ if searchTerm != "" {
+ next.Query.Set("searchTerm", searchTerm)
+ }
+ filter := ctx.FormTrim("$filter")
+ if filter != "" {
+ next.Query.Set("$filter", filter)
+ }
+ next.Query.Set("$skip", strconv.Itoa(skip+take))
+ next.Query.Set("$top", strconv.Itoa(take))
+ }
+
+ resp := createFeedResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget", Next: next},
+ total,
+ pds,
+ )
+
+ xmlResponse(ctx, http.StatusOK, resp)
+}
+
+// http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/part2-url-conventions/odata-v4.0-errata03-os-part2-url-conventions-complete.html#_Toc453752351
+func SearchServiceV2Count(ctx *context.Context) {
+ count, err := nuget_model.CountPackages(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Name: getSearchTerm(ctx),
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.PlainText(http.StatusOK, strconv.FormatInt(count, 10))
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/search-query-service-resource#search-for-packages
+func SearchServiceV3(ctx *context.Context) {
+ pvs, count, err := nuget_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Name: packages_model.SearchValue{Value: ctx.FormTrim("q")},
+ IsInternal: optional.Some(false),
+ Paginator: db.NewAbsoluteListOptions(
+ ctx.FormInt("skip"),
+ ctx.FormInt("take"),
+ ),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createSearchResultResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"},
+ count,
+ pds,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#registration-index
+func RegistrationIndex(ctx *context.Context) {
+ packageName := ctx.Params("id")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNuGet, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createRegistrationIndexResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"},
+ pds,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// https://github.com/NuGet/NuGet.Client/blob/dev/src/NuGet.Core/NuGet.Protocol/LegacyFeed/V2FeedQueryBuilder.cs
+func RegistrationLeafV2(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := ctx.Params("version")
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeNuGet, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createEntryResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"},
+ pd,
+ )
+
+ xmlResponse(ctx, http.StatusOK, resp)
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/registration-base-url-resource#registration-leaf
+func RegistrationLeafV3(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := strings.TrimSuffix(ctx.Params("version"), ".json")
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeNuGet, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ resp := createRegistrationLeafResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget"},
+ pd,
+ )
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// https://github.com/NuGet/NuGet.Client/blob/dev/src/NuGet.Core/NuGet.Protocol/LegacyFeed/V2FeedQueryBuilder.cs
+func EnumeratePackageVersionsV2(ctx *context.Context) {
+ packageName := strings.Trim(ctx.FormTrim("id"), "'")
+
+ skip, take := ctx.FormInt("$skip"), ctx.FormInt("$top")
+ paginator := db.NewAbsoluteListOptions(skip, take)
+
+ pvs, total, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeNuGet,
+ Name: packages_model.SearchValue{
+ ExactMatch: true,
+ Value: packageName,
+ },
+ IsInternal: optional.Some(false),
+ Paginator: paginator,
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ skip, take = paginator.GetSkipTake()
+
+ var next *nextOptions
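+ // if the current page is full, more results may exist, so provide a link to the next page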
+ if len(pvs) == take {
+ next = &nextOptions{
+ Path: "FindPackagesById()",
+ Query: url.Values{},
+ }
+ next.Query.Set("id", packageName)
+ next.Query.Set("$skip", strconv.Itoa(skip+take))
+ next.Query.Set("$top", strconv.Itoa(take))
+ }
+
+ resp := createFeedResponse(
+ &linkBuilder{Base: setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/nuget", Next: next},
+ total,
+ pds,
+ )
+
+ xmlResponse(ctx, http.StatusOK, resp)
+}
+
+// http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/part2-url-conventions/odata-v4.0-errata03-os-part2-url-conventions-complete.html#_Toc453752351
+func EnumeratePackageVersionsV2Count(ctx *context.Context) {
+ count, err := packages_model.CountVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeNuGet,
+ Name: packages_model.SearchValue{
+ ExactMatch: true,
+ Value: strings.Trim(ctx.FormTrim("id"), "'"),
+ },
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.PlainText(http.StatusOK, strconv.FormatInt(count, 10))
+}
+
+// https://docs.microsoft.com/en-us/nuget/api/package-base-address-resource#enumerate-package-versions
+func EnumeratePackageVersionsV3(ctx *context.Context) {
+ packageName := ctx.Params("id")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeNuGet, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ resp := createPackageVersionsResponse(pvs)
+
+ ctx.JSON(http.StatusOK, resp)
+}
+
+// https://learn.microsoft.com/en-us/nuget/api/package-base-address-resource#download-package-manifest-nuspec
+// https://learn.microsoft.com/en-us/nuget/api/package-base-address-resource#download-package-content-nupkg
+func DownloadPackageFile(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := ctx.Params("version")
+ filename := ctx.Params("filename")
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNuGet,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackage creates a new package with the metadata contained in the uploaded nupkg file
+// https://docs.microsoft.com/en-us/nuget/api/package-publish-resource#push-a-package
+func UploadPackage(ctx *context.Context) {
+ np, buf, closables := processUploadedFile(ctx, nuget_module.DependencyPackage)
+ defer func() {
+ for _, c := range closables {
+ c.Close()
+ }
+ }()
+ if np == nil {
+ return
+ }
+
+ pv, _, err := packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNuGet,
+ Name: np.ID,
+ Version: np.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: np.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s.%s.nupkg", np.ID, np.Version)),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
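+ // store the extracted nuspec file as an additional file of the package version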
+ nuspecBuf, err := packages_module.CreateHashedBufferFromReaderWithSize(np.NuspecContent, np.NuspecContent.Len())
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer nuspecBuf.Close()
+
+ _, err = packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ pv,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s.nuspec", np.ID)),
+ },
+ Data: nuspecBuf,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// UploadSymbolPackage adds a symbol package to an existing package
+// https://docs.microsoft.com/en-us/nuget/api/symbol-package-publish-resource
+func UploadSymbolPackage(ctx *context.Context) {
+ np, buf, closables := processUploadedFile(ctx, nuget_module.SymbolsPackage)
+ defer func() {
+ for _, c := range closables {
+ c.Close()
+ }
+ }()
+ if np == nil {
+ return
+ }
+
+ pdbs, err := nuget_module.ExtractPortablePdb(buf, buf.Size())
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ defer pdbs.Close()
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pi := &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNuGet,
+ Name: np.ID,
+ Version: np.Version,
+ }
+
+ _, err = packages_service.AddFileToExistingPackage(
+ ctx,
+ pi,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(fmt.Sprintf("%s.%s.snupkg", np.ID, np.Version)),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: false,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrPackageNotExist:
+ apiError(ctx, http.StatusNotFound, err)
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ for _, pdb := range pdbs {
+ _, err := packages_service.AddFileToExistingPackage(
+ ctx,
+ pi,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(pdb.Name),
+ CompositeKey: strings.ToLower(pdb.ID),
+ },
+ Creator: ctx.Doer,
+ Data: pdb.Content,
+ IsLead: false,
+ Properties: map[string]string{
+ nuget_module.PropertySymbolID: strings.ToLower(pdb.ID),
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
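+// processUploadedFile reads the upload into a hashed buffer, parses and validates the package metadata and returns the package, the buffer and the closers the caller must close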
+func processUploadedFile(ctx *context.Context, expectedType nuget_module.PackageType) (*nuget_module.Package, *packages_module.HashedBuffer, []io.Closer) {
+ closables := make([]io.Closer, 0, 2)
+
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return nil, nil, closables
+ }
+
+ if needToClose {
+ closables = append(closables, upload)
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return nil, nil, closables
+ }
+ closables = append(closables, buf)
+
+ np, err := nuget_module.ParsePackageMetaData(buf, buf.Size())
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return nil, nil, closables
+ }
+ if np.PackageType != expectedType {
+ apiError(ctx, http.StatusBadRequest, errors.New("unexpected package type"))
+ return nil, nil, closables
+ }
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return nil, nil, closables
+ }
+ return np, buf, closables
+}
+
+// https://github.com/dotnet/symstore/blob/main/docs/specs/Simple_Symbol_Query_Protocol.md#request
+func DownloadSymbolFile(ctx *context.Context) {
+ filename := ctx.Params("filename")
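+ // only the first 32 characters (the signature guid) of the requested key are used for the lookup; the remainder is the age suffix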
+ guid := ctx.Params("guid")[:32]
+ filename2 := ctx.Params("filename2")
+
+ if filename != filename2 {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ PackageType: packages_model.TypeNuGet,
+ Query: filename,
+ Properties: map[string]string{
+ nuget_module.PropertySymbolID: strings.ToLower(guid),
+ },
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pfs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetPackageFileStream(ctx, pfs[0])
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// DeletePackage hard deletes the package
+// https://docs.microsoft.com/en-us/nuget/api/package-publish-resource#delete-a-package
+func DeletePackage(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := ctx.Params("version")
+
+ err := packages_service.RemovePackageVersionByNameAndVersion(
+ ctx,
+ ctx.Doer,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeNuGet,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/packages/pub/pub.go b/routers/api/packages/pub/pub.go
new file mode 100644
index 0000000..f87df52
--- /dev/null
+++ b/routers/api/packages/pub/pub.go
@@ -0,0 +1,284 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package pub
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ pub_module "code.gitea.io/gitea/modules/packages/pub"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+func jsonResponse(ctx *context.Context, status int, obj any) {
+ resp := ctx.Resp
+ resp.Header().Set("Content-Type", "application/vnd.pub.v2+json")
+ resp.WriteHeader(status)
+ if err := json.NewEncoder(resp).Encode(obj); err != nil {
+ log.Error("JSON encode: %v", err)
+ }
+}
+
+func apiError(ctx *context.Context, status int, obj any) {
+ type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ }
+ type ErrorWrapper struct {
+ Error Error `json:"error"`
+ }
+
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ jsonResponse(ctx, status, ErrorWrapper{
+ Error: Error{
+ Code: http.StatusText(status),
+ Message: message,
+ },
+ })
+ })
+}
+
+type packageVersions struct {
+ Name string `json:"name"`
+ Latest *versionMetadata `json:"latest"`
+ Versions []*versionMetadata `json:"versions"`
+}
+
+type versionMetadata struct {
+ Version string `json:"version"`
+ ArchiveURL string `json:"archive_url"`
+ Published time.Time `json:"published"`
+ Pubspec any `json:"pubspec,omitempty"`
+}
+
+func packageDescriptorToMetadata(baseURL string, pd *packages_model.PackageDescriptor) *versionMetadata {
+ return &versionMetadata{
+ Version: pd.Version.Version,
+ ArchiveURL: fmt.Sprintf("%s/files/%s.tar.gz", baseURL, url.PathEscape(pd.Version.Version)),
+ Published: pd.Version.CreatedUnix.AsLocalTime(),
+ Pubspec: pd.Metadata.(*pub_module.Metadata).Pubspec,
+ }
+}
+
+func baseURL(ctx *context.Context) string {
+ return setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/pub/api/packages"
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#list-all-versions-of-a-package
+func EnumeratePackageVersions(ctx *context.Context) {
+ packageName := ctx.Params("id")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypePub, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ baseURL := fmt.Sprintf("%s/%s", baseURL(ctx), url.PathEscape(pds[0].Package.Name))
+
+ versions := make([]*versionMetadata, 0, len(pds))
+ for _, pd := range pds {
+ versions = append(versions, packageDescriptorToMetadata(baseURL, pd))
+ }
+
+ jsonResponse(ctx, http.StatusOK, &packageVersions{
+ Name: pds[0].Package.Name,
+ Latest: packageDescriptorToMetadata(baseURL, pds[len(pds)-1]),
+ Versions: versions,
+ })
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#deprecated-inspect-a-specific-version-of-a-package
+func PackageVersionMetadata(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := ctx.Params("version")
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypePub, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ jsonResponse(ctx, http.StatusOK, packageDescriptorToMetadata(
+ fmt.Sprintf("%s/%s", baseURL(ctx), url.PathEscape(pd.Package.Name)),
+ pd,
+ ))
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#publishing-packages
+func RequestUpload(ctx *context.Context) {
+ type UploadRequest struct {
+ URL string `json:"url"`
+ Fields map[string]string `json:"fields"`
+ }
+
+ jsonResponse(ctx, http.StatusOK, UploadRequest{
+ URL: baseURL(ctx) + "/versions/new/upload",
+ Fields: make(map[string]string),
+ })
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#publishing-packages
+func UploadPackageFile(ctx *context.Context) {
+ file, _, err := ctx.Req.FormFile("file")
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ defer file.Close()
+
+ buf, err := packages_module.CreateHashedBufferFromReader(file)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ pck, err := pub_module.ParsePackage(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypePub,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: pck.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(pck.Version + ".tar.gz"),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Resp.Header().Set("Location", fmt.Sprintf("%s/versions/new/finalize/%s/%s", baseURL(ctx), url.PathEscape(pck.Name), url.PathEscape(pck.Version)))
+ ctx.Status(http.StatusNoContent)
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#publishing-packages
+func FinalizePackage(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := ctx.Params("version")
+
+ _, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypePub, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ type Success struct {
+ Message string `json:"message"`
+ }
+ type SuccessWrapper struct {
+ Success Success `json:"success"`
+ }
+
+ jsonResponse(ctx, http.StatusOK, SuccessWrapper{Success{}})
+}
+
+// https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md#deprecated-download-a-specific-version-of-a-package
+func DownloadPackageFile(ctx *context.Context) {
+ packageName := ctx.Params("id")
+ packageVersion := strings.TrimSuffix(ctx.Params("version"), ".tar.gz")
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypePub, packageName, packageVersion)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
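+ // a pub package version stores a single file, the package archive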
+ pf := pd.Files[0].File
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
diff --git a/routers/api/packages/pypi/pypi.go b/routers/api/packages/pypi/pypi.go
new file mode 100644
index 0000000..7824db1
--- /dev/null
+++ b/routers/api/packages/pypi/pypi.go
@@ -0,0 +1,194 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package pypi
+
+import (
+ "encoding/hex"
+ "io"
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ pypi_module "code.gitea.io/gitea/modules/packages/pypi"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/validation"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+// https://peps.python.org/pep-0426/#name
+var (
+ normalizer = strings.NewReplacer(".", "-", "_", "-")
+ nameMatcher = regexp.MustCompile(`\A(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\.\-_]*[a-zA-Z0-9])\z`)
+)
+
+// https://peps.python.org/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
+var versionMatcher = regexp.MustCompile(`\Av?` +
+ `(?:[0-9]+!)?` + // epoch
+ `[0-9]+(?:\.[0-9]+)*` + // release segment
+ `(?:[-_\.]?(?:a|b|c|rc|alpha|beta|pre|preview)[-_\.]?[0-9]*)?` + // pre-release
+ `(?:-[0-9]+|[-_\.]?(?:post|rev|r)[-_\.]?[0-9]*)?` + // post release
+ `(?:[-_\.]?dev[-_\.]?[0-9]*)?` + // dev release
+ `(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)?` + // local version
+ `\z`)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+// PackageMetadata returns the metadata for a single package
+func PackageMetadata(ctx *context.Context) {
+ packageName := normalizer.Replace(ctx.Params("id"))
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypePyPI, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ // sort package descriptors by version to mimic PyPI format
+ sort.Slice(pds, func(i, j int) bool {
+ return strings.Compare(pds[i].Version.Version, pds[j].Version.Version) < 0
+ })
+
+ ctx.Data["RegistryURL"] = setting.AppURL + "api/packages/" + ctx.Package.Owner.Name + "/pypi"
+ ctx.Data["PackageDescriptor"] = pds[0]
+ ctx.Data["PackageDescriptors"] = pds
+ ctx.HTML(http.StatusOK, "api/packages/pypi/simple")
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ packageName := normalizer.Replace(ctx.Params("id"))
+ packageVersion := ctx.Params("version")
+ filename := ctx.Params("filename")
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypePyPI,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackageFile adds a file to the package. If the package does not exist, it gets created.
+func UploadPackageFile(ctx *context.Context) {
+ file, fileHeader, err := ctx.Req.FormFile("content")
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ defer file.Close()
+
+ buf, err := packages_module.CreateHashedBufferFromReader(file)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
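+ // verify the uploaded file against the client supplied sha256 digest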
+ _, _, hashSHA256, _ := buf.Sums()
+
+ if !strings.EqualFold(ctx.Req.FormValue("sha256_digest"), hex.EncodeToString(hashSHA256)) {
+ apiError(ctx, http.StatusBadRequest, "hash mismatch")
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ packageName := normalizer.Replace(ctx.Req.FormValue("name"))
+ packageVersion := ctx.Req.FormValue("version")
+ if !isValidNameAndVersion(packageName, packageVersion) {
+ apiError(ctx, http.StatusBadRequest, "invalid name or version")
+ return
+ }
+
+ projectURL := ctx.Req.FormValue("home_page")
+ if !validation.IsValidURL(projectURL) {
+ projectURL = ""
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypePyPI,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ SemverCompatible: false,
+ Creator: ctx.Doer,
+ Metadata: &pypi_module.Metadata{
+ Author: ctx.Req.FormValue("author"),
+ Description: ctx.Req.FormValue("description"),
+ LongDescription: ctx.Req.FormValue("long_description"),
+ Summary: ctx.Req.FormValue("summary"),
+ ProjectURL: projectURL,
+ License: ctx.Req.FormValue("license"),
+ RequiresPython: ctx.Req.FormValue("requires_python"),
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fileHeader.Filename,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func isValidNameAndVersion(packageName, packageVersion string) bool {
+ return nameMatcher.MatchString(packageName) && versionMatcher.MatchString(packageVersion)
+}
diff --git a/routers/api/packages/pypi/pypi_test.go b/routers/api/packages/pypi/pypi_test.go
new file mode 100644
index 0000000..3023692
--- /dev/null
+++ b/routers/api/packages/pypi/pypi_test.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package pypi
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestIsValidNameAndVersion(t *testing.T) {
+ // The test cases below were created from the following Python PEPs:
+ // https://peps.python.org/pep-0426/#name
+ // https://peps.python.org/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
+
+ // Valid Cases
+ assert.True(t, isValidNameAndVersion("A", "1.0.1"))
+ assert.True(t, isValidNameAndVersion("Test.Name.1234", "1.0.1"))
+ assert.True(t, isValidNameAndVersion("test_name", "1.0.1"))
+ assert.True(t, isValidNameAndVersion("test-name", "1.0.1"))
+ assert.True(t, isValidNameAndVersion("test-name", "v1.0.1"))
+ assert.True(t, isValidNameAndVersion("test-name", "2012.4"))
+ assert.True(t, isValidNameAndVersion("test-name", "1.0.1-alpha"))
+ assert.True(t, isValidNameAndVersion("test-name", "1.0.1a1"))
+ assert.True(t, isValidNameAndVersion("test-name", "1.0b2.r345.dev456"))
+ assert.True(t, isValidNameAndVersion("test-name", "1!1.0.1"))
+ assert.True(t, isValidNameAndVersion("test-name", "1.0.1+local.1"))
+
+ // Invalid Cases
+ assert.False(t, isValidNameAndVersion(".test-name", "1.0.1"))
+ assert.False(t, isValidNameAndVersion("test!name", "1.0.1"))
+ assert.False(t, isValidNameAndVersion("-test-name", "1.0.1"))
+ assert.False(t, isValidNameAndVersion("test-name-", "1.0.1"))
+ assert.False(t, isValidNameAndVersion("test-name", "a1.0.1"))
+ assert.False(t, isValidNameAndVersion("test-name", "1.0.1aa"))
+ assert.False(t, isValidNameAndVersion("test-name", "1.0.0-alpha.beta"))
+}
diff --git a/routers/api/packages/rpm/rpm.go b/routers/api/packages/rpm/rpm.go
new file mode 100644
index 0000000..54fb01c
--- /dev/null
+++ b/routers/api/packages/rpm/rpm.go
@@ -0,0 +1,318 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package rpm
+
+import (
+ stdctx "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ notify_service "code.gitea.io/gitea/services/notify"
+ packages_service "code.gitea.io/gitea/services/packages"
+ rpm_service "code.gitea.io/gitea/services/packages/rpm"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+// https://dnf.readthedocs.io/en/latest/conf_ref.html
+func GetRepositoryConfig(ctx *context.Context) {
+ group := ctx.Params("group")
+
+ var groupParts []string
+ if group != "" {
+ groupParts = strings.Split(group, "/")
+ }
+
+ url := fmt.Sprintf("%sapi/packages/%s/rpm", setting.AppURL, ctx.Package.Owner.Name)
+
+ ctx.PlainText(http.StatusOK, `[gitea-`+strings.Join(append([]string{ctx.Package.Owner.LowerName}, groupParts...), "-")+`]
+name=`+strings.Join(append([]string{ctx.Package.Owner.Name, setting.AppName}, groupParts...), " - ")+`
+baseurl=`+strings.Join(append([]string{url}, groupParts...), "/")+`
+enabled=1
+gpgcheck=1
+gpgkey=`+url+`/repository.key`)
+}
+
+// Gets or creates the PGP public key used to sign repository metadata files
+func GetRepositoryKey(ctx *context.Context) {
+ _, pub, err := rpm_service.GetOrCreateKeyPair(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.ServeContent(strings.NewReader(pub), &context.ServeHeaderOptions{
+ ContentType: "application/pgp-keys",
+ Filename: "repository.key",
+ })
+}
+
+func CheckRepositoryFileExistence(ctx *context.Context) {
+ pv, err := rpm_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, ctx.Params("filename"), ctx.Params("group"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Status(http.StatusNotFound)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.SetServeHeaders(&context.ServeHeaderOptions{
+ Filename: pf.Name,
+ LastModified: pf.CreatedUnix.AsLocalTime(),
+ })
+ ctx.Status(http.StatusOK)
+}
+
+// Gets a pre-generated repository metadata file
+func GetRepositoryFile(ctx *context.Context) {
+ pv, err := rpm_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pv,
+ &packages_service.PackageFileInfo{
+ Filename: ctx.Params("filename"),
+ CompositeKey: ctx.Params("group"),
+ },
+ )
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func UploadPackageFile(ctx *context.Context) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+ // sign the rpm package if signing is enabled by default or explicitly requested for this upload
+ if setting.Packages.DefaultRPMSignEnabled || ctx.FormBool("sign") {
+ pri, _, err := rpm_service.GetOrCreateKeyPair(ctx, ctx.Package.Owner.ID)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ signedBuf, err := rpm_service.NewSignedRPMBuffer(buf, pri)
+ if err != nil {
+ // Not in rpm format, parsing failed.
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ defer signedBuf.Close()
+ buf = signedBuf
+ }
+
+ pck, err := rpm_module.ParsePackage(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ fileMetadataRaw, err := json.Marshal(pck.FileMetadata)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ group := ctx.Params("group")
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeRpm,
+ Name: pck.Name,
+ Version: pck.Version,
+ },
+ Creator: ctx.Doer,
+ Metadata: pck.VersionMetadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s.%s.rpm", pck.Name, pck.Version, pck.FileMetadata.Architecture),
+ CompositeKey: group,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: map[string]string{
+ rpm_module.PropertyGroup: group,
+ rpm_module.PropertyArchitecture: pck.FileMetadata.Architecture,
+ rpm_module.PropertyMetadata: string(fileMetadataRaw),
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion, packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if err := rpm_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, group); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+ name := ctx.Params("name")
+ version := ctx.Params("version")
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeRpm,
+ Name: name,
+ Version: version,
+ },
+ &packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s.%s.rpm", name, version, ctx.Params("architecture")),
+ CompositeKey: ctx.Params("group"),
+ },
+ )
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func DeletePackageFile(webctx *context.Context) {
+ group := webctx.Params("group")
+ name := webctx.Params("name")
+ version := webctx.Params("version")
+ architecture := webctx.Params("architecture")
+
+ var pd *packages_model.PackageDescriptor
+
+ err := db.WithTx(webctx, func(ctx stdctx.Context) error {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx,
+ webctx.Package.Owner.ID,
+ packages_model.TypeRpm,
+ name,
+ version,
+ )
+ if err != nil {
+ return err
+ }
+
+ pf, err := packages_model.GetFileForVersionByName(
+ ctx,
+ pv.ID,
+ fmt.Sprintf("%s-%s.%s.rpm", name, version, architecture),
+ group,
+ )
+ if err != nil {
+ return err
+ }
+
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+
+ has, err := packages_model.HasVersionFileReferences(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return err
+ }
+
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(webctx, http.StatusNotFound, err)
+ } else {
+ apiError(webctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if pd != nil {
+ notify_service.PackageDelete(webctx, webctx.Doer, pd)
+ }
+
+ if err := rpm_service.BuildSpecificRepositoryFiles(webctx, webctx.Package.Owner.ID, group); err != nil {
+ apiError(webctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ webctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/packages/rubygems/rubygems.go b/routers/api/packages/rubygems/rubygems.go
new file mode 100644
index 0000000..dfefe2c
--- /dev/null
+++ b/routers/api/packages/rubygems/rubygems.go
@@ -0,0 +1,451 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package rubygems
+
+import (
+ "compress/gzip"
+ "compress/zlib"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ rubygems_module "code.gitea.io/gitea/modules/packages/rubygems"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+const (
+ Sep = "---\n"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.PlainText(status, message)
+ })
+}
+
+// EnumeratePackages serves the package list
+func EnumeratePackages(ctx *context.Context) {
+ packages, err := packages_model.GetVersionsByPackageType(ctx, ctx.Package.Owner.ID, packages_model.TypeRubyGems)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ enumeratePackages(ctx, "specs.4.8", packages)
+}
+
+// EnumeratePackagesLatest serves the list of the latest version of every package
+func EnumeratePackagesLatest(ctx *context.Context) {
+ pvs, _, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeRubyGems,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ enumeratePackages(ctx, "latest_specs.4.8", pvs)
+}
+
+// EnumeratePackagesPreRelease is not supported and serves an empty list
+func EnumeratePackagesPreRelease(ctx *context.Context) {
+ enumeratePackages(ctx, "prerelease_specs.4.8", []*packages_model.PackageVersion{})
+}
+
+func enumeratePackages(ctx *context.Context, filename string, pvs []*packages_model.PackageVersion) {
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ specs := make([]any, 0, len(pds))
+ for _, p := range pds {
+ specs = append(specs, []any{
+ p.Package.Name,
+ &rubygems_module.RubyUserMarshal{
+ Name: "Gem::Version",
+ Value: []string{p.Version.Version},
+ },
+ p.Metadata.(*rubygems_module.Metadata).Platform,
+ })
+ }
+
+ ctx.SetServeHeaders(&context.ServeHeaderOptions{
+ Filename: filename + ".gz",
+ })
+
+ zw := gzip.NewWriter(ctx.Resp)
+ defer zw.Close()
+
+ zw.Name = filename
+
+ if err := rubygems_module.NewMarshalEncoder(zw).Encode(specs); err != nil {
+ ctx.ServerError("Download file failed", err)
+ }
+}
+
+// ServePackageInfo serves the rubygems.org compatible compact index /info/{gem} file for a package.
+// See also https://guides.rubygems.org/rubygems-org-compact-index-api/.
+func ServePackageInfo(ctx *context.Context) {
+ packageName := ctx.Params("package")
+ versions, err := packages_model.GetVersionsByPackageName(
+ ctx, ctx.Package.Owner.ID, packages_model.TypeRubyGems, packageName)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(versions) == 0 {
+ apiError(ctx, http.StatusNotFound, fmt.Sprintf("Could not find package %s", packageName))
+ return
+ }
+
+ result, err := buildInfoFileForPackage(ctx, versions)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.PlainText(http.StatusOK, *result)
+}
+
+// ServeVersionsFile builds and serves the rubygems.org compatible /versions file.
+// See also https://guides.rubygems.org/rubygems-org-compact-index-api/.
+func ServeVersionsFile(ctx *context.Context) {
+ packages, err := packages_model.GetPackagesByType(
+ ctx, ctx.Package.Owner.ID, packages_model.TypeRubyGems)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ result := new(strings.Builder)
+ result.WriteString(Sep)
+ for _, pack := range packages {
+ versions, err := packages_model.GetVersionsByPackageName(
+ ctx, ctx.Package.Owner.ID, packages_model.TypeRubyGems, pack.Name)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(versions) == 0 {
+ // No versions left for this package, we should continue.
+ continue
+ }
+
+ fmt.Fprintf(result, "%s ", pack.Name)
+ for i, v := range versions {
+ result.WriteString(v.Version)
+ if i != len(versions)-1 {
+ result.WriteString(",")
+ }
+ }
+
+ info, err := buildInfoFileForPackage(ctx, versions)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ checksum := md5.Sum([]byte(*info))
+ fmt.Fprintf(result, " %x\n", checksum)
+ }
+ ctx.PlainText(http.StatusOK, result.String())
+}
+
+// ServePackageSpecification serves the compressed Gemspec file of a package
+func ServePackageSpecification(ctx *context.Context) {
+ filename := ctx.Params("filename")
+
+ if !strings.HasSuffix(filename, ".gemspec.rz") {
+ apiError(ctx, http.StatusNotImplemented, nil)
+ return
+ }
+
+ pvs, err := getVersionsByFilename(ctx, filename[:len(filename)-10]+"gem")
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pvs[0])
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ ctx.SetServeHeaders(&context.ServeHeaderOptions{
+ Filename: filename,
+ })
+
+ zw := zlib.NewWriter(ctx.Resp)
+ defer zw.Close()
+
+ metadata := pd.Metadata.(*rubygems_module.Metadata)
+
+ // create a Ruby Gem::Specification object
+ spec := &rubygems_module.RubyUserDef{
+ Name: "Gem::Specification",
+ Value: []any{
+ "3.2.3", // @rubygems_version
+ 4, // @specification_version,
+ pd.Package.Name,
+ &rubygems_module.RubyUserMarshal{
+ Name: "Gem::Version",
+ Value: []string{pd.Version.Version},
+ },
+ nil, // date
+ metadata.Summary, // @summary
+ nil, // @required_ruby_version
+ nil, // @required_rubygems_version
+ metadata.Platform, // @original_platform
+ []any{}, // @dependencies
+ nil, // rubyforge_project
+ "", // @email
+ metadata.Authors,
+ metadata.Description,
+ metadata.ProjectURL,
+ true, // has_rdoc
+ metadata.Platform, // @new_platform
+ nil,
+ metadata.Licenses,
+ },
+ }
+
+ if err := rubygems_module.NewMarshalEncoder(zw).Encode(spec); err != nil {
+ ctx.ServerError("Download file failed", err)
+ }
+}
+
+// DownloadPackageFile serves the content of a package
+func DownloadPackageFile(ctx *context.Context) {
+ filename := ctx.Params("filename")
+
+ pvs, err := getVersionsByFilename(ctx, filename)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) != 1 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+ ctx,
+ pvs[0],
+ &packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
+
+// UploadPackageFile adds a file to the package. If the package does not exist, it gets created.
+func UploadPackageFile(ctx *context.Context) {
+ upload, needToClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ if needToClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ rp, err := rubygems_module.ParsePackageMetaData(buf)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ filename := getFullFilename(rp.Name, rp.Version, rp.Metadata.Platform)
+
+ _, _, err = packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeRubyGems,
+ Name: rp.Name,
+ Version: rp.Version,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: rp.Metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: filename,
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// DeletePackage deletes a package
+func DeletePackage(ctx *context.Context) {
+ // Go populates the form only for POST, PUT and PATCH requests
+ if err := ctx.Req.ParseMultipartForm(32 << 20); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ packageName := ctx.FormString("gem_name")
+ packageVersion := ctx.FormString("version")
+
+ err := packages_service.RemovePackageVersionByNameAndVersion(
+ ctx,
+ ctx.Doer,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeRubyGems,
+ Name: packageName,
+ Version: packageVersion,
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+}
+
+func writeRequirements(reqs []rubygems_module.VersionRequirement, result *strings.Builder) {
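+ // dependencies without an explicit requirement default to ">= 0"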
+ if len(reqs) == 0 {
+ reqs = []rubygems_module.VersionRequirement{{Restriction: ">=", Version: "0"}}
+ }
+ for i, req := range reqs {
+ if i != 0 {
+ result.WriteString("&")
+ }
+ result.WriteString(req.Restriction)
+ result.WriteString(" ")
+ result.WriteString(req.Version)
+ }
+}
+
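+// buildRequirementStringFromVersion builds a single line of the compact index info file for the given version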
+func buildRequirementStringFromVersion(ctx *context.Context, version *packages_model.PackageVersion) (string, error) {
+ pd, err := packages_model.GetPackageDescriptor(ctx, version)
+ if err != nil {
+ return "", err
+ }
+ metadata := pd.Metadata.(*rubygems_module.Metadata)
+ dependencyRequirements := new(strings.Builder)
+ for i, dep := range metadata.RuntimeDependencies {
+ if i != 0 {
+ dependencyRequirements.WriteString(",")
+ }
+
+ dependencyRequirements.WriteString(dep.Name)
+ dependencyRequirements.WriteString(":")
+ reqs := dep.Version
+ writeRequirements(reqs, dependencyRequirements)
+ }
+ fullname := getFullFilename(pd.Package.Name, version.Version, metadata.Platform)
+ file, err := packages_model.GetFileForVersionByName(ctx, version.ID, fullname, "")
+ if err != nil {
+ return "", err
+ }
+ blob, err := packages_model.GetBlobByID(ctx, file.BlobID)
+ if err != nil {
+ return "", err
+ }
+ additionalRequirements := new(strings.Builder)
+ fmt.Fprintf(additionalRequirements, "checksum:%s", blob.HashSHA256)
+ if len(metadata.RequiredRubyVersion) != 0 {
+ additionalRequirements.WriteString(",ruby:")
+ writeRequirements(metadata.RequiredRubyVersion, additionalRequirements)
+ }
+ if len(metadata.RequiredRubygemsVersion) != 0 {
+ additionalRequirements.WriteString(",rubygems:")
+ writeRequirements(metadata.RequiredRubygemsVersion, additionalRequirements)
+ }
+ return fmt.Sprintf("%s %s|%s", version.Version, dependencyRequirements, additionalRequirements), nil
+}
+
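+// buildInfoFileForPackage concatenates the per-version requirement lines under a leading "---" marker, one line per package version.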
+func buildInfoFileForPackage(ctx *context.Context, versions []*packages_model.PackageVersion) (*string, error) {
+ result := "---\n"
+ for _, v := range versions {
+ str, err := buildRequirementStringFromVersion(ctx, v)
+ if err != nil {
+ return nil, err
+ }
+ result += str
+ result += "\n"
+ }
+ return &result, nil
+}
+
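+// getFullFilename returns the lower-cased gem file name; the platform suffix is omitted for the default "ruby" platform.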
+func getFullFilename(gemName, version, platform string) string {
+ return strings.ToLower(getFullName(gemName, version, platform)) + ".gem"
+}
+
+func getFullName(gemName, version, platform string) string {
+ if platform == "" || platform == "ruby" {
+ return fmt.Sprintf("%s-%s", gemName, version)
+ }
+ return fmt.Sprintf("%s-%s-%s", gemName, version, platform)
+}
+
+func getVersionsByFilename(ctx *context.Context, filename string) ([]*packages_model.PackageVersion, error) {
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeRubyGems,
+ HasFileWithName: filename,
+ IsInternal: optional.Some(false),
+ })
+ return pvs, err
+}
diff --git a/routers/api/packages/swift/swift.go b/routers/api/packages/swift/swift.go
new file mode 100644
index 0000000..a9da3ea
--- /dev/null
+++ b/routers/api/packages/swift/swift.go
@@ -0,0 +1,465 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swift
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ swift_module "code.gitea.io/gitea/modules/packages/swift"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/hashicorp/go-version"
+)
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#35-api-versioning
+const (
+ AcceptJSON = "application/vnd.swift.registry.v1+json"
+ AcceptSwift = "application/vnd.swift.registry.v1+swift"
+ AcceptZip = "application/vnd.swift.registry.v1+zip"
+)
+
+var (
+ // https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#361-package-scope
+ scopePattern = regexp.MustCompile(`\A[a-zA-Z0-9][a-zA-Z0-9-]{0,38}\z`)
+ // https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#362-package-name
+ namePattern = regexp.MustCompile(`\A[a-zA-Z0-9][a-zA-Z0-9-_]{0,99}\z`)
+)
+
+type headers struct {
+ Status int
+ ContentType string
+ Digest string
+ Location string
+ Link string
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#35-api-versioning
+func setResponseHeaders(resp http.ResponseWriter, h *headers) {
+ if h.ContentType != "" {
+ resp.Header().Set("Content-Type", h.ContentType)
+ }
+ if h.Digest != "" {
+ resp.Header().Set("Digest", "sha256="+h.Digest)
+ }
+ if h.Location != "" {
+ resp.Header().Set("Location", h.Location)
+ }
+ if h.Link != "" {
+ resp.Header().Set("Link", h.Link)
+ }
+ resp.Header().Set("Content-Version", "1")
+ if h.Status != 0 {
+ resp.WriteHeader(h.Status)
+ }
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#33-error-handling
+func apiError(ctx *context.Context, status int, obj any) {
+ // https://www.rfc-editor.org/rfc/rfc7807
+ type Problem struct {
+ Status int `json:"status"`
+ Detail string `json:"detail"`
+ }
+
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ setResponseHeaders(ctx.Resp, &headers{
+ Status: status,
+ ContentType: "application/problem+json",
+ })
+ if err := json.NewEncoder(ctx.Resp).Encode(Problem{
+ Status: status,
+ Detail: message,
+ }); err != nil {
+ log.Error("JSON encode: %v", err)
+ }
+ })
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#35-api-versioning
+func CheckAcceptMediaType(requiredAcceptHeader string) func(ctx *context.Context) {
+ return func(ctx *context.Context) {
+ accept := ctx.Req.Header.Get("Accept")
+ if accept != "" && accept != requiredAcceptHeader {
+ apiError(ctx, http.StatusBadRequest, fmt.Sprintf("Unexpected accept header. Should be '%s'.", requiredAcceptHeader))
+ }
+ }
+}
+
+func buildPackageID(scope, name string) string {
+ return scope + "." + name
+}
+
+type Release struct {
+ URL string `json:"url"`
+}
+
+type EnumeratePackageVersionsResponse struct {
+ Releases map[string]Release `json:"releases"`
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#41-list-package-releases
+func EnumeratePackageVersions(ctx *context.Context) {
+ packageScope := ctx.Params("scope")
+ packageName := ctx.Params("name")
+
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeSwift, buildPackageID(packageScope, packageName))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ baseURL := fmt.Sprintf("%sapi/packages/%s/swift/%s/%s/", setting.AppURL, ctx.Package.Owner.LowerName, packageScope, packageName)
+
+ releases := make(map[string]Release)
+ for _, pd := range pds {
+ version := pd.SemVer.String()
+ releases[version] = Release{
+ URL: baseURL + version,
+ }
+ }
+
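+ // The descriptors are sorted ascending, so the last entry is the latest release advertised in the Link header.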
+ setResponseHeaders(ctx.Resp, &headers{
+ Link: fmt.Sprintf(`<%s%s>; rel="latest-version"`, baseURL, pds[len(pds)-1].Version.Version),
+ })
+
+ ctx.JSON(http.StatusOK, EnumeratePackageVersionsResponse{
+ Releases: releases,
+ })
+}
+
+type Resource struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Checksum string `json:"checksum"`
+}
+
+type PackageVersionMetadataResponse struct {
+ ID string `json:"id"`
+ Version string `json:"version"`
+ Resources []Resource `json:"resources"`
+ Metadata *swift_module.SoftwareSourceCode `json:"metadata"`
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#endpoint-2
+func PackageVersionMetadata(ctx *context.Context) {
+ id := buildPackageID(ctx.Params("scope"), ctx.Params("name"))
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeSwift, id, ctx.Params("version"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ metadata := pd.Metadata.(*swift_module.Metadata)
+
+ setResponseHeaders(ctx.Resp, &headers{})
+
+ ctx.JSON(http.StatusOK, PackageVersionMetadataResponse{
+ ID: id,
+ Version: pd.Version.Version,
+ Resources: []Resource{
+ {
+ Name: "source-archive",
+ Type: "application/zip",
+ Checksum: pd.Files[0].Blob.HashSHA256,
+ },
+ },
+ Metadata: &swift_module.SoftwareSourceCode{
+ Context: []string{"http://schema.org/"},
+ Type: "SoftwareSourceCode",
+ Name: pd.PackageProperties.GetByName(swift_module.PropertyName),
+ Version: pd.Version.Version,
+ Description: metadata.Description,
+ Keywords: metadata.Keywords,
+ CodeRepository: metadata.RepositoryURL,
+ License: metadata.License,
+ ProgrammingLanguage: swift_module.ProgrammingLanguage{
+ Type: "ComputerLanguage",
+ Name: "Swift",
+ URL: "https://swift.org",
+ },
+ Author: swift_module.Person{
+ Type: "Person",
+ GivenName: metadata.Author.GivenName,
+ MiddleName: metadata.Author.MiddleName,
+ FamilyName: metadata.Author.FamilyName,
+ },
+ },
+ })
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#43-fetch-manifest-for-a-package-release
+func DownloadManifest(ctx *context.Context) {
+ packageScope := ctx.Params("scope")
+ packageName := ctx.Params("name")
+ packageVersion := ctx.Params("version")
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeSwift, buildPackageID(packageScope, packageName), packageVersion)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ swiftVersion := ctx.FormTrim("swift-version")
+ if swiftVersion != "" {
+ v, err := version.NewVersion(swiftVersion)
+ if err == nil {
+ swiftVersion = swift_module.TrimmedVersionString(v)
+ }
+ }
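+ // Look up the manifest for the requested Swift version; if it does not exist, redirect (303 See Other) to the unqualified Package.swift.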
+ m, ok := pd.Metadata.(*swift_module.Metadata).Manifests[swiftVersion]
+ if !ok {
+ setResponseHeaders(ctx.Resp, &headers{
+ Status: http.StatusSeeOther,
+ Location: fmt.Sprintf("%sapi/packages/%s/swift/%s/%s/%s/Package.swift", setting.AppURL, ctx.Package.Owner.LowerName, packageScope, packageName, packageVersion),
+ })
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &headers{})
+
+ filename := "Package.swift"
+ if swiftVersion != "" {
+ filename = fmt.Sprintf("Package@swift-%s.swift", swiftVersion)
+ }
+
+ ctx.ServeContent(strings.NewReader(m.Content), &context.ServeHeaderOptions{
+ ContentType: "text/x-swift",
+ Filename: filename,
+ LastModified: pv.CreatedUnix.AsLocalTime(),
+ })
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#endpoint-6
+func UploadPackageFile(ctx *context.Context) {
+ packageScope := ctx.Params("scope")
+ packageName := ctx.Params("name")
+
+ v, err := version.NewVersion(ctx.Params("version"))
+
+ if !scopePattern.MatchString(packageScope) || !namePattern.MatchString(packageName) || err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
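+ // Only the core version (major.minor.patch) is stored; Core() drops pre-release and build metadata.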
+ packageVersion := v.Core().String()
+
+ file, _, err := ctx.Req.FormFile("source-archive")
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ defer file.Close()
+
+ buf, err := packages_module.CreateHashedBufferFromReader(file)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
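+ // The client may send a JSON metadata document alongside the archive in the "metadata" form field.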
+ var mr io.Reader
+ metadata := ctx.Req.FormValue("metadata")
+ if metadata != "" {
+ mr = strings.NewReader(metadata)
+ }
+
+ pck, err := swift_module.ParsePackage(buf, buf.Size(), mr)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ apiError(ctx, http.StatusBadRequest, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ pv, _, err := packages_service.CreatePackageAndAddFile(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeSwift,
+ Name: buildPackageID(packageScope, packageName),
+ Version: packageVersion,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: pck.Metadata,
+ PackageProperties: map[string]string{
+ swift_module.PropertyScope: packageScope,
+ swift_module.PropertyName: packageName,
+ },
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s-%s.zip", packageName, packageVersion),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageVersion:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ for _, url := range pck.RepositoryURLs {
+ _, err = packages_model.InsertProperty(ctx, packages_model.PropertyTypeVersion, pv.ID, swift_module.PropertyRepositoryURL, url)
+ if err != nil {
+ log.Error("InsertProperty failed: %v", err)
+ }
+ }
+
+ setResponseHeaders(ctx.Resp, &headers{})
+
+ ctx.Status(http.StatusCreated)
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#endpoint-4
+func DownloadPackageFile(ctx *context.Context) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, ctx.Package.Owner.ID, packages_model.TypeSwift, buildPackageID(ctx.Params("scope"), ctx.Params("name")), ctx.Params("version"))
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ apiError(ctx, http.StatusNotFound, err)
+ } else {
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
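+ // A Swift package version is published with a single source archive (see UploadPackageFile), which is served here.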
+ pf := pd.Files[0].File
+
+ s, u, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ setResponseHeaders(ctx.Resp, &headers{
+ Digest: pd.Files[0].Blob.HashSHA256,
+ })
+
+ helper.ServePackageFile(ctx, s, u, pf, &context.ServeHeaderOptions{
+ Filename: pf.Name,
+ ContentType: "application/zip",
+ LastModified: pf.CreatedUnix.AsLocalTime(),
+ })
+}
+
+type LookupPackageIdentifiersResponse struct {
+ Identifiers []string `json:"identifiers"`
+}
+
+// https://github.com/apple/swift-package-manager/blob/main/Documentation/Registry.md#endpoint-5
+func LookupPackageIdentifiers(ctx *context.Context) {
+ url := ctx.FormTrim("url")
+ if url == "" {
+ apiError(ctx, http.StatusBadRequest, nil)
+ return
+ }
+
+ pvs, _, err := packages_model.SearchLatestVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages_model.TypeSwift,
+ Properties: map[string]string{
+ swift_module.PropertyRepositoryURL: url,
+ },
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, nil)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ identifiers := make([]string, 0, len(pds))
+ for _, pd := range pds {
+ identifiers = append(identifiers, pd.Package.Name)
+ }
+
+ setResponseHeaders(ctx.Resp, &headers{})
+
+ ctx.JSON(http.StatusOK, LookupPackageIdentifiersResponse{
+ Identifiers: identifiers,
+ })
+}
diff --git a/routers/api/packages/vagrant/vagrant.go b/routers/api/packages/vagrant/vagrant.go
new file mode 100644
index 0000000..98a81da
--- /dev/null
+++ b/routers/api/packages/vagrant/vagrant.go
@@ -0,0 +1,242 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package vagrant
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ vagrant_module "code.gitea.io/gitea/modules/packages/vagrant"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/routers/api/packages/helper"
+ "code.gitea.io/gitea/services/context"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/hashicorp/go-version"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+ helper.LogAndProcessError(ctx, status, obj, func(message string) {
+ ctx.JSON(status, struct {
+ Errors []string `json:"errors"`
+ }{
+ Errors: []string{
+ message,
+ },
+ })
+ })
+}
+
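+// CheckAuthenticate responds with 200 for authenticated requests and 401 otherwise, so clients can validate their access token.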
+func CheckAuthenticate(ctx *context.Context) {
+ if ctx.Doer == nil {
+ apiError(ctx, http.StatusUnauthorized, "Invalid access token")
+ return
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
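+// CheckBoxAvailable responds with an empty JSON body if at least one version of the box exists.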
+func CheckBoxAvailable(ctx *context.Context) {
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeVagrant, ctx.Params("name"))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, nil) // the body is irrelevant, but the response must be served as Content-Type: application/json
+}
+
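+// packageMetadata, versionMetadata and providerData mirror the box catalog JSON layout consumed by the Vagrant client.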
+type packageMetadata struct {
+ Name string `json:"name"`
+ Description string `json:"description,omitempty"`
+ ShortDescription string `json:"short_description,omitempty"`
+ Versions []*versionMetadata `json:"versions"`
+}
+
+type versionMetadata struct {
+ Version string `json:"version"`
+ Status string `json:"status"`
+ DescriptionHTML string `json:"description_html,omitempty"`
+ DescriptionMarkdown string `json:"description_markdown,omitempty"`
+ Providers []*providerData `json:"providers"`
+}
+
+type providerData struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+ Checksum string `json:"checksum"`
+ ChecksumType string `json:"checksum_type"`
+}
+
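+// packageDescriptorToMetadata converts a package version into a catalog entry with one provider per uploaded box file.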
+func packageDescriptorToMetadata(baseURL string, pd *packages_model.PackageDescriptor) *versionMetadata {
+ versionURL := baseURL + "/" + url.PathEscape(pd.Version.Version)
+
+ providers := make([]*providerData, 0, len(pd.Files))
+
+ for _, f := range pd.Files {
+ providers = append(providers, &providerData{
+ Name: f.Properties.GetByName(vagrant_module.PropertyProvider),
+ URL: versionURL + "/" + url.PathEscape(f.File.Name),
+ Checksum: f.Blob.HashSHA512,
+ ChecksumType: "sha512",
+ })
+ }
+
+ return &versionMetadata{
+ Status: "active",
+ Version: pd.Version.Version,
+ Providers: providers,
+ }
+}
+
+func EnumeratePackageVersions(ctx *context.Context) {
+ pvs, err := packages_model.GetVersionsByPackageName(ctx, ctx.Package.Owner.ID, packages_model.TypeVagrant, ctx.Params("name"))
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if len(pvs) == 0 {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ sort.Slice(pds, func(i, j int) bool {
+ return pds[i].SemVer.LessThan(pds[j].SemVer)
+ })
+
+ baseURL := fmt.Sprintf("%sapi/packages/%s/vagrant/%s", setting.AppURL, url.PathEscape(ctx.Package.Owner.Name), url.PathEscape(pds[0].Package.Name))
+
+ versions := make([]*versionMetadata, 0, len(pds))
+ for _, pd := range pds {
+ versions = append(versions, packageDescriptorToMetadata(baseURL, pd))
+ }
+
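+ // The box description is taken from the metadata of the most recent version.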
+ ctx.JSON(http.StatusOK, &packageMetadata{
+ Name: pds[0].Package.Name,
+ Description: pds[len(pds)-1].Metadata.(*vagrant_module.Metadata).Description,
+ Versions: versions,
+ })
+}
+
+func UploadPackageFile(ctx *context.Context) {
+ boxName := ctx.Params("name")
+ boxVersion := ctx.Params("version")
+ _, err := version.NewSemver(boxVersion)
+ if err != nil {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+ boxProvider := ctx.Params("provider")
+ if !strings.HasSuffix(boxProvider, ".box") {
+ apiError(ctx, http.StatusBadRequest, err)
+ return
+ }
+
+ upload, needsClose, err := ctx.UploadStream()
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ if needsClose {
+ defer upload.Close()
+ }
+
+ buf, err := packages_module.CreateHashedBufferFromReader(upload)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+ defer buf.Close()
+
+ metadata, err := vagrant_module.ParseMetadataFromBox(buf)
+ if err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ if _, err := buf.Seek(0, io.SeekStart); err != nil {
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ _, _, err = packages_service.CreatePackageOrAddFileToExisting(
+ ctx,
+ &packages_service.PackageCreationInfo{
+ PackageInfo: packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeVagrant,
+ Name: boxName,
+ Version: boxVersion,
+ },
+ SemverCompatible: true,
+ Creator: ctx.Doer,
+ Metadata: metadata,
+ },
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: strings.ToLower(boxProvider),
+ },
+ Creator: ctx.Doer,
+ Data: buf,
+ IsLead: true,
+ Properties: map[string]string{
+ vagrant_module.PropertyProvider: strings.TrimSuffix(boxProvider, ".box"),
+ },
+ },
+ )
+ if err != nil {
+ switch err {
+ case packages_model.ErrDuplicatePackageFile:
+ apiError(ctx, http.StatusConflict, err)
+ case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+ apiError(ctx, http.StatusForbidden, err)
+ default:
+ apiError(ctx, http.StatusInternalServerError, err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+ s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+ ctx,
+ &packages_service.PackageInfo{
+ Owner: ctx.Package.Owner,
+ PackageType: packages_model.TypeVagrant,
+ Name: ctx.Params("name"),
+ Version: ctx.Params("version"),
+ },
+ &packages_service.PackageFileInfo{
+ Filename: ctx.Params("provider"),
+ },
+ )
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist || err == packages_model.ErrPackageFileNotExist {
+ apiError(ctx, http.StatusNotFound, err)
+ return
+ }
+ apiError(ctx, http.StatusInternalServerError, err)
+ return
+ }
+
+ helper.ServePackageFile(ctx, s, u, pf)
+}
diff --git a/routers/api/shared/middleware.go b/routers/api/shared/middleware.go
new file mode 100644
index 0000000..e2ff004
--- /dev/null
+++ b/routers/api/shared/middleware.go
@@ -0,0 +1,152 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package shared
+
+import (
+ "net/http"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/routers/common"
+ "code.gitea.io/gitea/services/auth"
+ "code.gitea.io/gitea/services/context"
+
+ "github.com/go-chi/cors"
+)
+
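+// Middlewares assembles the middleware stack shared by the API routers: security headers, optional CORS, the API context, the deprecated-auth warning and authentication.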
+func Middlewares() (stack []any) {
+ stack = append(stack, securityHeaders())
+
+ if setting.CORSConfig.Enabled {
+ stack = append(stack, cors.Handler(cors.Options{
+ AllowedOrigins: setting.CORSConfig.AllowDomain,
+ AllowedMethods: setting.CORSConfig.Methods,
+ AllowCredentials: setting.CORSConfig.AllowCredentials,
+ AllowedHeaders: append([]string{"Authorization", "X-Gitea-OTP", "X-Forgejo-OTP"}, setting.CORSConfig.Headers...),
+ MaxAge: int(setting.CORSConfig.MaxAge.Seconds()),
+ }))
+ }
+ return append(stack,
+ context.APIContexter(),
+
+ checkDeprecatedAuthMethods,
+ // Get user from session if logged in.
+ apiAuth(buildAuthGroup()),
+ verifyAuthWithOptions(&common.VerifyOptions{
+ SignInRequired: setting.Service.RequireSignInView,
+ }),
+ )
+}
+
+func buildAuthGroup() *auth.Group {
+ group := auth.NewGroup(
+ &auth.OAuth2{},
+ &auth.HTTPSign{},
+ &auth.Basic{}, // FIXME: this should be removed once we don't allow basic auth in API
+ )
+ if setting.Service.EnableReverseProxyAuthAPI {
+ group.Add(&auth.ReverseProxy{})
+ }
+
+ if setting.IsWindows && auth_model.IsSSPIEnabled(db.DefaultContext) {
+ group.Add(&auth.SSPI{}) // it MUST be the last, see the comment of SSPI
+ }
+
+ return group
+}
+
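+// apiAuth runs the given authentication group and stores the resulting user on the API context.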
+func apiAuth(authMethod auth.Method) func(*context.APIContext) {
+ return func(ctx *context.APIContext) {
+ ar, err := common.AuthShared(ctx.Base, nil, authMethod)
+ if err != nil {
+ ctx.Error(http.StatusUnauthorized, "APIAuth", err)
+ return
+ }
+ ctx.Doer = ar.Doer
+ ctx.IsSigned = ar.Doer != nil
+ ctx.IsBasicAuth = ar.IsBasicAuth
+ }
+}
+
+// verifyAuthWithOptions checks authentication according to options
+func verifyAuthWithOptions(options *common.VerifyOptions) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ // Check prohibit login users.
+ if ctx.IsSigned {
+ if !ctx.Doer.IsActive && setting.Service.RegisterEmailConfirm {
+ ctx.Data["Title"] = ctx.Tr("auth.active_your_account")
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "This account is not activated.",
+ })
+ return
+ }
+ if !ctx.Doer.IsActive || ctx.Doer.ProhibitLogin {
+ log.Info("Failed authentication attempt for %s from %s", ctx.Doer.Name, ctx.RemoteAddr())
+ ctx.Data["Title"] = ctx.Tr("auth.prohibit_login")
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "This account is prohibited from signing in, please contact your site administrator.",
+ })
+ return
+ }
+
+ if ctx.Doer.MustChangePassword {
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "You must change your password. Change it at: " + setting.AppURL + "/user/change_password",
+ })
+ return
+ }
+ }
+
+ // Redirect signed-in users to the home page when sign-out is required.
+ if options.SignOutRequired && ctx.IsSigned && ctx.Req.URL.RequestURI() != "/" {
+ ctx.Redirect(setting.AppSubURL + "/")
+ return
+ }
+
+ if options.SignInRequired {
+ if !ctx.IsSigned {
+ // Restrict API calls with error message.
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "Only signed in user is allowed to call APIs.",
+ })
+ return
+ } else if !ctx.Doer.IsActive && setting.Service.RegisterEmailConfirm {
+ ctx.Data["Title"] = ctx.Tr("auth.active_your_account")
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "This account is not activated.",
+ })
+ return
+ }
+ }
+
+ if options.AdminRequired {
+ if !ctx.Doer.IsAdmin {
+ ctx.JSON(http.StatusForbidden, map[string]string{
+ "message": "You have no permission to request for this.",
+ })
+ return
+ }
+ }
+ }
+}
+
+// check for and warn against deprecated authentication options
+func checkDeprecatedAuthMethods(ctx *context.APIContext) {
+ if ctx.FormString("token") != "" || ctx.FormString("access_token") != "" {
+ ctx.Resp.Header().Set("Warning", "token and access_token API authentication is deprecated and will be removed in gitea 1.23. Please use AuthorizationHeaderToken instead. Existing queries will continue to work but without authorization.")
+ }
+}
+
+func securityHeaders() func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ // CORB: https://www.chromium.org/Home/chromium-security/corb-for-developers
+ // http://stackoverflow.com/a/3146618/244009
+ resp.Header().Set("x-content-type-options", "nosniff")
+ next.ServeHTTP(resp, req)
+ })
+ }
+}
diff --git a/routers/api/v1/activitypub/actor.go b/routers/api/v1/activitypub/actor.go
new file mode 100644
index 0000000..4f128e7
--- /dev/null
+++ b/routers/api/v1/activitypub/actor.go
@@ -0,0 +1,83 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/activitypub"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/context"
+
+ ap "github.com/go-ap/activitypub"
+ "github.com/go-ap/jsonld"
+)
+
+// Actor function returns the instance's Actor
+func Actor(ctx *context.APIContext) {
+ // swagger:operation GET /activitypub/actor activitypub activitypubInstanceActor
+ // ---
+ // summary: Returns the instance's Actor
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityPub"
+
+ link := user_model.APActorUserAPActorID()
+ actor := ap.ActorNew(ap.IRI(link), ap.ApplicationType)
+
+ actor.PreferredUsername = ap.NaturalLanguageValuesNew()
+ err := actor.PreferredUsername.Set("en", ap.Content(setting.Domain))
+ if err != nil {
+ ctx.ServerError("PreferredUsername.Set", err)
+ return
+ }
+
+ actor.URL = ap.IRI(setting.AppURL)
+
+ actor.Inbox = ap.IRI(link + "/inbox")
+ actor.Outbox = ap.IRI(link + "/outbox")
+
+ actor.PublicKey.ID = ap.IRI(link + "#main-key")
+ actor.PublicKey.Owner = ap.IRI(link)
+
+ publicKeyPem, err := activitypub.GetPublicKey(ctx, user_model.NewAPActorUser())
+ if err != nil {
+ ctx.ServerError("GetPublicKey", err)
+ return
+ }
+ actor.PublicKey.PublicKeyPem = publicKeyPem
+
+ binary, err := jsonld.WithContext(
+ jsonld.IRI(ap.ActivityBaseURI),
+ jsonld.IRI(ap.SecurityContextURI),
+ ).Marshal(actor)
+ if err != nil {
+ ctx.ServerError("MarshalJSON", err)
+ return
+ }
+ ctx.Resp.Header().Add("Content-Type", activitypub.ActivityStreamsContentType)
+ ctx.Resp.WriteHeader(http.StatusOK)
+ if _, err = ctx.Resp.Write(binary); err != nil {
+ log.Error("write to resp err: %v", err)
+ }
+}
+
+// ActorInbox function handles the incoming data for the instance Actor
+func ActorInbox(ctx *context.APIContext) {
+ // swagger:operation POST /activitypub/actor/inbox activitypub activitypubInstanceActorInbox
+ // ---
+ // summary: Send to the inbox
+ // produces:
+ // - application/json
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/activitypub/person.go b/routers/api/v1/activitypub/person.go
new file mode 100644
index 0000000..995a148
--- /dev/null
+++ b/routers/api/v1/activitypub/person.go
@@ -0,0 +1,106 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/modules/activitypub"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/context"
+
+ ap "github.com/go-ap/activitypub"
+ "github.com/go-ap/jsonld"
+)
+
+// Person function returns the Person actor for a user
+func Person(ctx *context.APIContext) {
+ // swagger:operation GET /activitypub/user-id/{user-id} activitypub activitypubPerson
+ // ---
+ // summary: Returns the Person actor for a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: user-id
+ // in: path
+ // description: user ID of the user
+ // type: integer
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityPub"
+
+ // TODO: the setting.AppURL during the test doesn't follow the definition: "It always has a '/' suffix"
+ link := fmt.Sprintf("%s/api/v1/activitypub/user-id/%d", strings.TrimSuffix(setting.AppURL, "/"), ctx.ContextUser.ID)
+ person := ap.PersonNew(ap.IRI(link))
+
+ person.Name = ap.NaturalLanguageValuesNew()
+ err := person.Name.Set("en", ap.Content(ctx.ContextUser.FullName))
+ if err != nil {
+ ctx.ServerError("Set Name", err)
+ return
+ }
+
+ person.PreferredUsername = ap.NaturalLanguageValuesNew()
+ err = person.PreferredUsername.Set("en", ap.Content(ctx.ContextUser.Name))
+ if err != nil {
+ ctx.ServerError("Set PreferredUsername", err)
+ return
+ }
+
+ person.URL = ap.IRI(ctx.ContextUser.HTMLURL())
+
+ person.Icon = ap.Image{
+ Type: ap.ImageType,
+ MediaType: "image/png",
+ URL: ap.IRI(ctx.ContextUser.AvatarLink(ctx)),
+ }
+
+ person.Inbox = ap.IRI(link + "/inbox")
+ person.Outbox = ap.IRI(link + "/outbox")
+
+ person.PublicKey.ID = ap.IRI(link + "#main-key")
+ person.PublicKey.Owner = ap.IRI(link)
+
+ publicKeyPem, err := activitypub.GetPublicKey(ctx, ctx.ContextUser)
+ if err != nil {
+ ctx.ServerError("GetPublicKey", err)
+ return
+ }
+ person.PublicKey.PublicKeyPem = publicKeyPem
+
+ binary, err := jsonld.WithContext(jsonld.IRI(ap.ActivityBaseURI), jsonld.IRI(ap.SecurityContextURI)).Marshal(person)
+ if err != nil {
+ ctx.ServerError("MarshalJSON", err)
+ return
+ }
+ ctx.Resp.Header().Add("Content-Type", activitypub.ActivityStreamsContentType)
+ ctx.Resp.WriteHeader(http.StatusOK)
+ if _, err = ctx.Resp.Write(binary); err != nil {
+ log.Error("write to resp err: %v", err)
+ }
+}
+
+// PersonInbox function handles the incoming data for a user inbox
+func PersonInbox(ctx *context.APIContext) {
+ // swagger:operation POST /activitypub/user-id/{user-id}/inbox activitypub activitypubPersonInbox
+ // ---
+ // summary: Send to the inbox
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: user-id
+ // in: path
+ // description: user ID of the user
+ // type: integer
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/activitypub/repository.go b/routers/api/v1/activitypub/repository.go
new file mode 100644
index 0000000..bc6e790
--- /dev/null
+++ b/routers/api/v1/activitypub/repository.go
@@ -0,0 +1,80 @@
+// Copyright 2023, 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/modules/forgefed"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/federation"
+
+ ap "github.com/go-ap/activitypub"
+)
+
+// Repository function returns the Repository actor for a repo
+func Repository(ctx *context.APIContext) {
+ // swagger:operation GET /activitypub/repository-id/{repository-id} activitypub activitypubRepository
+ // ---
+ // summary: Returns the Repository actor for a repo
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: repository-id
+ // in: path
+ // description: repository ID of the repo
+ // type: integer
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityPub"
+
+ link := fmt.Sprintf("%s/api/v1/activitypub/repository-id/%d", strings.TrimSuffix(setting.AppURL, "/"), ctx.Repo.Repository.ID)
+ repo := forgefed.RepositoryNew(ap.IRI(link))
+
+ repo.Name = ap.NaturalLanguageValuesNew()
+ err := repo.Name.Set("en", ap.Content(ctx.Repo.Repository.Name))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "Set Name", err)
+ return
+ }
+ response(ctx, repo)
+}
+
+// RepositoryInbox function handles the incoming data for a repository inbox
+func RepositoryInbox(ctx *context.APIContext) {
+ // swagger:operation POST /activitypub/repository-id/{repository-id}/inbox activitypub activitypubRepositoryInbox
+ // ---
+ // summary: Send to the inbox
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: repository-id
+ // in: path
+ // description: repository ID of the repo
+ // type: integer
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/ForgeLike"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+
+ repository := ctx.Repo.Repository
+ log.Info("RepositoryInbox: repo: %v", repository)
+
+ form := web.GetForm(ctx)
+ httpStatus, title, err := federation.ProcessLikeActivity(ctx, form, repository.ID)
+ if err != nil {
+ ctx.Error(httpStatus, title, err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/activitypub/repository_test.go b/routers/api/v1/activitypub/repository_test.go
new file mode 100644
index 0000000..acd588d
--- /dev/null
+++ b/routers/api/v1/activitypub/repository_test.go
@@ -0,0 +1,27 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/user"
+)
+
+func Test_UserEmailValidate(t *testing.T) {
+ sut := "ab@cd.ef"
+ if err := user.ValidateEmail(sut); err != nil {
+ t.Errorf("sut should be valid, %v, %v", sut, err)
+ }
+
+ sut = "83ce13c8-af0b-4112-8327-55a54e54e664@code.cartoon-aa.xyz"
+ if err := user.ValidateEmail(sut); err != nil {
+ t.Errorf("sut should be valid, %v, %v", sut, err)
+ }
+
+ sut = "1"
+ if err := user.ValidateEmail(sut); err == nil {
+ t.Errorf("sut should not be valid, %v", sut)
+ }
+}
diff --git a/routers/api/v1/activitypub/reqsignature.go b/routers/api/v1/activitypub/reqsignature.go
new file mode 100644
index 0000000..6003f66
--- /dev/null
+++ b/routers/api/v1/activitypub/reqsignature.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "code.gitea.io/gitea/modules/activitypub"
+ "code.gitea.io/gitea/modules/httplib"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ gitea_context "code.gitea.io/gitea/services/context"
+
+ ap "github.com/go-ap/activitypub"
+ "github.com/go-fed/httpsig"
+)
+
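+// getPublicKeyFromResponse parses a fetched Person document and returns the PEM-decoded public key whose id matches keyID.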
+func getPublicKeyFromResponse(b []byte, keyID *url.URL) (p crypto.PublicKey, err error) {
+ person := ap.PersonNew(ap.IRI(keyID.String()))
+ err = person.UnmarshalJSON(b)
+ if err != nil {
+ return nil, fmt.Errorf("ActivityStreams type cannot be converted to one known to have publicKey property: %w", err)
+ }
+ pubKey := person.PublicKey
+ if pubKey.ID.String() != keyID.String() {
+ return nil, fmt.Errorf("cannot find publicKey with id: %s in %s", keyID, string(b))
+ }
+ pubKeyPem := pubKey.PublicKeyPem
+ block, _ := pem.Decode([]byte(pubKeyPem))
+ if block == nil || block.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("could not decode publicKeyPem to PUBLIC KEY pem block type")
+ }
+ p, err = x509.ParsePKIXPublicKey(block.Bytes)
+ return p, err
+}
+
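+// fetch retrieves the ActivityStreams representation of iri, capping the response body at the configured federation size limit.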
+func fetch(iri *url.URL) (b []byte, err error) {
+ req := httplib.NewRequest(iri.String(), http.MethodGet)
+ req.Header("Accept", activitypub.ActivityStreamsContentType)
+ req.Header("User-Agent", "Gitea/"+setting.AppVer)
+ resp, err := req.Response()
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("url IRI fetch [%s] failed with status (%d): %s", iri, resp.StatusCode, resp.Status)
+ }
+ b, err = io.ReadAll(io.LimitReader(resp.Body, setting.Federation.MaxSize))
+ return b, err
+}
+
+func verifyHTTPSignatures(ctx *gitea_context.APIContext) (authenticated bool, err error) {
+ r := ctx.Req
+
+ // 1. Figure out what key we need to verify
+ v, err := httpsig.NewVerifier(r)
+ if err != nil {
+ return false, err
+ }
+ ID := v.KeyId()
+ idIRI, err := url.Parse(ID)
+ if err != nil {
+ return false, err
+ }
+ // 2. Fetch the public key of the other actor
+ b, err := fetch(idIRI)
+ if err != nil {
+ return false, err
+ }
+ pubKey, err := getPublicKeyFromResponse(b, idIRI)
+ if err != nil {
+ return false, err
+ }
+ // 3. Verify the other actor's key
+ algo := httpsig.Algorithm(setting.Federation.Algorithms[0])
+ authenticated = v.Verify(pubKey, algo) == nil
+ return authenticated, err
+}
+
+// ReqHTTPSignature function
+func ReqHTTPSignature() func(ctx *gitea_context.APIContext) {
+ return func(ctx *gitea_context.APIContext) {
+ if authenticated, err := verifyHTTPSignatures(ctx); err != nil {
+ log.Warn("verifyHttpSignatures failed: %v", err)
+ ctx.Error(http.StatusBadRequest, "reqSignature", "request signature verification failed")
+ } else if !authenticated {
+ ctx.Error(http.StatusForbidden, "reqSignature", "request signature verification failed")
+ }
+ }
+}
diff --git a/routers/api/v1/activitypub/response.go b/routers/api/v1/activitypub/response.go
new file mode 100644
index 0000000..42ef375
--- /dev/null
+++ b/routers/api/v1/activitypub/response.go
@@ -0,0 +1,35 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package activitypub
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/activitypub"
+ "code.gitea.io/gitea/modules/forgefed"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/services/context"
+
+ ap "github.com/go-ap/activitypub"
+ "github.com/go-ap/jsonld"
+)
+
+// Respond with an ActivityStreams object
+func response(ctx *context.APIContext, v any) {
+ binary, err := jsonld.WithContext(
+ jsonld.IRI(ap.ActivityBaseURI),
+ jsonld.IRI(ap.SecurityContextURI),
+ jsonld.IRI(forgefed.ForgeFedNamespaceURI),
+ ).Marshal(v)
+ if err != nil {
+ ctx.ServerError("Marshal", err)
+ return
+ }
+
+ ctx.Resp.Header().Add("Content-Type", activitypub.ActivityStreamsContentType)
+ ctx.Resp.WriteHeader(http.StatusOK)
+ if _, err = ctx.Resp.Write(binary); err != nil {
+ log.Error("write to resp err: %v", err)
+ }
+}
diff --git a/routers/api/v1/admin/adopt.go b/routers/api/v1/admin/adopt.go
new file mode 100644
index 0000000..a4708fe
--- /dev/null
+++ b/routers/api/v1/admin/adopt.go
@@ -0,0 +1,180 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "net/http"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListUnadoptedRepositories lists the unadopted repositories that match the provided names
+func ListUnadoptedRepositories(ctx *context.APIContext) {
+ // swagger:operation GET /admin/unadopted admin adminUnadoptedList
+ // ---
+ // summary: List unadopted repositories
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: pattern
+ // in: query
+ // description: pattern of repositories to search for
+ // type: string
+ // responses:
+ // "200":
+ // "$ref": "#/responses/StringSlice"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listOptions := utils.GetListOptions(ctx)
+ if listOptions.Page == 0 {
+ listOptions.Page = 1
+ }
+ repoNames, count, err := repo_service.ListUnadoptedRepositories(ctx, ctx.FormString("query"), &listOptions)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(int64(count))
+
+ ctx.JSON(http.StatusOK, repoNames)
+}
+
+// AdoptRepository will adopt an unadopted repository
+func AdoptRepository(ctx *context.APIContext) {
+ // swagger:operation POST /admin/unadopted/{owner}/{repo} admin adminAdoptRepository
+ // ---
+ // summary: Adopt unadopted files as a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ ownerName := ctx.Params(":username")
+ repoName := ctx.Params(":reponame")
+
+ ctxUser, err := user_model.GetUserByName(ctx, ownerName)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.InternalServerError(err)
+ return
+ }
+
+ // the directory must exist on disk but must not already be registered as a repository
+ has, err := repo_model.IsRepositoryModelExist(ctx, ctxUser, repoName)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ isDir, err := util.IsDir(repo_model.RepoPath(ctxUser.Name, repoName))
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if has || !isDir {
+ ctx.NotFound()
+ return
+ }
+ if _, err := repo_service.AdoptRepository(ctx, ctx.Doer, ctxUser, repo_service.CreateRepoOptions{
+ Name: repoName,
+ IsPrivate: true,
+ }); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteUnadoptedRepository will delete an unadopted repository
+func DeleteUnadoptedRepository(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/unadopted/{owner}/{repo} admin adminDeleteUnadoptedRepository
+ // ---
+ // summary: Delete unadopted files
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ ownerName := ctx.Params(":username")
+ repoName := ctx.Params(":reponame")
+
+ ctxUser, err := user_model.GetUserByName(ctx, ownerName)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.InternalServerError(err)
+ return
+ }
+
+ // the directory must exist on disk but must not already be registered as a repository
+ has, err := repo_model.IsRepositoryModelExist(ctx, ctxUser, repoName)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ isDir, err := util.IsDir(repo_model.RepoPath(ctxUser.Name, repoName))
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if has || !isDir {
+ ctx.NotFound()
+ return
+ }
+
+ if err := repo_service.DeleteUnadoptedRepository(ctx, ctx.Doer, ctxUser, repoName); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/admin/cron.go b/routers/api/v1/admin/cron.go
new file mode 100644
index 0000000..e1ca604
--- /dev/null
+++ b/routers/api/v1/admin/cron.go
@@ -0,0 +1,86 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/cron"
+)
+
+// ListCronTasks api for getting cron tasks
+func ListCronTasks(ctx *context.APIContext) {
+ // swagger:operation GET /admin/cron admin adminCronList
+ // ---
+ // summary: List cron tasks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CronList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ tasks := cron.ListTasks()
+ count := len(tasks)
+
+ listOpts := utils.GetListOptions(ctx)
+ tasks = util.PaginateSlice(tasks, listOpts.Page, listOpts.PageSize).(cron.TaskTable)
+
+ res := make([]structs.Cron, len(tasks))
+ for i, task := range tasks {
+ res[i] = structs.Cron{
+ Name: task.Name,
+ Schedule: task.Spec,
+ Next: task.Next,
+ Prev: task.Prev,
+ ExecTimes: task.ExecTimes,
+ }
+ }
+
+ ctx.SetTotalCountHeader(int64(count))
+ ctx.JSON(http.StatusOK, res)
+}
+
+// PostCronTask api for running a cron task
+func PostCronTask(ctx *context.APIContext) {
+ // swagger:operation POST /admin/cron/{task} admin adminCronRun
+ // ---
+ // summary: Run cron task
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: task
+ // in: path
+ // description: task to run
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ task := cron.GetTask(ctx.Params(":task"))
+ if task == nil {
+ ctx.NotFound()
+ return
+ }
+ task.Run()
+ log.Trace("Cron Task %s started by admin(%s)", task.Name, ctx.Doer.Name)
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/admin/email.go b/routers/api/v1/admin/email.go
new file mode 100644
index 0000000..ba963e9
--- /dev/null
+++ b/routers/api/v1/admin/email.go
@@ -0,0 +1,87 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetAllEmails lists all user email addresses, optionally filtered by the ":email" keyword parameter
+func GetAllEmails(ctx *context.APIContext) {
+ // swagger:operation GET /admin/emails admin adminGetAllEmails
+ // ---
+ // summary: List all emails
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/EmailList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ emails, maxResults, err := user_model.SearchEmails(ctx, &user_model.SearchEmailOptions{
+ Keyword: ctx.Params(":email"),
+ ListOptions: listOptions,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetAllEmails", err)
+ return
+ }
+
+ results := make([]*api.Email, len(emails))
+ for i := range emails {
+ results[i] = convert.ToEmailSearch(emails[i])
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &results)
+}
+
+// SearchEmail searches user email addresses by the "q" keyword
+func SearchEmail(ctx *context.APIContext) {
+ // swagger:operation GET /admin/emails/search admin adminSearchEmails
+ // ---
+ // summary: Search all emails
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: q
+ // in: query
+ // description: keyword
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/EmailList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ ctx.SetParams(":email", ctx.FormTrim("q"))
+ GetAllEmails(ctx)
+}
diff --git a/routers/api/v1/admin/hooks.go b/routers/api/v1/admin/hooks.go
new file mode 100644
index 0000000..b246cb6
--- /dev/null
+++ b/routers/api/v1/admin/hooks.go
@@ -0,0 +1,176 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "errors"
+ "net/http"
+
+ "code.gitea.io/gitea/models/webhook"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+// ListHooks lists the system webhooks
+func ListHooks(ctx *context.APIContext) {
+ // swagger:operation GET /admin/hooks admin adminListHooks
+ // ---
+ // summary: List system's webhooks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/HookList"
+
+ sysHooks, err := webhook.GetSystemWebhooks(ctx, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetSystemWebhooks", err)
+ return
+ }
+ hooks := make([]*api.Hook, len(sysHooks))
+ for i, hook := range sysHooks {
+ h, err := webhook_service.ToHook(setting.AppURL+"/admin", hook)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToHook", err)
+ return
+ }
+ hooks[i] = h
+ }
+ ctx.JSON(http.StatusOK, hooks)
+}
+
+// GetHook gets a system hook by its id
+func GetHook(ctx *context.APIContext) {
+ // swagger:operation GET /admin/hooks/{id} admin adminGetHook
+ // ---
+ // summary: Get a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+
+ hookID := ctx.ParamsInt64(":id")
+ hook, err := webhook.GetSystemOrDefaultWebhook(ctx, hookID)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetSystemOrDefaultWebhook", err)
+ }
+ return
+ }
+ h, err := webhook_service.ToHook("/admin/", hook)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToHook", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, h)
+}
+
+// CreateHook creates a system hook
+func CreateHook(ctx *context.APIContext) {
+ // swagger:operation POST /admin/hooks admin adminCreateHook
+ // ---
+ // summary: Create a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateHookOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Hook"
+
+ form := web.GetForm(ctx).(*api.CreateHookOption)
+
+ utils.AddSystemHook(ctx, form)
+}
+
+// EditHook modifies a system hook
+func EditHook(ctx *context.APIContext) {
+ // swagger:operation PATCH /admin/hooks/{id} admin adminEditHook
+ // ---
+ // summary: Update a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to update
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditHookOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+
+ form := web.GetForm(ctx).(*api.EditHookOption)
+
+ // TODO in body params
+ hookID := ctx.ParamsInt64(":id")
+ utils.EditSystemHook(ctx, form, hookID)
+}
+
+// DeleteHook deletes a system hook
+func DeleteHook(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/hooks/{id} admin adminDeleteHook
+ // ---
+ // summary: Delete a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+
+ hookID := ctx.ParamsInt64(":id")
+ if err := webhook.DeleteDefaultSystemWebhook(ctx, hookID); err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteDefaultSystemWebhook", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/admin/org.go b/routers/api/v1/admin/org.go
new file mode 100644
index 0000000..a5c299b
--- /dev/null
+++ b/routers/api/v1/admin/org.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// CreateOrg api for creating an organization
+func CreateOrg(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users/{username}/orgs admin adminCreateOrg
+ // ---
+ // summary: Create an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user that will own the created organization
+ // type: string
+ // required: true
+ // - name: organization
+ // in: body
+ // required: true
+ // schema: { "$ref": "#/definitions/CreateOrgOption" }
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Organization"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateOrgOption)
+
+ visibility := api.VisibleTypePublic
+ if form.Visibility != "" {
+ visibility = api.VisibilityModes[form.Visibility]
+ }
+
+ org := &organization.Organization{
+ Name: form.UserName,
+ FullName: form.FullName,
+ Description: form.Description,
+ Website: form.Website,
+ Location: form.Location,
+ IsActive: true,
+ Type: user_model.UserTypeOrganization,
+ Visibility: visibility,
+ }
+
+ if err := organization.CreateOrganization(ctx, org, ctx.ContextUser); err != nil {
+ if user_model.IsErrUserAlreadyExist(err) ||
+ db.IsErrNameReserved(err) ||
+ db.IsErrNameCharsNotAllowed(err) ||
+ db.IsErrNamePatternNotAllowed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateOrganization", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToOrganization(ctx, org))
+}
+
+// GetAllOrgs API for getting information about all organizations
+func GetAllOrgs(ctx *context.APIContext) {
+ // swagger:operation GET /admin/orgs admin adminGetAllOrgs
+ // ---
+ // summary: List all organizations
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OrganizationList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ users, maxResults, err := user_model.SearchUsers(ctx, &user_model.SearchUserOptions{
+ Actor: ctx.Doer,
+ Type: user_model.UserTypeOrganization,
+ OrderBy: db.SearchOrderByAlphabetically,
+ ListOptions: listOptions,
+ Visible: []api.VisibleType{api.VisibleTypePublic, api.VisibleTypeLimited, api.VisibleTypePrivate},
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchOrganizations", err)
+ return
+ }
+ orgs := make([]*api.Organization, len(users))
+ for i := range users {
+ orgs[i] = convert.ToOrganization(ctx, organization.OrgFromUser(users[i]))
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &orgs)
+}
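GetAllOrgs accepts page/limit query parameters and reports totals through the Link and X-Total-Count headers it sets. A minimal sketch of a client walking those pages, assuming a placeholder base URL/token and a deliberately reduced organization shape; only the route and the query parameters come from the annotation above, and stopping on a short page is a simple heuristic rather than Link-header parsing.

package orgclient

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// org is a deliberately reduced shape for decoding; the full schema is the
// swagger Organization definition, not this struct.
type org struct {
	ID   int64  `json:"id"`
	Name string `json:"name"`
}

// listAllOrgs pages through GET /api/v1/admin/orgs using the documented
// page/limit query parameters. baseURL and token are placeholders.
func listAllOrgs(baseURL, token string, limit int) ([]org, error) {
	var all []org
	for page := 1; ; page++ {
		url := fmt.Sprintf("%s/api/v1/admin/orgs?page=%d&limit=%d", baseURL, page, limit)
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Authorization", "token "+token)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		}
		var batch []org
		err = json.NewDecoder(resp.Body).Decode(&batch)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, batch...)
		if len(batch) < limit { // short page: assume this was the last one
			return all, nil
		}
	}
}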
diff --git a/routers/api/v1/admin/quota.go b/routers/api/v1/admin/quota.go
new file mode 100644
index 0000000..1e7c11e
--- /dev/null
+++ b/routers/api/v1/admin/quota.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "net/http"
+
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetUserQuota returns information about a user's quota
+func GetUserQuota(ctx *context.APIContext) {
+ // swagger:operation GET /admin/users/{username}/quota admin adminGetUserQuota
+ // ---
+ // summary: Get the user's quota info
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to query
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaInfo"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ used, err := quota_model.GetUsedForUser(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.GetUsedForUser", err)
+ return
+ }
+
+ groups, err := quota_model.GetGroupsForUser(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.GetGroupsForUser", err)
+ return
+ }
+
+ result := convert.ToQuotaInfo(used, groups, true)
+ ctx.JSON(http.StatusOK, &result)
+}
diff --git a/routers/api/v1/admin/quota_group.go b/routers/api/v1/admin/quota_group.go
new file mode 100644
index 0000000..e20b361
--- /dev/null
+++ b/routers/api/v1/admin/quota_group.go
@@ -0,0 +1,436 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ go_context "context"
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ quota_model "code.gitea.io/gitea/models/quota"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListQuotaGroups returns all the quota groups
+func ListQuotaGroups(ctx *context.APIContext) {
+ // swagger:operation GET /admin/quota/groups admin adminListQuotaGroups
+ // ---
+ // summary: List the available quota groups
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaGroupList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ groups, err := quota_model.ListGroups(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.ListGroups", err)
+ return
+ }
+ for _, group := range groups {
+ if err = group.LoadRules(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.group.LoadRules", err)
+ return
+ }
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToQuotaGroupList(groups, true))
+}
+
+func createQuotaGroupWithRules(ctx go_context.Context, opts *api.CreateQuotaGroupOptions) (*quota_model.Group, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ group, err := quota_model.CreateGroup(ctx, opts.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rule := range opts.Rules {
+ exists, err := quota_model.DoesRuleExist(ctx, rule.Name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ var limit int64
+ if rule.Limit != nil {
+ limit = *rule.Limit
+ }
+
+ subjects, err := toLimitSubjects(rule.Subjects)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = quota_model.CreateRule(ctx, rule.Name, limit, *subjects)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err = group.AddRuleByName(ctx, rule.Name); err != nil {
+ return nil, err
+ }
+ }
+
+ if err = group.LoadRules(ctx); err != nil {
+ return nil, err
+ }
+
+ return group, committer.Commit()
+}
+
+// CreateQuotaGroup creates a new quota group
+func CreateQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation POST /admin/quota/groups admin adminCreateQuotaGroup
+ // ---
+ // summary: Create a new quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: group
+ // in: body
+ // description: Definition of the quota group
+ // schema:
+ // "$ref": "#/definitions/CreateQuotaGroupOptions"
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/QuotaGroup"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateQuotaGroupOptions)
+
+ group, err := createQuotaGroupWithRules(ctx, form)
+ if err != nil {
+ if quota_model.IsErrGroupAlreadyExists(err) {
+ ctx.Error(http.StatusConflict, "", err)
+ } else if quota_model.IsErrParseLimitSubjectUnrecognized(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.CreateGroup", err)
+ }
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToQuotaGroup(*group, true))
+}
+
+// ListUsersInQuotaGroup lists all the users in a quota group
+func ListUsersInQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation GET /admin/quota/groups/{quotagroup}/users admin adminListUsersInQuotaGroup
+ // ---
+ // summary: List users in a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to list members of
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ users, err := quota_model.ListUsersInGroup(ctx, ctx.QuotaGroup.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.ListUsersInGroup", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToUsers(ctx, ctx.Doer, users))
+}
+
+// AddUserToQuotaGroup adds a user to a quota group
+func AddUserToQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation PUT /admin/quota/groups/{quotagroup}/users/{username} admin adminAddUserToQuotaGroup
+ // ---
+ // summary: Add a user to a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to add the user to
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user to add to the quota group
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ err := ctx.QuotaGroup.AddUserByID(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ if quota_model.IsErrUserAlreadyInGroup(err) {
+ ctx.Error(http.StatusConflict, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_group.group.AddUserByID", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// RemoveUserFromQuotaGroup removes a user from a quota group
+func RemoveUserFromQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/quota/groups/{quotagroup}/users/{username} admin adminRemoveUserFromQuotaGroup
+ // ---
+ // summary: Remove a user from a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to remove a user from
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user to remove from the quota group
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := ctx.QuotaGroup.RemoveUserByID(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ if quota_model.IsErrUserNotInGroup(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.group.RemoveUserByID", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// SetUserQuotaGroups moves the user to specific quota groups
+func SetUserQuotaGroups(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users/{username}/quota/groups admin adminSetUserQuotaGroups
+ // ---
+ // summary: Set the user's quota groups to a given list.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user to modify the quota groups from
+ // type: string
+ // required: true
+ // - name: groups
+ // in: body
+ // description: list of groups that the user should be a member of
+ // schema:
+ // "$ref": "#/definitions/SetUserQuotaGroupsOptions"
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.SetUserQuotaGroupsOptions)
+
+ err := quota_model.SetUserGroups(ctx, ctx.ContextUser.ID, form.Groups)
+ if err != nil {
+ if quota_model.IsErrGroupNotFound(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.SetUserGroups", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteQuotaGroup deletes a quota group
+func DeleteQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/quota/groups/{quotagroup} admin adminDeleteQuotaGroup
+ // ---
+ // summary: Delete a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := quota_model.DeleteGroupByName(ctx, ctx.QuotaGroup.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.DeleteGroupByName", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetQuotaGroup returns information about a quota group
+func GetQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation GET /admin/quota/groups/{quotagroup} admin adminGetQuotaGroup
+ // ---
+ // summary: Get information about the quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to query
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaGroup"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ ctx.JSON(http.StatusOK, convert.ToQuotaGroup(*ctx.QuotaGroup, true))
+}
+
+// AddRuleToQuotaGroup adds a rule to a quota group
+func AddRuleToQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation PUT /admin/quota/groups/{quotagroup}/rules/{quotarule} admin adminAddRuleToQuotaGroup
+ // ---
+ // summary: Adds a rule to a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to add a rule to
+ // type: string
+ // required: true
+ // - name: quotarule
+ // in: path
+ // description: the name of the quota rule to add to the group
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ err := ctx.QuotaGroup.AddRuleByName(ctx, ctx.QuotaRule.Name)
+ if err != nil {
+ if quota_model.IsErrRuleAlreadyInGroup(err) {
+ ctx.Error(http.StatusConflict, "", err)
+ } else if quota_model.IsErrRuleNotFound(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.group.AddRuleByName", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// RemoveRuleFromQuotaGroup removes a rule from a quota group
+func RemoveRuleFromQuotaGroup(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/quota/groups/{quotagroup}/rules/{quotarule} admin adminRemoveRuleFromQuotaGroup
+ // ---
+ // summary: Removes a rule from a quota group
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotagroup
+ // in: path
+ // description: quota group to remove a rule from
+ // type: string
+ // required: true
+ // - name: quotarule
+ // in: path
+ // description: the name of the quota rule to remove from the group
+ // type: string
+ // required: true
+ // responses:
+	//   "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := ctx.QuotaGroup.RemoveRuleByName(ctx, ctx.QuotaRule.Name)
+ if err != nil {
+ if quota_model.IsErrRuleNotInGroup(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.group.RemoveRuleByName", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
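createQuotaGroupWithRules wraps group and rule creation in a single transaction: Close is deferred immediately as a rollback guard and Commit is the last call that can fail. A generic sketch of that shape, using an illustrative committer interface rather than the real models/db one.

package txsketch

import "context"

// committer is an illustrative stand-in for the value returned by db.TxContext
// in the handler above; it is not the real models/db interface.
type committer interface {
	Commit() error
	Close() error
}

// withTx mirrors the shape of createQuotaGroupWithRules: defer Close early so
// every early return rolls the transaction back, and make Commit the final
// step. Close is assumed to be harmless after a successful Commit, matching
// how the handler uses defer committer.Close().
func withTx(ctx context.Context, begin func(context.Context) (context.Context, committer, error), body func(context.Context) error) error {
	txCtx, c, err := begin(ctx)
	if err != nil {
		return err
	}
	defer c.Close()

	if err := body(txCtx); err != nil {
		return err
	}
	return c.Commit()
}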
diff --git a/routers/api/v1/admin/quota_rule.go b/routers/api/v1/admin/quota_rule.go
new file mode 100644
index 0000000..85c05e1
--- /dev/null
+++ b/routers/api/v1/admin/quota_rule.go
@@ -0,0 +1,219 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "fmt"
+ "net/http"
+
+ quota_model "code.gitea.io/gitea/models/quota"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+func toLimitSubjects(subjStrings []string) (*quota_model.LimitSubjects, error) {
+ subjects := make(quota_model.LimitSubjects, len(subjStrings))
+ for i := range len(subjStrings) {
+ subj, err := quota_model.ParseLimitSubject(subjStrings[i])
+ if err != nil {
+ return nil, err
+ }
+ subjects[i] = subj
+ }
+
+ return &subjects, nil
+}
+
+// ListQuotaRules lists all the quota rules
+func ListQuotaRules(ctx *context.APIContext) {
+ // swagger:operation GET /admin/quota/rules admin adminListQuotaRules
+ // ---
+ // summary: List the available quota rules
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaRuleInfoList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ rules, err := quota_model.ListRules(ctx)
+ if err != nil {
+	ctx.Error(http.StatusInternalServerError, "quota_model.ListRules", err)
+ return
+ }
+
+ result := make([]api.QuotaRuleInfo, len(rules))
+ for i := range len(rules) {
+ result[i] = convert.ToQuotaRuleInfo(rules[i], true)
+ }
+
+ ctx.JSON(http.StatusOK, result)
+}
+
+// CreateQuotaRule creates a new quota rule
+func CreateQuotaRule(ctx *context.APIContext) {
+ // swagger:operation POST /admin/quota/rules admin adminCreateQuotaRule
+ // ---
+ // summary: Create a new quota rule
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: rule
+ // in: body
+ // description: Definition of the quota rule
+ // schema:
+ // "$ref": "#/definitions/CreateQuotaRuleOptions"
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/QuotaRuleInfo"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateQuotaRuleOptions)
+
+ if form.Limit == nil {
+ ctx.Error(http.StatusUnprocessableEntity, "quota_model.ParseLimitSubject", fmt.Errorf("[Limit]: Required"))
+ return
+ }
+
+ subjects, err := toLimitSubjects(form.Subjects)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "quota_model.ParseLimitSubject", err)
+ return
+ }
+
+ rule, err := quota_model.CreateRule(ctx, form.Name, *form.Limit, *subjects)
+ if err != nil {
+ if quota_model.IsErrRuleAlreadyExists(err) {
+ ctx.Error(http.StatusConflict, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "quota_model.CreateRule", err)
+ }
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToQuotaRuleInfo(*rule, true))
+}
+
+// GetQuotaRule returns information about the specified quota rule
+func GetQuotaRule(ctx *context.APIContext) {
+ // swagger:operation GET /admin/quota/rules/{quotarule} admin adminGetQuotaRule
+ // ---
+ // summary: Get information about a quota rule
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotarule
+ // in: path
+ // description: quota rule to query
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaRuleInfo"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ ctx.JSON(http.StatusOK, convert.ToQuotaRuleInfo(*ctx.QuotaRule, true))
+}
+
+// EditQuotaRule changes an existing quota rule
+func EditQuotaRule(ctx *context.APIContext) {
+ // swagger:operation PATCH /admin/quota/rules/{quotarule} admin adminEditQuotaRule
+ // ---
+ // summary: Change an existing quota rule
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotarule
+ // in: path
+ // description: Quota rule to change
+ // type: string
+ // required: true
+ // - name: rule
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditQuotaRuleOptions"
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaRuleInfo"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.EditQuotaRuleOptions)
+
+ var subjects *quota_model.LimitSubjects
+ if form.Subjects != nil {
+ subjs := make(quota_model.LimitSubjects, len(*form.Subjects))
+ for i := range len(*form.Subjects) {
+ subj, err := quota_model.ParseLimitSubject((*form.Subjects)[i])
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "quota_model.ParseLimitSubject", err)
+ return
+ }
+ subjs[i] = subj
+ }
+ subjects = &subjs
+ }
+
+ rule, err := ctx.QuotaRule.Edit(ctx, form.Limit, subjects)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.rule.Edit", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToQuotaRuleInfo(*rule, true))
+}
+
+// DeleteQuotaRule deletes a quota rule
+func DeleteQuotaRule(ctx *context.APIContext) {
+	// swagger:operation DELETE /admin/quota/rules/{quotarule} admin adminDeleteQuotaRule
+ // ---
+ // summary: Deletes a quota rule
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: quotarule
+ // in: path
+ // description: quota rule to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := quota_model.DeleteRuleByName(ctx, ctx.QuotaRule.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.DeleteRuleByName", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
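toLimitSubjects converts each subject string in turn and fails fast on the first parse error. The same shape written generically (illustrative names only; the real code works with quota_model.LimitSubjects and ParseLimitSubject).

package parsesketch

import "fmt"

// parseAll converts every element of in with parse and stops at the first
// failure, mirroring the fail-fast loop in toLimitSubjects above.
func parseAll[T any](in []string, parse func(string) (T, error)) ([]T, error) {
	out := make([]T, len(in))
	for i, s := range in {
		v, err := parse(s)
		if err != nil {
			return nil, fmt.Errorf("entry %d (%q): %w", i, s, err)
		}
		out[i] = v
	}
	return out, nil
}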
diff --git a/routers/api/v1/admin/repo.go b/routers/api/v1/admin/repo.go
new file mode 100644
index 0000000..c119d53
--- /dev/null
+++ b/routers/api/v1/admin/repo.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/repo"
+ "code.gitea.io/gitea/services/context"
+)
+
+// CreateRepo api for creating a repository
+func CreateRepo(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users/{username}/repos admin adminCreateRepo
+ // ---
+ // summary: Create a repository on behalf of a user
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user. This user will own the created repository
+ // type: string
+ // required: true
+ // - name: repository
+ // in: body
+ // required: true
+ // schema: { "$ref": "#/definitions/CreateRepoOption" }
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateRepoOption)
+
+ repo.CreateUserRepo(ctx, ctx.ContextUser, *form)
+}
diff --git a/routers/api/v1/admin/runners.go b/routers/api/v1/admin/runners.go
new file mode 100644
index 0000000..329242d
--- /dev/null
+++ b/routers/api/v1/admin/runners.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/services/context"
+)
+
+// https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-a-registration-token-for-an-organization
+
+// GetRegistrationToken returns the token to register global runners
+func GetRegistrationToken(ctx *context.APIContext) {
+ // swagger:operation GET /admin/runners/registration-token admin adminGetRunnerRegistrationToken
+ // ---
+	// summary: Get a global actions runner registration token
+ // produces:
+ // - application/json
+ // parameters:
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RegistrationToken"
+
+ shared.GetRegistrationToken(ctx, 0, 0)
+}
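GetRegistrationToken passes zero owner and repository IDs to the shared helper, which here stands for the instance-wide scope. A hedged sketch of that scope-selection idea, with illustrative types that are not part of the shared package.

package scopesketch

// runnerScope is an illustrative type: a zero OwnerID and RepoID is read as
// "the whole instance", which is what shared.GetRegistrationToken(ctx, 0, 0)
// above selects.
type runnerScope struct {
	OwnerID int64
	RepoID  int64
}

// level names the scope a registration token would apply to.
func (s runnerScope) level() string {
	switch {
	case s.OwnerID == 0 && s.RepoID == 0:
		return "global"
	case s.RepoID != 0:
		return "repository"
	default:
		return "user or organization"
	}
}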
diff --git a/routers/api/v1/admin/user.go b/routers/api/v1/admin/user.go
new file mode 100644
index 0000000..9ea210e
--- /dev/null
+++ b/routers/api/v1/admin/user.go
@@ -0,0 +1,509 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package admin
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/auth/password"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/user"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ asymkey_service "code.gitea.io/gitea/services/asymkey"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/mailer"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+func parseAuthSource(ctx *context.APIContext, u *user_model.User, sourceID int64) {
+ if sourceID == 0 {
+ return
+ }
+
+ source, err := auth.GetSourceByID(ctx, sourceID)
+ if err != nil {
+ if auth.IsErrSourceNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "auth.GetSourceByID", err)
+ }
+ return
+ }
+
+ u.LoginType = source.Type
+ u.LoginSource = source.ID
+}
+
+// CreateUser creates a user
+func CreateUser(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users admin adminCreateUser
+ // ---
+ // summary: Create a user
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateUserOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/User"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateUserOption)
+
+ u := &user_model.User{
+ Name: form.Username,
+ FullName: form.FullName,
+ Email: form.Email,
+ Passwd: form.Password,
+ MustChangePassword: true,
+ LoginType: auth.Plain,
+ LoginName: form.LoginName,
+ }
+ if form.MustChangePassword != nil {
+ u.MustChangePassword = *form.MustChangePassword
+ }
+
+ parseAuthSource(ctx, u, form.SourceID)
+ if ctx.Written() {
+ return
+ }
+
+ if u.LoginType == auth.Plain {
+ if len(form.Password) < setting.MinPasswordLength {
+ err := errors.New("PasswordIsRequired")
+ ctx.Error(http.StatusBadRequest, "PasswordIsRequired", err)
+ return
+ }
+
+ if !password.IsComplexEnough(form.Password) {
+ err := errors.New("PasswordComplexity")
+ ctx.Error(http.StatusBadRequest, "PasswordComplexity", err)
+ return
+ }
+
+ if err := password.IsPwned(ctx, form.Password); err != nil {
+ if password.IsErrIsPwnedRequest(err) {
+ log.Error(err.Error())
+ }
+ ctx.Error(http.StatusBadRequest, "PasswordPwned", errors.New("PasswordPwned"))
+ return
+ }
+ }
+
+ overwriteDefault := &user_model.CreateUserOverwriteOptions{
+ IsActive: optional.Some(true),
+ IsRestricted: optional.FromPtr(form.Restricted),
+ }
+
+ if form.Visibility != "" {
+ visibility := api.VisibilityModes[form.Visibility]
+ overwriteDefault.Visibility = &visibility
+ }
+
+ // Update the user creation timestamp. This can only be done after the user
+	// record has been inserted into the database; the insert itself will always
+ // set the creation timestamp to "now".
+ if form.Created != nil {
+ u.CreatedUnix = timeutil.TimeStamp(form.Created.Unix())
+ u.UpdatedUnix = u.CreatedUnix
+ }
+
+ if err := user_model.AdminCreateUser(ctx, u, overwriteDefault); err != nil {
+ if user_model.IsErrUserAlreadyExist(err) ||
+ user_model.IsErrEmailAlreadyUsed(err) ||
+ db.IsErrNameReserved(err) ||
+ db.IsErrNameCharsNotAllowed(err) ||
+ user_model.IsErrEmailCharIsNotSupported(err) ||
+ user_model.IsErrEmailInvalid(err) ||
+ db.IsErrNamePatternNotAllowed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateUser", err)
+ }
+ return
+ }
+
+ if !user_model.IsEmailDomainAllowed(u.Email) {
+ ctx.Resp.Header().Add("X-Gitea-Warning", fmt.Sprintf("the domain of user email %s conflicts with EMAIL_DOMAIN_ALLOWLIST or EMAIL_DOMAIN_BLOCKLIST", u.Email))
+ }
+
+ log.Trace("Account created by admin (%s): %s", ctx.Doer.Name, u.Name)
+
+ // Send email notification.
+ if form.SendNotify {
+ mailer.SendRegisterNotifyMail(u)
+ }
+ ctx.JSON(http.StatusCreated, convert.ToUser(ctx, u, ctx.Doer))
+}
+
+// EditUser api for modifying a user's information
+func EditUser(ctx *context.APIContext) {
+ // swagger:operation PATCH /admin/users/{username} admin adminEditUser
+ // ---
+ // summary: Edit an existing user
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to edit
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditUserOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/User"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.EditUserOption)
+
+ // If either LoginSource or LoginName is given, the other must be present too.
+ if form.SourceID != nil || form.LoginName != nil {
+ if form.SourceID == nil || form.LoginName == nil {
+ ctx.Error(http.StatusUnprocessableEntity, "LoginSourceAndLoginName", fmt.Errorf("source_id and login_name must be specified together"))
+ return
+ }
+ }
+
+ authOpts := &user_service.UpdateAuthOptions{
+ LoginSource: optional.FromPtr(form.SourceID),
+ LoginName: optional.FromPtr(form.LoginName),
+ Password: optional.FromNonDefault(form.Password),
+ MustChangePassword: optional.FromPtr(form.MustChangePassword),
+ ProhibitLogin: optional.FromPtr(form.ProhibitLogin),
+ }
+ if err := user_service.UpdateAuth(ctx, ctx.ContextUser, authOpts); err != nil {
+ switch {
+ case errors.Is(err, password.ErrMinLength):
+ ctx.Error(http.StatusBadRequest, "PasswordTooShort", fmt.Errorf("password must be at least %d characters", setting.MinPasswordLength))
+ case errors.Is(err, password.ErrComplexity):
+ ctx.Error(http.StatusBadRequest, "PasswordComplexity", err)
+ case errors.Is(err, password.ErrIsPwned), password.IsErrIsPwnedRequest(err):
+ ctx.Error(http.StatusBadRequest, "PasswordIsPwned", err)
+ default:
+ ctx.Error(http.StatusInternalServerError, "UpdateAuth", err)
+ }
+ return
+ }
+
+ if form.Email != nil {
+ if err := user_service.AdminAddOrSetPrimaryEmailAddress(ctx, ctx.ContextUser, *form.Email); err != nil {
+ switch {
+ case user_model.IsErrEmailCharIsNotSupported(err), user_model.IsErrEmailInvalid(err):
+ ctx.Error(http.StatusBadRequest, "EmailInvalid", err)
+ case user_model.IsErrEmailAlreadyUsed(err):
+ ctx.Error(http.StatusBadRequest, "EmailUsed", err)
+ default:
+ ctx.Error(http.StatusInternalServerError, "AddOrSetPrimaryEmailAddress", err)
+ }
+ return
+ }
+
+ if !user_model.IsEmailDomainAllowed(*form.Email) {
+ ctx.Resp.Header().Add("X-Gitea-Warning", fmt.Sprintf("the domain of user email %s conflicts with EMAIL_DOMAIN_ALLOWLIST or EMAIL_DOMAIN_BLOCKLIST", *form.Email))
+ }
+ }
+
+ opts := &user_service.UpdateOptions{
+ FullName: optional.FromPtr(form.FullName),
+ Website: optional.FromPtr(form.Website),
+ Location: optional.FromPtr(form.Location),
+ Description: optional.FromPtr(form.Description),
+ Pronouns: optional.FromPtr(form.Pronouns),
+ IsActive: optional.FromPtr(form.Active),
+ IsAdmin: optional.FromPtr(form.Admin),
+ Visibility: optional.FromNonDefault(api.VisibilityModes[form.Visibility]),
+ AllowGitHook: optional.FromPtr(form.AllowGitHook),
+ AllowImportLocal: optional.FromPtr(form.AllowImportLocal),
+ MaxRepoCreation: optional.FromPtr(form.MaxRepoCreation),
+ AllowCreateOrganization: optional.FromPtr(form.AllowCreateOrganization),
+ IsRestricted: optional.FromPtr(form.Restricted),
+ }
+
+ if err := user_service.UpdateUser(ctx, ctx.ContextUser, opts); err != nil {
+ if models.IsErrDeleteLastAdminUser(err) {
+ ctx.Error(http.StatusBadRequest, "LastAdmin", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateUser", err)
+ }
+ return
+ }
+
+ log.Trace("Account profile updated by admin (%s): %s", ctx.Doer.Name, ctx.ContextUser.Name)
+
+ ctx.JSON(http.StatusOK, convert.ToUser(ctx, ctx.ContextUser, ctx.Doer))
+}
+
+// DeleteUser api for deleting a user
+func DeleteUser(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/users/{username} admin adminDeleteUser
+ // ---
+ // summary: Delete a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to delete
+ // type: string
+ // required: true
+ // - name: purge
+ // in: query
+ // description: purge the user from the system completely
+ // type: boolean
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization not a user", ctx.ContextUser.Name))
+ return
+ }
+
+	// an admin should not delete themselves
+ if ctx.ContextUser.ID == ctx.Doer.ID {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("you cannot delete yourself"))
+ return
+ }
+
+ if err := user_service.DeleteUser(ctx, ctx.ContextUser, ctx.FormBool("purge")); err != nil {
+ if models.IsErrUserOwnRepos(err) ||
+ models.IsErrUserHasOrgs(err) ||
+ models.IsErrUserOwnPackages(err) ||
+ models.IsErrDeleteLastAdminUser(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteUser", err)
+ }
+ return
+ }
+ log.Trace("Account deleted by admin(%s): %s", ctx.Doer.Name, ctx.ContextUser.Name)
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreatePublicKey api for creating a public key for a user
+func CreatePublicKey(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users/{username}/keys admin adminCreatePublicKey
+ // ---
+ // summary: Add a public key on behalf of a user
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // - name: key
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateKeyOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/PublicKey"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateKeyOption)
+
+ user.CreateUserPublicKey(ctx, *form, ctx.ContextUser.ID)
+}
+
+// DeleteUserPublicKey api for deleting a user's public key
+func DeleteUserPublicKey(ctx *context.APIContext) {
+ // swagger:operation DELETE /admin/users/{username}/keys/{id} admin adminDeleteUserPublicKey
+ // ---
+ // summary: Delete a user's public key
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the key to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := asymkey_service.DeletePublicKey(ctx, ctx.ContextUser, ctx.ParamsInt64(":id")); err != nil {
+ if asymkey_model.IsErrKeyNotExist(err) {
+ ctx.NotFound()
+ } else if asymkey_model.IsErrKeyAccessDenied(err) {
+ ctx.Error(http.StatusForbidden, "", "You do not have access to this key")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteUserPublicKey", err)
+ }
+ return
+ }
+ log.Trace("Key deleted by admin(%s): %s", ctx.Doer.Name, ctx.ContextUser.Name)
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// SearchUsers API for getting information about users according to the filter conditions
+func SearchUsers(ctx *context.APIContext) {
+ // swagger:operation GET /admin/users admin adminSearchUsers
+ // ---
+	// summary: Search users according to filter conditions
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: source_id
+ // in: query
+ // description: ID of the user's login source to search for
+ // type: integer
+ // format: int64
+ // - name: login_name
+ // in: query
+ // description: user's login name to search for
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ users, maxResults, err := user_model.SearchUsers(ctx, &user_model.SearchUserOptions{
+ Actor: ctx.Doer,
+ Type: user_model.UserTypeIndividual,
+ LoginName: ctx.FormTrim("login_name"),
+ SourceID: ctx.FormInt64("source_id"),
+ OrderBy: db.SearchOrderByAlphabetically,
+ ListOptions: listOptions,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchUsers", err)
+ return
+ }
+
+ results := make([]*api.User, len(users))
+ for i := range users {
+ results[i] = convert.ToUser(ctx, users[i], ctx.Doer)
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &results)
+}
+
+// RenameUser api for renaming a user
+func RenameUser(ctx *context.APIContext) {
+ // swagger:operation POST /admin/users/{username}/rename admin adminRenameUser
+ // ---
+ // summary: Rename a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: existing username of user
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/RenameUserOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization not a user", ctx.ContextUser.Name))
+ return
+ }
+
+ oldName := ctx.ContextUser.Name
+ newName := web.GetForm(ctx).(*api.RenameUserOption).NewName
+
+ // Check if user name has been changed
+ if err := user_service.RenameUser(ctx, ctx.ContextUser, newName); err != nil {
+ switch {
+ case user_model.IsErrUserAlreadyExist(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("form.username_been_taken"))
+ case db.IsErrNameReserved(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_reserved", newName))
+ case db.IsErrNamePatternNotAllowed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_pattern_not_allowed", newName))
+ case db.IsErrNameCharsNotAllowed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.Tr("user.form.name_chars_not_allowed", newName))
+ default:
+ ctx.ServerError("ChangeUserName", err)
+ }
+ return
+ }
+
+ log.Trace("User name changed: %s -> %s", oldName, newName)
+ ctx.Status(http.StatusNoContent)
+}
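EditUser builds its update options with optional.FromPtr and optional.FromNonDefault, so a field omitted from the PATCH body stays distinguishable from a field explicitly set. A minimal sketch of the FromPtr semantics; this is an illustrative stand-in, not the real modules/optional package.

package optionsketch

// option is a tiny stand-in for the value type used by EditUser above.
type option[T any] struct {
	value T
	set   bool
}

// fromPtr yields an unset option for a nil pointer (field omitted from the
// PATCH body) and a set option otherwise, mirroring optional.FromPtr.
func fromPtr[T any](p *T) option[T] {
	if p == nil {
		return option[T]{}
	}
	return option[T]{value: *p, set: true}
}

// Has and Value let an update routine apply only the fields the caller sent.
func (o option[T]) Has() bool { return o.set }
func (o option[T]) Value() T  { return o.value }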
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
new file mode 100644
index 0000000..6e4a97b
--- /dev/null
+++ b/routers/api/v1/api.go
@@ -0,0 +1,1659 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// Copyright 2023-2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+// Package v1 Gitea API
+//
+// This documentation describes the Gitea API.
+//
+// Schemes: https, http
+// BasePath: /api/v1
+// Version: {{AppVer | JSEscape}}
+// License: MIT http://opensource.org/licenses/MIT
+//
+// Consumes:
+// - application/json
+// - text/plain
+//
+// Produces:
+// - application/json
+// - text/html
+//
+// Security:
+// - BasicAuth :
+// - Token :
+// - AccessToken :
+// - AuthorizationHeaderToken :
+// - SudoParam :
+// - SudoHeader :
+// - TOTPHeader :
+//
+// SecurityDefinitions:
+// BasicAuth:
+// type: basic
+// Token:
+// type: apiKey
+// name: token
+// in: query
+// description: This authentication option is deprecated for removal in Gitea 1.23. Please use AuthorizationHeaderToken instead.
+// AccessToken:
+// type: apiKey
+// name: access_token
+// in: query
+// description: This authentication option is deprecated for removal in Gitea 1.23. Please use AuthorizationHeaderToken instead.
+// AuthorizationHeaderToken:
+// type: apiKey
+// name: Authorization
+// in: header
+// description: API tokens must be prepended with "token" followed by a space.
+// SudoParam:
+// type: apiKey
+// name: sudo
+// in: query
+// description: Sudo API request as the user provided as the key. Admin privileges are required.
+// SudoHeader:
+// type: apiKey
+// name: Sudo
+// in: header
+// description: Sudo API request as the user provided as the key. Admin privileges are required.
+// TOTPHeader:
+// type: apiKey
+// name: X-FORGEJO-OTP
+// in: header
+// description: Must be used in combination with BasicAuth if two-factor authentication is enabled.
+//
+// swagger:meta
+package v1
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ auth_model "code.gitea.io/gitea/models/auth"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ quota_model "code.gitea.io/gitea/models/quota"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/forgefed"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/shared"
+ "code.gitea.io/gitea/routers/api/v1/activitypub"
+ "code.gitea.io/gitea/routers/api/v1/admin"
+ "code.gitea.io/gitea/routers/api/v1/misc"
+ "code.gitea.io/gitea/routers/api/v1/notify"
+ "code.gitea.io/gitea/routers/api/v1/org"
+ "code.gitea.io/gitea/routers/api/v1/packages"
+ "code.gitea.io/gitea/routers/api/v1/repo"
+ "code.gitea.io/gitea/routers/api/v1/settings"
+ "code.gitea.io/gitea/routers/api/v1/user"
+ "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/auth"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/forms"
+
+ _ "code.gitea.io/gitea/routers/api/v1/swagger" // for swagger generation
+
+ "gitea.com/go-chi/binding"
+)
+
+func sudo() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ sudo := ctx.FormString("sudo")
+ if len(sudo) == 0 {
+ sudo = ctx.Req.Header.Get("Sudo")
+ }
+
+ if len(sudo) > 0 {
+ if ctx.IsSigned && ctx.Doer.IsAdmin {
+ user, err := user_model.GetUserByName(ctx, sudo)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+ log.Trace("Sudo from (%s) to: %s", ctx.Doer.Name, user.Name)
+ ctx.Doer = user
+ } else {
+ ctx.JSON(http.StatusForbidden, map[string]string{
+	"message": "Only administrators are allowed to sudo.",
+ })
+ return
+ }
+ }
+ }
+}
+
+func repoAssignment() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ userName := ctx.Params("username")
+ repoName := ctx.Params("reponame")
+
+ var (
+ owner *user_model.User
+ err error
+ )
+
+ // Check if the user is the same as the repository owner.
+ if ctx.IsSigned && ctx.Doer.LowerName == strings.ToLower(userName) {
+ owner = ctx.Doer
+ } else {
+ owner, err = user_model.GetUserByName(ctx, userName)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ if redirectUserID, err := user_model.LookupUserRedirect(ctx, userName); err == nil {
+ context.RedirectToUser(ctx.Base, userName, redirectUserID)
+ } else if user_model.IsErrUserRedirectNotExist(err) {
+ ctx.NotFound("GetUserByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "LookupUserRedirect", err)
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+ }
+ ctx.Repo.Owner = owner
+ ctx.ContextUser = owner
+
+ // Get repository.
+ repo, err := repo_model.GetRepositoryByName(ctx, owner.ID, repoName)
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ redirectRepoID, err := repo_model.LookupRedirect(ctx, owner.ID, repoName)
+ if err == nil {
+ context.RedirectToRepo(ctx.Base, redirectRepoID)
+ } else if repo_model.IsErrRedirectNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "LookupRepoRedirect", err)
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByName", err)
+ }
+ return
+ }
+
+ repo.Owner = owner
+ ctx.Repo.Repository = repo
+
+ if ctx.Doer != nil && ctx.Doer.ID == user_model.ActionsUserID {
+ taskID := ctx.Data["ActionsTaskID"].(int64)
+ task, err := actions_model.GetTaskByID(ctx, taskID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "actions_model.GetTaskByID", err)
+ return
+ }
+ if task.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if task.IsForkPullRequest {
+ ctx.Repo.Permission.AccessMode = perm.AccessModeRead
+ } else {
+ ctx.Repo.Permission.AccessMode = perm.AccessModeWrite
+ }
+
+ if err := ctx.Repo.Repository.LoadUnits(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadUnits", err)
+ return
+ }
+ ctx.Repo.Permission.Units = ctx.Repo.Repository.Units
+ ctx.Repo.Permission.UnitsMode = make(map[unit.Type]perm.AccessMode)
+ for _, u := range ctx.Repo.Repository.Units {
+ ctx.Repo.Permission.UnitsMode[u.Type] = ctx.Repo.Permission.AccessMode
+ }
+ } else {
+ ctx.Repo.Permission, err = access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ }
+ }
+
+ if !ctx.Repo.HasAccess() {
+ ctx.NotFound()
+ return
+ }
+ }
+}
+
+// must be used within a group with a call to repoAssignment() to set ctx.Repo
+func commentAssignment(idParam string) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ comment, err := issues_model.GetCommentByID(ctx, ctx.ParamsInt64(idParam))
+ if err != nil {
+ if issues_model.IsErrCommentNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.InternalServerError(err)
+ }
+ return
+ }
+
+ if err = comment.LoadIssue(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if comment.Issue == nil || comment.Issue.RepoID != ctx.Repo.Repository.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if !ctx.Repo.CanReadIssuesOrPulls(comment.Issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+
+ comment.Issue.Repo = ctx.Repo.Repository
+
+ ctx.Comment = comment
+ }
+}
+
+func reqPackageAccess(accessMode perm.AccessMode) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if ctx.Package.AccessMode < accessMode && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqPackageAccess", "user should have specific permission or be a site admin")
+ return
+ }
+ }
+}
+
+func checkTokenPublicOnly() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.PublicOnly {
+ return
+ }
+
+ requiredScopeCategories, ok := ctx.Data["requiredScopeCategories"].([]auth_model.AccessTokenScopeCategory)
+ if !ok || len(requiredScopeCategories) == 0 {
+ return
+ }
+
+ // public Only permission check
+ switch {
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryRepository):
+ if ctx.Repo.Repository != nil && ctx.Repo.Repository.IsPrivate {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public repos")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryIssue):
+ if ctx.Repo.Repository != nil && ctx.Repo.Repository.IsPrivate {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public issues")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryOrganization):
+ if ctx.Org.Organization != nil && ctx.Org.Organization.Visibility != api.VisibleTypePublic {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public orgs")
+ return
+ }
+ if ctx.ContextUser != nil && ctx.ContextUser.IsOrganization() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public orgs")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryUser):
+ if ctx.ContextUser != nil && ctx.ContextUser.IsUser() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public users")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryActivityPub):
+ if ctx.ContextUser != nil && ctx.ContextUser.IsUser() && ctx.ContextUser.Visibility != api.VisibleTypePublic {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public activitypub")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryNotification):
+ if ctx.Repo.Repository != nil && ctx.Repo.Repository.IsPrivate {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public notifications")
+ return
+ }
+ case auth_model.ContainsCategory(requiredScopeCategories, auth_model.AccessTokenScopeCategoryPackage):
+ if ctx.Package != nil && ctx.Package.Owner.Visibility.IsPrivate() {
+ ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public packages")
+ return
+ }
+ }
+ }
+}
+
+// if a token is being used for auth, we check that it contains the required scope
+// if a token is not being used, reqToken will enforce other sign in methods
+func tokenRequiresScopes(requiredScopeCategories ...auth_model.AccessTokenScopeCategory) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ // no scope required
+ if len(requiredScopeCategories) == 0 {
+ return
+ }
+
+ // Need OAuth2 token to be present.
+ scope, scopeExists := ctx.Data["ApiTokenScope"].(auth_model.AccessTokenScope)
+ if ctx.Data["IsApiToken"] != true || !scopeExists {
+ return
+ }
+
+ // use the http method to determine the access level
+ requiredScopeLevel := auth_model.Read
+ if ctx.Req.Method == "POST" || ctx.Req.Method == "PUT" || ctx.Req.Method == "PATCH" || ctx.Req.Method == "DELETE" {
+ requiredScopeLevel = auth_model.Write
+ }
+
+ // get the required scope for the given access level and category
+ requiredScopes := auth_model.GetRequiredScopes(requiredScopeLevel, requiredScopeCategories...)
+ allow, err := scope.HasScope(requiredScopes...)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "tokenRequiresScope", "checking scope failed: "+err.Error())
+ return
+ }
+
+ if !allow {
+ ctx.Error(http.StatusForbidden, "tokenRequiresScope", fmt.Sprintf("token does not have at least one of required scope(s): %v", requiredScopes))
+ return
+ }
+
+ ctx.Data["requiredScopeCategories"] = requiredScopeCategories
+
+ // check if scope only applies to public resources
+ publicOnly, err := scope.PublicOnly()
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "tokenRequiresScope", "parsing public resource scope failed: "+err.Error())
+ return
+ }
+
+	// propagate the flag so that, when set, searches only return public repositories/users/organizations
+ ctx.PublicOnly = publicOnly
+ }
+}
+
+// Contexter middleware already checks the token during the user sign-in process.
+func reqToken() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ // If actions token is present
+ if true == ctx.Data["IsActionsToken"] {
+ return
+ }
+
+ if ctx.IsSigned {
+ return
+ }
+ ctx.Error(http.StatusUnauthorized, "reqToken", "token is required")
+ }
+}
+
+func reqExploreSignIn() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if setting.Service.Explore.RequireSigninView && !ctx.IsSigned {
+ ctx.Error(http.StatusUnauthorized, "reqExploreSignIn", "you must be signed in to search for users")
+ }
+ }
+}
+
+func reqBasicOrRevProxyAuth() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if ctx.IsSigned && setting.Service.EnableReverseProxyAuthAPI && ctx.Data["AuthedMethod"].(string) == auth.ReverseProxyMethodName {
+ return
+ }
+ if !ctx.IsBasicAuth {
+ ctx.Error(http.StatusUnauthorized, "reqBasicAuth", "auth required")
+ return
+ }
+ }
+}
+
+// reqSiteAdmin user should be the site admin
+func reqSiteAdmin() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqSiteAdmin", "user should be the site admin")
+ return
+ }
+ }
+}
+
+// reqOwner user should be the owner of the repo or site admin.
+func reqOwner() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.Repo.IsOwner() && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqOwner", "user should be the owner of the repo")
+ return
+ }
+ }
+}
+
+// reqSelfOrAdmin doer should be the same as the contextUser or site admin
+func reqSelfOrAdmin() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.IsUserSiteAdmin() && ctx.ContextUser != ctx.Doer {
+ ctx.Error(http.StatusForbidden, "reqSelfOrAdmin", "doer should be the site admin or be same as the contextUser")
+ return
+ }
+ }
+}
+
+// reqAdmin user should be an owner or a collaborator with admin write of a repository, or site admin
+func reqAdmin() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.IsUserRepoAdmin() && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqAdmin", "user should be an owner or a collaborator with admin write of a repository")
+ return
+ }
+ }
+}
+
+// reqRepoWriter user should have a permission to write to a repo, or be a site admin
+func reqRepoWriter(unitTypes ...unit.Type) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.IsUserRepoWriter(unitTypes) && !ctx.IsUserRepoAdmin() && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqRepoWriter", "user should have a permission to write to a repo")
+ return
+ }
+ }
+}
+
+// reqRepoBranchWriter user should have a permission to write to a branch, or be a site admin
+func reqRepoBranchWriter(ctx *context.APIContext) {
+ options, ok := web.GetForm(ctx).(api.FileOptionInterface)
+ if !ok || (!ctx.Repo.CanWriteToBranch(ctx, ctx.Doer, options.Branch()) && !ctx.IsUserSiteAdmin()) {
+ ctx.Error(http.StatusForbidden, "reqRepoBranchWriter", "user should have a permission to write to this branch")
+ return
+ }
+}
+
+// reqRepoReader user should have specific read permission or be a repo admin or a site admin
+func reqRepoReader(unitType unit.Type) func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.Repo.CanRead(unitType) && !ctx.IsUserRepoAdmin() && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqRepoReader", "user should have specific read permission or be a repo admin or a site admin")
+ return
+ }
+ }
+}
+
+// reqAnyRepoReader user should have any permission to read repository or permissions of site admin
+func reqAnyRepoReader() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.Repo.HasAccess() && !ctx.IsUserSiteAdmin() {
+ ctx.Error(http.StatusForbidden, "reqAnyRepoReader", "user should have any permission to read repository or permissions of site admin")
+ return
+ }
+ }
+}
+
+// reqOrgOwnership user should be an organization owner, or a site admin
+func reqOrgOwnership() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if ctx.IsUserSiteAdmin() {
+ return
+ }
+
+ var orgID int64
+ if ctx.Org.Organization != nil {
+ orgID = ctx.Org.Organization.ID
+ } else if ctx.Org.Team != nil {
+ orgID = ctx.Org.Team.OrgID
+ } else {
+ ctx.Error(http.StatusInternalServerError, "", "reqOrgOwnership: unprepared context")
+ return
+ }
+
+ isOwner, err := organization.IsOrganizationOwner(ctx, orgID, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrganizationOwner", err)
+ return
+ } else if !isOwner {
+ if ctx.Org.Organization != nil {
+ ctx.Error(http.StatusForbidden, "", "Must be an organization owner")
+ } else {
+ ctx.NotFound()
+ }
+ return
+ }
+ }
+}
+
+// reqTeamMembership user should be a team member, or a site admin
+func reqTeamMembership() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if ctx.IsUserSiteAdmin() {
+ return
+ }
+ if ctx.Org.Team == nil {
+ ctx.Error(http.StatusInternalServerError, "", "reqTeamMembership: unprepared context")
+ return
+ }
+
+ orgID := ctx.Org.Team.OrgID
+ isOwner, err := organization.IsOrganizationOwner(ctx, orgID, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrganizationOwner", err)
+ return
+ } else if isOwner {
+ return
+ }
+
+ if isTeamMember, err := organization.IsTeamMember(ctx, orgID, ctx.Org.Team.ID, ctx.Doer.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsTeamMember", err)
+ return
+ } else if !isTeamMember {
+ isOrgMember, err := organization.IsOrganizationMember(ctx, orgID, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrganizationMember", err)
+ } else if isOrgMember {
+ ctx.Error(http.StatusForbidden, "", "Must be a team member")
+ } else {
+ ctx.NotFound()
+ }
+ return
+ }
+ }
+}
+
+// reqOrgMembership user should be an organization member, or a site admin
+func reqOrgMembership() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if ctx.IsUserSiteAdmin() {
+ return
+ }
+
+ var orgID int64
+ if ctx.Org.Organization != nil {
+ orgID = ctx.Org.Organization.ID
+ } else if ctx.Org.Team != nil {
+ orgID = ctx.Org.Team.OrgID
+ } else {
+ ctx.Error(http.StatusInternalServerError, "", "reqOrgMembership: unprepared context")
+ return
+ }
+
+ if isMember, err := organization.IsOrganizationMember(ctx, orgID, ctx.Doer.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrganizationMember", err)
+ return
+ } else if !isMember {
+ if ctx.Org.Organization != nil {
+ ctx.Error(http.StatusForbidden, "", "Must be an organization member")
+ } else {
+ ctx.NotFound()
+ }
+ return
+ }
+ }
+}
+
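+// reqGitHook requires the doer to be allowed to edit Git hooks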
+func reqGitHook() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if !ctx.Doer.CanEditGitHook() {
+ ctx.Error(http.StatusForbidden, "", "must be allowed to edit Git hooks")
+ return
+ }
+ }
+}
+
+// reqWebhooksEnabled requires webhooks to be enabled by admin.
+func reqWebhooksEnabled() func(ctx *context.APIContext) {
+ return func(ctx *context.APIContext) {
+ if setting.DisableWebhooks {
+ ctx.Error(http.StatusForbidden, "", "webhooks disabled by administrator")
+ return
+ }
+ }
+}
+
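+// orgAssignment populates ctx.Org from the URL parameters: args[0] loads the organization from ":org", args[1] loads the team from ":teamid"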
+func orgAssignment(args ...bool) func(ctx *context.APIContext) {
+ var (
+ assignOrg bool
+ assignTeam bool
+ )
+ if len(args) > 0 {
+ assignOrg = args[0]
+ }
+ if len(args) > 1 {
+ assignTeam = args[1]
+ }
+ return func(ctx *context.APIContext) {
+ ctx.Org = new(context.APIOrganization)
+
+ var err error
+ if assignOrg {
+ ctx.Org.Organization, err = organization.GetOrgByName(ctx, ctx.Params(":org"))
+ if err != nil {
+ if organization.IsErrOrgNotExist(err) {
+ redirectUserID, err := user_model.LookupUserRedirect(ctx, ctx.Params(":org"))
+ if err == nil {
+ context.RedirectToUser(ctx.Base, ctx.Params(":org"), redirectUserID)
+ } else if user_model.IsErrUserRedirectNotExist(err) {
+ ctx.NotFound("GetOrgByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "LookupUserRedirect", err)
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetOrgByName", err)
+ }
+ return
+ }
+ ctx.ContextUser = ctx.Org.Organization.AsUser()
+ }
+
+ if assignTeam {
+ ctx.Org.Team, err = organization.GetTeamByID(ctx, ctx.ParamsInt64(":teamid"))
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetTeamByID", err)
+ }
+ return
+ }
+ }
+ }
+}
+
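+// mustEnableIssues responds with 404 unless the doer can read the issues unit of the repository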
+func mustEnableIssues(ctx *context.APIContext) {
+ if !ctx.Repo.CanRead(unit.TypeIssues) {
+ if log.IsTrace() {
+ if ctx.IsSigned {
+ log.Trace("Permission Denied: User %-v cannot read %-v in Repo %-v\n"+
+ "User in Repo has Permissions: %-+v",
+ ctx.Doer,
+ unit.TypeIssues,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ } else {
+ log.Trace("Permission Denied: Anonymous user cannot read %-v in Repo %-v\n"+
+ "Anonymous user in Repo has Permissions: %-+v",
+ unit.TypeIssues,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ }
+ }
+ ctx.NotFound()
+ return
+ }
+}
+
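+// mustAllowPulls responds with 404 unless the repository can enable pulls and the doer can read the pull requests unit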
+func mustAllowPulls(ctx *context.APIContext) {
+ if !(ctx.Repo.Repository.CanEnablePulls() && ctx.Repo.CanRead(unit.TypePullRequests)) {
+ if ctx.Repo.Repository.CanEnablePulls() && log.IsTrace() {
+ if ctx.IsSigned {
+ log.Trace("Permission Denied: User %-v cannot read %-v in Repo %-v\n"+
+ "User in Repo has Permissions: %-+v",
+ ctx.Doer,
+ unit.TypePullRequests,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ } else {
+ log.Trace("Permission Denied: Anonymous user cannot read %-v in Repo %-v\n"+
+ "Anonymous user in Repo has Permissions: %-+v",
+ unit.TypePullRequests,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ }
+ }
+ ctx.NotFound()
+ return
+ }
+}
+
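+// mustEnableIssuesOrPulls responds with 404 unless the doer can read either the issues unit or the pull requests unit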
+func mustEnableIssuesOrPulls(ctx *context.APIContext) {
+ if !ctx.Repo.CanRead(unit.TypeIssues) &&
+ !(ctx.Repo.Repository.CanEnablePulls() && ctx.Repo.CanRead(unit.TypePullRequests)) {
+ if ctx.Repo.Repository.CanEnablePulls() && log.IsTrace() {
+ if ctx.IsSigned {
+ log.Trace("Permission Denied: User %-v cannot read %-v and %-v in Repo %-v\n"+
+ "User in Repo has Permissions: %-+v",
+ ctx.Doer,
+ unit.TypeIssues,
+ unit.TypePullRequests,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ } else {
+ log.Trace("Permission Denied: Anonymous user cannot read %-v and %-v in Repo %-v\n"+
+ "Anonymous user in Repo has Permissions: %-+v",
+ unit.TypeIssues,
+ unit.TypePullRequests,
+ ctx.Repo.Repository,
+ ctx.Repo.Permission)
+ }
+ }
+ ctx.NotFound()
+ return
+ }
+}
+
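+// mustEnableWiki responds with 404 unless the doer can read the wiki unit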
+func mustEnableWiki(ctx *context.APIContext) {
+ if !(ctx.Repo.CanRead(unit.TypeWiki)) {
+ ctx.NotFound()
+ return
+ }
+}
+
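+// mustNotBeArchived rejects requests against archived repositories with 423 Locked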
+func mustNotBeArchived(ctx *context.APIContext) {
+ if ctx.Repo.Repository.IsArchived {
+ ctx.Error(http.StatusLocked, "RepoArchived", fmt.Errorf("%s is archived", ctx.Repo.Repository.LogString()))
+ return
+ }
+}
+
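+// mustEnableAttachments responds with 404 when attachment uploads are disabled instance-wide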
+func mustEnableAttachments(ctx *context.APIContext) {
+ if !setting.Attachment.Enabled {
+ ctx.NotFound()
+ return
+ }
+}
+
+// bind binds a request form object of type T to a handler func(ctx *context.APIContext)
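+// Example from the routes below: m.Post("/markup", reqToken(), bind(api.MarkupOption{}), misc.Markup)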
+func bind[T any](_ T) any {
+ return func(ctx *context.APIContext) {
+ theObj := new(T) // create a new form obj for every request instead of reusing the obj passed to bind
+ errs := binding.Bind(ctx.Req, theObj)
+ if len(errs) > 0 {
+ ctx.Error(http.StatusUnprocessableEntity, "validationError", fmt.Sprintf("%s: %s", errs[0].FieldNames, errs[0].Error()))
+ return
+ }
+ web.SetForm(ctx, theObj)
+ }
+}
+
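+// individualPermsChecker hides individual (non-organization) context users with restricted visibility: private profiles are only visible to themselves and admins, limited profiles require a signed-in doer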
+func individualPermsChecker(ctx *context.APIContext) {
+ // org permissions have been checked in context.OrgAssignment(), but individual permissions haven't been checked.
+ if ctx.ContextUser.IsIndividual() {
+ switch {
+ case ctx.ContextUser.Visibility == api.VisibleTypePrivate:
+ if ctx.Doer == nil || (ctx.ContextUser.ID != ctx.Doer.ID && !ctx.Doer.IsAdmin) {
+ ctx.NotFound("Visit Project", nil)
+ return
+ }
+ case ctx.ContextUser.Visibility == api.VisibleTypeLimited:
+ if ctx.Doer == nil {
+ ctx.NotFound("Visit Project", nil)
+ return
+ }
+ }
+ }
+}
+
+// Routes registers all v1 APIs routes to web application.
+func Routes() *web.Route {
+ m := web.NewRoute()
+
+ m.Use(shared.Middlewares()...)
+
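+ // addActionsRoutes mounts the Actions secrets, variables and runner registration-token endpoints under the given group; it is reused by the repository and organization routes below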
+ addActionsRoutes := func(
+ m *web.Route,
+ reqChecker func(ctx *context.APIContext),
+ act actions.API,
+ ) {
+ m.Group("/actions", func() {
+ m.Group("/secrets", func() {
+ m.Get("", reqToken(), reqChecker, act.ListActionsSecrets)
+ m.Combo("/{secretname}").
+ Put(reqToken(), reqChecker, bind(api.CreateOrUpdateSecretOption{}), act.CreateOrUpdateSecret).
+ Delete(reqToken(), reqChecker, act.DeleteSecret)
+ })
+
+ m.Group("/variables", func() {
+ m.Get("", reqToken(), reqChecker, act.ListVariables)
+ m.Combo("/{variablename}").
+ Get(reqToken(), reqChecker, act.GetVariable).
+ Delete(reqToken(), reqChecker, act.DeleteVariable).
+ Post(reqToken(), reqChecker, bind(api.CreateVariableOption{}), act.CreateVariable).
+ Put(reqToken(), reqChecker, bind(api.UpdateVariableOption{}), act.UpdateVariable)
+ })
+
+ m.Group("/runners", func() {
+ m.Get("/registration-token", reqToken(), reqChecker, act.GetRegistrationToken)
+ })
+ })
+ }
+
+ m.Group("", func() {
+ // Miscellaneous (no scope required)
+ if setting.API.EnableSwagger {
+ m.Get("/swagger", func(ctx *context.APIContext) {
+ ctx.Redirect(setting.AppSubURL + "/api/swagger")
+ })
+ }
+
+ if setting.Federation.Enabled {
+ m.Get("/nodeinfo", misc.NodeInfo)
+ m.Group("/activitypub", func() {
+ // deprecated, remove in 1.20, use /user-id/{user-id} instead
+ m.Group("/user/{username}", func() {
+ m.Get("", activitypub.Person)
+ m.Post("/inbox", activitypub.ReqHTTPSignature(), activitypub.PersonInbox)
+ }, context.UserAssignmentAPI(), checkTokenPublicOnly())
+ m.Group("/user-id/{user-id}", func() {
+ m.Get("", activitypub.Person)
+ m.Post("/inbox", activitypub.ReqHTTPSignature(), activitypub.PersonInbox)
+ }, context.UserIDAssignmentAPI(), checkTokenPublicOnly())
+ m.Group("/actor", func() {
+ m.Get("", activitypub.Actor)
+ m.Post("/inbox", activitypub.ActorInbox)
+ })
+ m.Group("/repository-id/{repository-id}", func() {
+ m.Get("", activitypub.Repository)
+ m.Post("/inbox",
+ bind(forgefed.ForgeLike{}),
+ // TODO: activitypub.ReqHTTPSignature(),
+ activitypub.RepositoryInbox)
+ }, context.RepositoryIDAssignmentAPI())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryActivityPub))
+ }
+
+ // Misc (public accessible)
+ m.Group("", func() {
+ m.Get("/version", misc.Version)
+ m.Get("/signing-key.gpg", misc.SigningKey)
+ m.Post("/markup", reqToken(), bind(api.MarkupOption{}), misc.Markup)
+ m.Post("/markdown", reqToken(), bind(api.MarkdownOption{}), misc.Markdown)
+ m.Post("/markdown/raw", reqToken(), misc.MarkdownRaw)
+ m.Get("/gitignore/templates", misc.ListGitignoresTemplates)
+ m.Get("/gitignore/templates/{name}", misc.GetGitignoreTemplateInfo)
+ m.Get("/licenses", misc.ListLicenseTemplates)
+ m.Get("/licenses/{name}", misc.GetLicenseTemplateInfo)
+ m.Get("/label/templates", misc.ListLabelTemplates)
+ m.Get("/label/templates/{name}", misc.GetLabelTemplate)
+
+ m.Group("/settings", func() {
+ m.Get("/ui", settings.GetGeneralUISettings)
+ m.Get("/api", settings.GetGeneralAPISettings)
+ m.Get("/attachment", settings.GetGeneralAttachmentSettings)
+ m.Get("/repository", settings.GetGeneralRepoSettings)
+ })
+ })
+
+ // Notifications (requires 'notifications' scope)
+ m.Group("/notifications", func() {
+ m.Combo("").
+ Get(reqToken(), notify.ListNotifications).
+ Put(reqToken(), notify.ReadNotifications)
+ m.Get("/new", reqToken(), notify.NewAvailable)
+ m.Combo("/threads/{id}").
+ Get(reqToken(), notify.GetThread).
+ Patch(reqToken(), notify.ReadThread)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryNotification))
+
+ // Users (requires user scope)
+ m.Group("/users", func() {
+ m.Get("/search", reqExploreSignIn(), user.Search)
+
+ m.Group("/{username}", func() {
+ m.Get("", reqExploreSignIn(), user.GetInfo)
+
+ if setting.Service.EnableUserHeatmap {
+ m.Get("/heatmap", user.GetUserHeatmapData)
+ }
+
+ m.Get("/repos", tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository), reqExploreSignIn(), user.ListUserRepos)
+ m.Group("/tokens", func() {
+ m.Combo("").Get(user.ListAccessTokens).
+ Post(bind(api.CreateAccessTokenOption{}), reqToken(), user.CreateAccessToken)
+ m.Combo("/{id}").Delete(reqToken(), user.DeleteAccessToken)
+ }, reqSelfOrAdmin(), reqBasicOrRevProxyAuth())
+
+ m.Get("/activities/feeds", user.ListUserActivityFeeds)
+ }, context.UserAssignmentAPI(), checkTokenPublicOnly(), individualPermsChecker)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser))
+
+ // Users (requires user scope)
+ m.Group("/users", func() {
+ m.Group("/{username}", func() {
+ m.Get("/keys", user.ListPublicKeys)
+ m.Get("/gpg_keys", user.ListGPGKeys)
+
+ m.Get("/followers", user.ListFollowers)
+ m.Group("/following", func() {
+ m.Get("", user.ListFollowing)
+ m.Get("/{target}", user.CheckFollowing)
+ })
+
+ if !setting.Repository.DisableStars {
+ m.Get("/starred", user.GetStarredRepos)
+ }
+
+ m.Get("/subscriptions", user.GetWatchedRepos)
+ }, context.UserAssignmentAPI(), checkTokenPublicOnly())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser), reqToken())
+
+ // Users (requires user scope)
+ m.Group("/user", func() {
+ m.Get("", user.GetAuthenticatedUser)
+ if setting.Quota.Enabled {
+ m.Group("/quota", func() {
+ m.Get("", user.GetQuota)
+ m.Get("/check", user.CheckQuota)
+ m.Get("/attachments", user.ListQuotaAttachments)
+ m.Get("/packages", user.ListQuotaPackages)
+ m.Get("/artifacts", user.ListQuotaArtifacts)
+ })
+ }
+ m.Group("/settings", func() {
+ m.Get("", user.GetUserSettings)
+ m.Patch("", bind(api.UserSettingsOptions{}), user.UpdateUserSettings)
+ }, reqToken())
+ m.Combo("/emails").
+ Get(user.ListEmails).
+ Post(bind(api.CreateEmailOption{}), user.AddEmail).
+ Delete(bind(api.DeleteEmailOption{}), user.DeleteEmail)
+
+ // manage user-level actions features
+ m.Group("/actions", func() {
+ m.Group("/secrets", func() {
+ m.Combo("/{secretname}").
+ Put(bind(api.CreateOrUpdateSecretOption{}), user.CreateOrUpdateSecret).
+ Delete(user.DeleteSecret)
+ })
+
+ m.Group("/variables", func() {
+ m.Get("", user.ListVariables)
+ m.Combo("/{variablename}").
+ Get(user.GetVariable).
+ Delete(user.DeleteVariable).
+ Post(bind(api.CreateVariableOption{}), user.CreateVariable).
+ Put(bind(api.UpdateVariableOption{}), user.UpdateVariable)
+ })
+
+ m.Group("/runners", func() {
+ m.Get("/registration-token", reqToken(), user.GetRegistrationToken)
+ })
+ })
+
+ m.Get("/followers", user.ListMyFollowers)
+ m.Group("/following", func() {
+ m.Get("", user.ListMyFollowing)
+ m.Group("/{username}", func() {
+ m.Get("", user.CheckMyFollowing)
+ m.Put("", user.Follow)
+ m.Delete("", user.Unfollow)
+ }, context.UserAssignmentAPI())
+ })
+
+ // (admin:public_key scope)
+ m.Group("/keys", func() {
+ m.Combo("").Get(user.ListMyPublicKeys).
+ Post(bind(api.CreateKeyOption{}), user.CreatePublicKey)
+ m.Combo("/{id}").Get(user.GetPublicKey).
+ Delete(user.DeletePublicKey)
+ })
+
+ // (admin:application scope)
+ m.Group("/applications", func() {
+ m.Combo("/oauth2").
+ Get(user.ListOauth2Applications).
+ Post(bind(api.CreateOAuth2ApplicationOptions{}), user.CreateOauth2Application)
+ m.Combo("/oauth2/{id}").
+ Delete(user.DeleteOauth2Application).
+ Patch(bind(api.CreateOAuth2ApplicationOptions{}), user.UpdateOauth2Application).
+ Get(user.GetOauth2Application)
+ })
+
+ // (admin:gpg_key scope)
+ m.Group("/gpg_keys", func() {
+ m.Combo("").Get(user.ListMyGPGKeys).
+ Post(bind(api.CreateGPGKeyOption{}), user.CreateGPGKey)
+ m.Combo("/{id}").Get(user.GetGPGKey).
+ Delete(user.DeleteGPGKey)
+ })
+ m.Get("/gpg_key_token", user.GetVerificationToken)
+ m.Post("/gpg_key_verify", bind(api.VerifyGPGKeyOption{}), user.VerifyUserGPGKey)
+
+ // (repo scope)
+ m.Combo("/repos", tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository)).Get(user.ListMyRepos).
+ Post(bind(api.CreateRepoOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetUser), repo.Create)
+
+ // (repo scope)
+ if !setting.Repository.DisableStars {
+ m.Group("/starred", func() {
+ m.Get("", user.GetMyStarredRepos)
+ m.Group("/{username}/{reponame}", func() {
+ m.Get("", user.IsStarring)
+ m.Put("", user.Star)
+ m.Delete("", user.Unstar)
+ }, repoAssignment(), checkTokenPublicOnly())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository))
+ }
+ m.Get("/times", repo.ListMyTrackedTimes)
+ m.Get("/stopwatches", repo.GetStopwatches)
+ m.Get("/subscriptions", user.GetMyWatchedRepos)
+ m.Get("/teams", org.ListUserTeams)
+ m.Group("/hooks", func() {
+ m.Combo("").Get(user.ListHooks).
+ Post(bind(api.CreateHookOption{}), user.CreateHook)
+ m.Combo("/{id}").Get(user.GetHook).
+ Patch(bind(api.EditHookOption{}), user.EditHook).
+ Delete(user.DeleteHook)
+ }, reqWebhooksEnabled())
+
+ m.Group("", func() {
+ m.Get("/list_blocked", user.ListBlockedUsers)
+ m.Group("", func() {
+ m.Put("/block/{username}", user.BlockUser)
+ m.Put("/unblock/{username}", user.UnblockUser)
+ }, context.UserAssignmentAPI())
+ })
+
+ m.Group("/avatar", func() {
+ m.Post("", bind(api.UpdateUserAvatarOption{}), user.UpdateAvatar)
+ m.Delete("", user.DeleteAvatar)
+ }, reqToken())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser), reqToken())
+
+ // Repositories (requires repo scope, org scope)
+ m.Post("/org/{org}/repos",
+ // FIXME: we need org in context
+ tokenRequiresScopes(auth_model.AccessTokenScopeCategoryOrganization, auth_model.AccessTokenScopeCategoryRepository),
+ reqToken(),
+ bind(api.CreateRepoOption{}),
+ repo.CreateOrgRepoDeprecated)
+
+ // requires repo scope
+ // FIXME: Don't expose repository id outside of the system
+ m.Combo("/repositories/{id}", reqToken(), tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository)).Get(repo.GetByID)
+
+ // Repos (requires repo scope)
+ m.Group("/repos", func() {
+ m.Get("/search", repo.Search)
+
+ // (repo scope)
+ m.Post("/migrate", reqToken(), bind(api.MigrateRepoOptions{}), repo.Migrate)
+
+ m.Group("/{username}/{reponame}", func() {
+ m.Get("/compare/*", reqRepoReader(unit.TypeCode), repo.CompareDiff)
+
+ m.Combo("").Get(reqAnyRepoReader(), repo.Get).
+ Delete(reqToken(), reqOwner(), repo.Delete).
+ Patch(reqToken(), reqAdmin(), bind(api.EditRepoOption{}), repo.Edit)
+ m.Post("/generate", reqToken(), reqRepoReader(unit.TypeCode), bind(api.GenerateRepoOption{}), repo.Generate)
+ m.Group("/transfer", func() {
+ m.Post("", reqOwner(), bind(api.TransferRepoOption{}), repo.Transfer)
+ m.Post("/accept", repo.AcceptTransfer)
+ m.Post("/reject", repo.RejectTransfer)
+ }, reqToken())
+ addActionsRoutes(
+ m,
+ reqOwner(),
+ repo.NewAction(),
+ )
+ m.Group("/hooks/git", func() {
+ m.Combo("").Get(repo.ListGitHooks)
+ m.Group("/{id}", func() {
+ m.Combo("").Get(repo.GetGitHook).
+ Patch(bind(api.EditGitHookOption{}), repo.EditGitHook).
+ Delete(repo.DeleteGitHook)
+ })
+ }, reqToken(), reqAdmin(), reqGitHook(), context.ReferencesGitRepo(true))
+ m.Group("/hooks", func() {
+ m.Combo("").Get(repo.ListHooks).
+ Post(bind(api.CreateHookOption{}), repo.CreateHook)
+ m.Group("/{id}", func() {
+ m.Combo("").Get(repo.GetHook).
+ Patch(bind(api.EditHookOption{}), repo.EditHook).
+ Delete(repo.DeleteHook)
+ m.Post("/tests", context.ReferencesGitRepo(), context.RepoRefForAPI, repo.TestHook)
+ })
+ }, reqToken(), reqAdmin(), reqWebhooksEnabled())
+ m.Group("/collaborators", func() {
+ m.Get("", reqAnyRepoReader(), repo.ListCollaborators)
+ m.Group("/{collaborator}", func() {
+ m.Combo("").Get(reqAnyRepoReader(), repo.IsCollaborator).
+ Put(reqAdmin(), bind(api.AddCollaboratorOption{}), repo.AddCollaborator).
+ Delete(reqAdmin(), repo.DeleteCollaborator)
+ m.Get("/permission", repo.GetRepoPermissions)
+ })
+ }, reqToken())
+ if setting.Repository.EnableFlags {
+ m.Group("/flags", func() {
+ m.Combo("").Get(repo.ListFlags).
+ Put(bind(api.ReplaceFlagsOption{}), repo.ReplaceAllFlags).
+ Delete(repo.DeleteAllFlags)
+ m.Group("/{flag}", func() {
+ m.Combo("").Get(repo.HasFlag).
+ Put(repo.AddFlag).
+ Delete(repo.DeleteFlag)
+ })
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryAdmin), reqToken(), reqSiteAdmin())
+ }
+ m.Get("/assignees", reqToken(), reqAnyRepoReader(), repo.GetAssignees)
+ m.Get("/reviewers", reqToken(), reqAnyRepoReader(), repo.GetReviewers)
+ m.Group("/teams", func() {
+ m.Get("", reqAnyRepoReader(), repo.ListTeams)
+ m.Combo("/{team}").Get(reqAnyRepoReader(), repo.IsTeam).
+ Put(reqAdmin(), repo.AddTeam).
+ Delete(reqAdmin(), repo.DeleteTeam)
+ }, reqToken())
+ m.Get("/raw/*", context.ReferencesGitRepo(), context.RepoRefForAPI, reqRepoReader(unit.TypeCode), repo.GetRawFile)
+ m.Get("/media/*", context.ReferencesGitRepo(), context.RepoRefForAPI, reqRepoReader(unit.TypeCode), repo.GetRawFileOrLFS)
+ m.Get("/archive/*", reqRepoReader(unit.TypeCode), repo.GetArchive)
+ if !setting.Repository.DisableForks {
+ m.Combo("/forks").Get(repo.ListForks).
+ Post(reqToken(), reqRepoReader(unit.TypeCode), bind(api.CreateForkOption{}), repo.CreateFork)
+ }
+ m.Group("/branches", func() {
+ m.Get("", repo.ListBranches)
+ m.Get("/*", repo.GetBranch)
+ m.Delete("/*", reqToken(), reqRepoWriter(unit.TypeCode), mustNotBeArchived, repo.DeleteBranch)
+ m.Post("", reqToken(), reqRepoWriter(unit.TypeCode), mustNotBeArchived, bind(api.CreateBranchRepoOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeGitAll, context.QuotaTargetRepo), repo.CreateBranch)
+ }, context.ReferencesGitRepo(), reqRepoReader(unit.TypeCode))
+ m.Group("/branch_protections", func() {
+ m.Get("", repo.ListBranchProtections)
+ m.Post("", bind(api.CreateBranchProtectionOption{}), mustNotBeArchived, repo.CreateBranchProtection)
+ m.Group("/{name}", func() {
+ m.Get("", repo.GetBranchProtection)
+ m.Patch("", bind(api.EditBranchProtectionOption{}), mustNotBeArchived, repo.EditBranchProtection)
+ m.Delete("", repo.DeleteBranchProtection)
+ })
+ }, reqToken(), reqAdmin())
+ m.Group("/tags", func() {
+ m.Get("", repo.ListTags)
+ m.Get("/*", repo.GetTag)
+ m.Post("", reqToken(), reqRepoWriter(unit.TypeCode), mustNotBeArchived, bind(api.CreateTagOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.CreateTag)
+ m.Delete("/*", reqToken(), reqRepoWriter(unit.TypeCode), mustNotBeArchived, repo.DeleteTag)
+ }, reqRepoReader(unit.TypeCode), context.ReferencesGitRepo(true))
+ m.Group("/tag_protections", func() {
+ m.Combo("").Get(repo.ListTagProtection).
+ Post(bind(api.CreateTagProtectionOption{}), mustNotBeArchived, repo.CreateTagProtection)
+ m.Group("/{id}", func() {
+ m.Combo("").Get(repo.GetTagProtection).
+ Patch(bind(api.EditTagProtectionOption{}), mustNotBeArchived, repo.EditTagProtection).
+ Delete(repo.DeleteTagProtection)
+ })
+ }, reqToken(), reqAdmin())
+ m.Group("/actions", func() {
+ m.Get("/tasks", repo.ListActionTasks)
+
+ m.Group("/workflows", func() {
+ m.Group("/{workflowname}", func() {
+ m.Post("/dispatches", reqToken(), reqRepoWriter(unit.TypeActions), mustNotBeArchived, bind(api.DispatchWorkflowOption{}), repo.DispatchWorkflow)
+ })
+ })
+ }, reqRepoReader(unit.TypeActions), context.ReferencesGitRepo(true))
+ m.Group("/keys", func() {
+ m.Combo("").Get(repo.ListDeployKeys).
+ Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey)
+ m.Combo("/{id}").Get(repo.GetDeployKey).
+ Delete(repo.DeleteDeploykey)
+ }, reqToken(), reqAdmin())
+ m.Group("/times", func() {
+ m.Combo("").Get(repo.ListTrackedTimesByRepository)
+ m.Combo("/{timetrackingusername}").Get(repo.ListTrackedTimesByUser)
+ }, mustEnableIssues, reqToken())
+ m.Group("/wiki", func() {
+ m.Combo("/page/{pageName}").
+ Get(repo.GetWikiPage).
+ Patch(mustNotBeArchived, reqToken(), reqRepoWriter(unit.TypeWiki), bind(api.CreateWikiPageOptions{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeWiki, context.QuotaTargetRepo), repo.EditWikiPage).
+ Delete(mustNotBeArchived, reqToken(), reqRepoWriter(unit.TypeWiki), repo.DeleteWikiPage)
+ m.Get("/revisions/{pageName}", repo.ListPageRevisions)
+ m.Post("/new", reqToken(), mustNotBeArchived, reqRepoWriter(unit.TypeWiki), bind(api.CreateWikiPageOptions{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeWiki, context.QuotaTargetRepo), repo.NewWikiPage)
+ m.Get("/pages", repo.ListWikiPages)
+ }, mustEnableWiki)
+ m.Post("/markup", reqToken(), bind(api.MarkupOption{}), misc.Markup)
+ m.Post("/markdown", reqToken(), bind(api.MarkdownOption{}), misc.Markdown)
+ m.Post("/markdown/raw", reqToken(), misc.MarkdownRaw)
+ if !setting.Repository.DisableStars {
+ m.Get("/stargazers", repo.ListStargazers)
+ }
+ m.Get("/subscribers", repo.ListSubscribers)
+ m.Group("/subscription", func() {
+ m.Get("", user.IsWatching)
+ m.Put("", user.Watch)
+ m.Delete("", user.Unwatch)
+ }, reqToken())
+ m.Group("/releases", func() {
+ m.Combo("").Get(repo.ListReleases).
+ Post(reqToken(), reqRepoWriter(unit.TypeReleases), context.ReferencesGitRepo(), bind(api.CreateReleaseOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.CreateRelease)
+ m.Combo("/latest").Get(repo.GetLatestRelease)
+ m.Group("/{id}", func() {
+ m.Combo("").Get(repo.GetRelease).
+ Patch(reqToken(), reqRepoWriter(unit.TypeReleases), context.ReferencesGitRepo(), bind(api.EditReleaseOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.EditRelease).
+ Delete(reqToken(), reqRepoWriter(unit.TypeReleases), repo.DeleteRelease)
+ m.Group("/assets", func() {
+ m.Combo("").Get(repo.ListReleaseAttachments).
+ Post(reqToken(), reqRepoWriter(unit.TypeReleases), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeAssetsAttachmentsReleases, context.QuotaTargetRepo), repo.CreateReleaseAttachment)
+ m.Combo("/{attachment_id}").Get(repo.GetReleaseAttachment).
+ Patch(reqToken(), reqRepoWriter(unit.TypeReleases), bind(api.EditAttachmentOptions{}), repo.EditReleaseAttachment).
+ Delete(reqToken(), reqRepoWriter(unit.TypeReleases), repo.DeleteReleaseAttachment)
+ })
+ })
+ m.Group("/tags", func() {
+ m.Combo("/{tag}").
+ Get(repo.GetReleaseByTag).
+ Delete(reqToken(), reqRepoWriter(unit.TypeReleases), repo.DeleteReleaseByTag)
+ })
+ }, reqRepoReader(unit.TypeReleases))
+ m.Post("/mirror-sync", reqToken(), reqRepoWriter(unit.TypeCode), mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeGitAll, context.QuotaTargetRepo), repo.MirrorSync)
+ m.Post("/push_mirrors-sync", reqAdmin(), reqToken(), mustNotBeArchived, repo.PushMirrorSync)
+ m.Group("/push_mirrors", func() {
+ m.Combo("").Get(repo.ListPushMirrors).
+ Post(mustNotBeArchived, bind(api.CreatePushMirrorOption{}), repo.AddPushMirror)
+ m.Combo("/{name}").
+ Delete(mustNotBeArchived, repo.DeletePushMirrorByRemoteName).
+ Get(repo.GetPushMirrorByName)
+ }, reqAdmin(), reqToken())
+
+ m.Get("/editorconfig/{filename}", context.ReferencesGitRepo(), context.RepoRefForAPI, reqRepoReader(unit.TypeCode), repo.GetEditorconfig)
+ m.Group("/pulls", func() {
+ m.Combo("").Get(repo.ListPullRequests).
+ Post(reqToken(), mustNotBeArchived, bind(api.CreatePullRequestOption{}), repo.CreatePullRequest)
+ m.Get("/pinned", repo.ListPinnedPullRequests)
+ m.Group("/{index}", func() {
+ m.Combo("").Get(repo.GetPullRequest).
+ Patch(reqToken(), bind(api.EditPullRequestOption{}), repo.EditPullRequest)
+ m.Get(".{diffType:diff|patch}", repo.DownloadPullDiffOrPatch)
+ m.Post("/update", reqToken(), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeGitAll, context.QuotaTargetRepo), repo.UpdatePullRequest)
+ m.Get("/commits", repo.GetPullRequestCommits)
+ m.Get("/files", repo.GetPullRequestFiles)
+ m.Combo("/merge").Get(repo.IsPullRequestMerged).
+ Post(reqToken(), mustNotBeArchived, bind(forms.MergePullRequestForm{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeGitAll, context.QuotaTargetRepo), repo.MergePullRequest).
+ Delete(reqToken(), mustNotBeArchived, repo.CancelScheduledAutoMerge)
+ m.Group("/reviews", func() {
+ m.Combo("").
+ Get(repo.ListPullReviews).
+ Post(reqToken(), bind(api.CreatePullReviewOptions{}), repo.CreatePullReview)
+ m.Group("/{id}", func() {
+ m.Combo("").
+ Get(repo.GetPullReview).
+ Delete(reqToken(), repo.DeletePullReview).
+ Post(reqToken(), bind(api.SubmitPullReviewOptions{}), repo.SubmitPullReview)
+ m.Group("/comments", func() {
+ m.Combo("").
+ Get(repo.GetPullReviewComments).
+ Post(reqToken(), bind(api.CreatePullReviewCommentOptions{}), repo.CreatePullReviewComment)
+ m.Group("/{comment}", func() {
+ m.Combo("").
+ Get(repo.GetPullReviewComment).
+ Delete(reqToken(), repo.DeletePullReviewComment)
+ }, commentAssignment("comment"))
+ })
+ m.Post("/dismissals", reqToken(), bind(api.DismissPullReviewOptions{}), repo.DismissPullReview)
+ m.Post("/undismissals", reqToken(), repo.UnDismissPullReview)
+ })
+ })
+ m.Combo("/requested_reviewers", reqToken()).
+ Delete(bind(api.PullReviewRequestOptions{}), repo.DeleteReviewRequests).
+ Post(bind(api.PullReviewRequestOptions{}), repo.CreateReviewRequests)
+ })
+ m.Get("/{base}/*", repo.GetPullRequestByBaseHead)
+ }, mustAllowPulls, reqRepoReader(unit.TypeCode), context.ReferencesGitRepo())
+ m.Group("/statuses", func() {
+ m.Combo("/{sha}").Get(repo.GetCommitStatuses).
+ Post(reqToken(), reqRepoWriter(unit.TypeCode), bind(api.CreateStatusOption{}), repo.NewCommitStatus)
+ }, reqRepoReader(unit.TypeCode))
+ m.Group("/commits", func() {
+ m.Get("", context.ReferencesGitRepo(), repo.GetAllCommits)
+ m.Group("/{ref}", func() {
+ m.Get("/status", repo.GetCombinedCommitStatusByRef)
+ m.Get("/statuses", repo.GetCommitStatusesByRef)
+ m.Get("/pull", repo.GetCommitPullRequest)
+ }, context.ReferencesGitRepo())
+ }, reqRepoReader(unit.TypeCode))
+ m.Group("/git", func() {
+ m.Group("/commits", func() {
+ m.Get("/{sha}", repo.GetSingleCommit)
+ m.Get("/{sha}.{diffType:diff|patch}", repo.DownloadCommitDiffOrPatch)
+ })
+ m.Get("/refs", repo.GetGitAllRefs)
+ m.Get("/refs/*", repo.GetGitRefs)
+ m.Get("/trees/{sha}", repo.GetTree)
+ m.Get("/blobs/{sha}", repo.GetBlob)
+ m.Get("/tags/{sha}", repo.GetAnnotatedTag)
+ m.Get("/notes/{sha}", repo.GetNote)
+ }, context.ReferencesGitRepo(true), reqRepoReader(unit.TypeCode))
+ m.Post("/diffpatch", reqRepoWriter(unit.TypeCode), reqToken(), bind(api.ApplyDiffPatchFileOptions{}), mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.ApplyDiffPatch)
+ m.Group("/contents", func() {
+ m.Get("", repo.GetContentsList)
+ m.Post("", reqToken(), bind(api.ChangeFilesOptions{}), reqRepoBranchWriter, mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.ChangeFiles)
+ m.Get("/*", repo.GetContents)
+ m.Group("/*", func() {
+ m.Post("", bind(api.CreateFileOptions{}), reqRepoBranchWriter, mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.CreateFile)
+ m.Put("", bind(api.UpdateFileOptions{}), reqRepoBranchWriter, mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.UpdateFile)
+ m.Delete("", bind(api.DeleteFileOptions{}), reqRepoBranchWriter, mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetRepo), repo.DeleteFile)
+ }, reqToken())
+ }, reqRepoReader(unit.TypeCode))
+ m.Get("/signing-key.gpg", misc.SigningKey)
+ m.Group("/topics", func() {
+ m.Combo("").Get(repo.ListTopics).
+ Put(reqToken(), reqAdmin(), bind(api.RepoTopicOptions{}), repo.UpdateTopics)
+ m.Group("/{topic}", func() {
+ m.Combo("").Put(reqToken(), repo.AddTopic).
+ Delete(reqToken(), repo.DeleteTopic)
+ }, reqAdmin())
+ }, reqAnyRepoReader())
+ m.Get("/issue_templates", context.ReferencesGitRepo(), repo.GetIssueTemplates)
+ m.Get("/issue_config", context.ReferencesGitRepo(), repo.GetIssueConfig)
+ m.Get("/issue_config/validate", context.ReferencesGitRepo(), repo.ValidateIssueConfig)
+ m.Get("/languages", reqRepoReader(unit.TypeCode), repo.GetLanguages)
+ m.Get("/activities/feeds", repo.ListRepoActivityFeeds)
+ m.Get("/new_pin_allowed", repo.AreNewIssuePinsAllowed)
+ m.Group("/avatar", func() {
+ m.Post("", bind(api.UpdateRepoAvatarOption{}), repo.UpdateAvatar)
+ m.Delete("", repo.DeleteAvatar)
+ }, reqAdmin(), reqToken())
+ }, repoAssignment(), checkTokenPublicOnly())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository))
+
+ // Notifications (requires notifications scope)
+ m.Group("/repos", func() {
+ m.Group("/{username}/{reponame}", func() {
+ m.Combo("/notifications", reqToken()).
+ Get(notify.ListRepoNotifications).
+ Put(notify.ReadRepoNotifications)
+ }, repoAssignment(), checkTokenPublicOnly())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryNotification))
+
+ // Issue (requires issue scope)
+ m.Group("/repos", func() {
+ m.Get("/issues/search", repo.SearchIssues)
+
+ m.Group("/{username}/{reponame}", func() {
+ m.Group("/issues", func() {
+ m.Combo("").Get(repo.ListIssues).
+ Post(reqToken(), mustNotBeArchived, bind(api.CreateIssueOption{}), reqRepoReader(unit.TypeIssues), repo.CreateIssue)
+ m.Get("/pinned", reqRepoReader(unit.TypeIssues), repo.ListPinnedIssues)
+ m.Group("/comments", func() {
+ m.Get("", repo.ListRepoIssueComments)
+ m.Group("/{id}", func() {
+ m.Combo("").
+ Get(repo.GetIssueComment).
+ Patch(mustNotBeArchived, reqToken(), bind(api.EditIssueCommentOption{}), repo.EditIssueComment).
+ Delete(reqToken(), repo.DeleteIssueComment)
+ m.Combo("/reactions").
+ Get(repo.GetIssueCommentReactions).
+ Post(reqToken(), bind(api.EditReactionOption{}), repo.PostIssueCommentReaction).
+ Delete(reqToken(), bind(api.EditReactionOption{}), repo.DeleteIssueCommentReaction)
+ m.Group("/assets", func() {
+ m.Combo("").
+ Get(repo.ListIssueCommentAttachments).
+ Post(reqToken(), mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeAssetsAttachmentsIssues, context.QuotaTargetRepo), repo.CreateIssueCommentAttachment)
+ m.Combo("/{attachment_id}").
+ Get(repo.GetIssueCommentAttachment).
+ Patch(reqToken(), mustNotBeArchived, bind(api.EditAttachmentOptions{}), repo.EditIssueCommentAttachment).
+ Delete(reqToken(), mustNotBeArchived, repo.DeleteIssueCommentAttachment)
+ }, mustEnableAttachments)
+ }, commentAssignment(":id"))
+ })
+ m.Group("/{index}", func() {
+ m.Combo("").Get(repo.GetIssue).
+ Patch(reqToken(), bind(api.EditIssueOption{}), repo.EditIssue).
+ Delete(reqToken(), reqAdmin(), context.ReferencesGitRepo(), repo.DeleteIssue)
+ m.Group("/comments", func() {
+ m.Combo("").Get(repo.ListIssueComments).
+ Post(reqToken(), mustNotBeArchived, bind(api.CreateIssueCommentOption{}), repo.CreateIssueComment)
+ m.Combo("/{id}", reqToken()).Patch(bind(api.EditIssueCommentOption{}), repo.EditIssueCommentDeprecated).
+ Delete(repo.DeleteIssueCommentDeprecated)
+ })
+ m.Get("/timeline", repo.ListIssueCommentsAndTimeline)
+ m.Group("/labels", func() {
+ m.Combo("").Get(repo.ListIssueLabels).
+ Post(reqToken(), bind(api.IssueLabelsOption{}), repo.AddIssueLabels).
+ Put(reqToken(), bind(api.IssueLabelsOption{}), repo.ReplaceIssueLabels).
+ Delete(reqToken(), bind(api.DeleteLabelsOption{}), repo.ClearIssueLabels)
+ m.Delete("/{id}", reqToken(), bind(api.DeleteLabelsOption{}), repo.DeleteIssueLabel)
+ })
+ m.Group("/times", func() {
+ m.Combo("").
+ Get(repo.ListTrackedTimes).
+ Post(bind(api.AddTimeOption{}), repo.AddTime).
+ Delete(repo.ResetIssueTime)
+ m.Delete("/{id}", repo.DeleteTime)
+ }, reqToken())
+ m.Combo("/deadline").Post(reqToken(), bind(api.EditDeadlineOption{}), repo.UpdateIssueDeadline)
+ m.Group("/stopwatch", func() {
+ m.Post("/start", repo.StartIssueStopwatch)
+ m.Post("/stop", repo.StopIssueStopwatch)
+ m.Delete("/delete", repo.DeleteIssueStopwatch)
+ }, reqToken())
+ m.Group("/subscriptions", func() {
+ m.Get("", repo.GetIssueSubscribers)
+ m.Get("/check", reqToken(), repo.CheckIssueSubscription)
+ m.Put("/{user}", reqToken(), repo.AddIssueSubscription)
+ m.Delete("/{user}", reqToken(), repo.DelIssueSubscription)
+ })
+ m.Combo("/reactions").
+ Get(repo.GetIssueReactions).
+ Post(reqToken(), bind(api.EditReactionOption{}), repo.PostIssueReaction).
+ Delete(reqToken(), bind(api.EditReactionOption{}), repo.DeleteIssueReaction)
+ m.Group("/assets", func() {
+ m.Combo("").
+ Get(repo.ListIssueAttachments).
+ Post(reqToken(), mustNotBeArchived, context.EnforceQuotaAPI(quota_model.LimitSubjectSizeAssetsAttachmentsIssues, context.QuotaTargetRepo), repo.CreateIssueAttachment)
+ m.Combo("/{attachment_id}").
+ Get(repo.GetIssueAttachment).
+ Patch(reqToken(), mustNotBeArchived, bind(api.EditAttachmentOptions{}), repo.EditIssueAttachment).
+ Delete(reqToken(), mustNotBeArchived, repo.DeleteIssueAttachment)
+ }, mustEnableAttachments)
+ m.Combo("/dependencies").
+ Get(repo.GetIssueDependencies).
+ Post(reqToken(), mustNotBeArchived, bind(api.IssueMeta{}), repo.CreateIssueDependency).
+ Delete(reqToken(), mustNotBeArchived, bind(api.IssueMeta{}), repo.RemoveIssueDependency)
+ m.Combo("/blocks").
+ Get(repo.GetIssueBlocks).
+ Post(reqToken(), bind(api.IssueMeta{}), repo.CreateIssueBlocking).
+ Delete(reqToken(), bind(api.IssueMeta{}), repo.RemoveIssueBlocking)
+ m.Group("/pin", func() {
+ m.Combo("").
+ Post(reqToken(), reqAdmin(), repo.PinIssue).
+ Delete(reqToken(), reqAdmin(), repo.UnpinIssue)
+ m.Patch("/{position}", reqToken(), reqAdmin(), repo.MoveIssuePin)
+ })
+ })
+ }, mustEnableIssuesOrPulls)
+ m.Group("/labels", func() {
+ m.Combo("").Get(repo.ListLabels).
+ Post(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), bind(api.CreateLabelOption{}), repo.CreateLabel)
+ m.Combo("/{id}").Get(repo.GetLabel).
+ Patch(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), bind(api.EditLabelOption{}), repo.EditLabel).
+ Delete(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), repo.DeleteLabel)
+ })
+ m.Group("/milestones", func() {
+ m.Combo("").Get(repo.ListMilestones).
+ Post(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), bind(api.CreateMilestoneOption{}), repo.CreateMilestone)
+ m.Combo("/{id}").Get(repo.GetMilestone).
+ Patch(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), bind(api.EditMilestoneOption{}), repo.EditMilestone).
+ Delete(reqToken(), reqRepoWriter(unit.TypeIssues, unit.TypePullRequests), repo.DeleteMilestone)
+ })
+ }, repoAssignment(), checkTokenPublicOnly())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryIssue))
+
+ // NOTE: this is the Gitea package management API - see packages.CommonRoutes and packages.DockerContainerRoutes for the endpoints that implement the package manager APIs
+ m.Group("/packages/{username}", func() {
+ m.Group("/{type}/{name}/{version}", func() {
+ m.Get("", reqToken(), packages.GetPackage)
+ m.Delete("", reqToken(), reqPackageAccess(perm.AccessModeWrite), packages.DeletePackage)
+ m.Get("/files", reqToken(), packages.ListPackageFiles)
+ })
+ m.Get("/", reqToken(), packages.ListPackages)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryPackage), context.UserAssignmentAPI(), context.PackageAssignmentAPI(), reqPackageAccess(perm.AccessModeRead), checkTokenPublicOnly())
+
+ // Organizations
+ m.Get("/user/orgs", reqToken(), tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser, auth_model.AccessTokenScopeCategoryOrganization), org.ListMyOrgs)
+ m.Group("/users/{username}/orgs", func() {
+ m.Get("", reqToken(), org.ListUserOrgs)
+ m.Get("/{org}/permissions", reqToken(), org.GetUserOrgsPermissions)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser, auth_model.AccessTokenScopeCategoryOrganization), context.UserAssignmentAPI(), checkTokenPublicOnly())
+ m.Post("/orgs", tokenRequiresScopes(auth_model.AccessTokenScopeCategoryOrganization), reqToken(), bind(api.CreateOrgOption{}), org.Create)
+ m.Get("/orgs", org.GetAll, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryOrganization))
+ m.Group("/orgs/{org}", func() {
+ m.Combo("").Get(org.Get).
+ Patch(reqToken(), reqOrgOwnership(), bind(api.EditOrgOption{}), org.Edit).
+ Delete(reqToken(), reqOrgOwnership(), org.Delete)
+ m.Combo("/repos").Get(user.ListOrgRepos).
+ Post(reqToken(), bind(api.CreateRepoOption{}), context.EnforceQuotaAPI(quota_model.LimitSubjectSizeReposAll, context.QuotaTargetOrg), repo.CreateOrgRepo)
+ m.Group("/members", func() {
+ m.Get("", reqToken(), org.ListMembers)
+ m.Combo("/{username}").Get(reqToken(), org.IsMember).
+ Delete(reqToken(), reqOrgOwnership(), org.DeleteMember)
+ })
+ addActionsRoutes(
+ m,
+ reqOrgOwnership(),
+ org.NewAction(),
+ )
+ m.Group("/public_members", func() {
+ m.Get("", org.ListPublicMembers)
+ m.Combo("/{username}").Get(org.IsPublicMember).
+ Put(reqToken(), reqOrgMembership(), org.PublicizeMember).
+ Delete(reqToken(), reqOrgMembership(), org.ConcealMember)
+ })
+ m.Group("/teams", func() {
+ m.Get("", org.ListTeams)
+ m.Post("", reqOrgOwnership(), bind(api.CreateTeamOption{}), org.CreateTeam)
+ m.Get("/search", org.SearchTeam)
+ }, reqToken(), reqOrgMembership())
+ m.Group("/labels", func() {
+ m.Get("", org.ListLabels)
+ m.Post("", reqToken(), reqOrgOwnership(), bind(api.CreateLabelOption{}), org.CreateLabel)
+ m.Combo("/{id}").Get(reqToken(), org.GetLabel).
+ Patch(reqToken(), reqOrgOwnership(), bind(api.EditLabelOption{}), org.EditLabel).
+ Delete(reqToken(), reqOrgOwnership(), org.DeleteLabel)
+ })
+ m.Group("/hooks", func() {
+ m.Combo("").Get(org.ListHooks).
+ Post(bind(api.CreateHookOption{}), org.CreateHook)
+ m.Combo("/{id}").Get(org.GetHook).
+ Patch(bind(api.EditHookOption{}), org.EditHook).
+ Delete(org.DeleteHook)
+ }, reqToken(), reqOrgOwnership(), reqWebhooksEnabled())
+ m.Group("/avatar", func() {
+ m.Post("", bind(api.UpdateUserAvatarOption{}), org.UpdateAvatar)
+ m.Delete("", org.DeleteAvatar)
+ }, reqToken(), reqOrgOwnership())
+ m.Get("/activities/feeds", org.ListOrgActivityFeeds)
+
+ if setting.Quota.Enabled {
+ m.Group("/quota", func() {
+ m.Get("", org.GetQuota)
+ m.Get("/check", org.CheckQuota)
+ m.Get("/attachments", org.ListQuotaAttachments)
+ m.Get("/packages", org.ListQuotaPackages)
+ m.Get("/artifacts", org.ListQuotaArtifacts)
+ }, reqToken(), reqOrgOwnership())
+ }
+
+ m.Group("", func() {
+ m.Get("/list_blocked", org.ListBlockedUsers)
+ m.Group("", func() {
+ m.Put("/block/{username}", org.BlockUser)
+ m.Put("/unblock/{username}", org.UnblockUser)
+ }, context.UserAssignmentAPI())
+ }, reqToken(), reqOrgOwnership())
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryOrganization), orgAssignment(true), checkTokenPublicOnly())
+ m.Group("/teams/{teamid}", func() {
+ m.Combo("").Get(reqToken(), org.GetTeam).
+ Patch(reqToken(), reqOrgOwnership(), bind(api.EditTeamOption{}), org.EditTeam).
+ Delete(reqToken(), reqOrgOwnership(), org.DeleteTeam)
+ m.Group("/members", func() {
+ m.Get("", reqToken(), org.GetTeamMembers)
+ m.Combo("/{username}").
+ Get(reqToken(), org.GetTeamMember).
+ Put(reqToken(), reqOrgOwnership(), org.AddTeamMember).
+ Delete(reqToken(), reqOrgOwnership(), org.RemoveTeamMember)
+ })
+ m.Group("/repos", func() {
+ m.Get("", reqToken(), org.GetTeamRepos)
+ m.Combo("/{org}/{reponame}").
+ Put(reqToken(), org.AddTeamRepository).
+ Delete(reqToken(), org.RemoveTeamRepository).
+ Get(reqToken(), org.GetTeamRepo)
+ })
+ m.Get("/activities/feeds", org.ListTeamActivityFeeds)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryOrganization), orgAssignment(false, true), reqToken(), reqTeamMembership(), checkTokenPublicOnly())
+
+ m.Group("/admin", func() {
+ m.Group("/cron", func() {
+ m.Get("", admin.ListCronTasks)
+ m.Post("/{task}", admin.PostCronTask)
+ })
+ m.Get("/orgs", admin.GetAllOrgs)
+ m.Group("/users", func() {
+ m.Get("", admin.SearchUsers)
+ m.Post("", bind(api.CreateUserOption{}), admin.CreateUser)
+ m.Group("/{username}", func() {
+ m.Combo("").Patch(bind(api.EditUserOption{}), admin.EditUser).
+ Delete(admin.DeleteUser)
+ m.Group("/keys", func() {
+ m.Post("", bind(api.CreateKeyOption{}), admin.CreatePublicKey)
+ m.Delete("/{id}", admin.DeleteUserPublicKey)
+ })
+ m.Get("/orgs", org.ListUserOrgs)
+ m.Post("/orgs", bind(api.CreateOrgOption{}), admin.CreateOrg)
+ m.Post("/repos", bind(api.CreateRepoOption{}), admin.CreateRepo)
+ m.Post("/rename", bind(api.RenameUserOption{}), admin.RenameUser)
+ if setting.Quota.Enabled {
+ m.Group("/quota", func() {
+ m.Get("", admin.GetUserQuota)
+ m.Post("/groups", bind(api.SetUserQuotaGroupsOptions{}), admin.SetUserQuotaGroups)
+ })
+ }
+ }, context.UserAssignmentAPI())
+ })
+ m.Group("/emails", func() {
+ m.Get("", admin.GetAllEmails)
+ m.Get("/search", admin.SearchEmail)
+ })
+ m.Group("/unadopted", func() {
+ m.Get("", admin.ListUnadoptedRepositories)
+ m.Post("/{username}/{reponame}", admin.AdoptRepository)
+ m.Delete("/{username}/{reponame}", admin.DeleteUnadoptedRepository)
+ })
+ m.Group("/hooks", func() {
+ m.Combo("").Get(admin.ListHooks).
+ Post(bind(api.CreateHookOption{}), admin.CreateHook)
+ m.Combo("/{id}").Get(admin.GetHook).
+ Patch(bind(api.EditHookOption{}), admin.EditHook).
+ Delete(admin.DeleteHook)
+ })
+ m.Group("/runners", func() {
+ m.Get("/registration-token", admin.GetRegistrationToken)
+ })
+ if setting.Quota.Enabled {
+ m.Group("/quota", func() {
+ m.Group("/rules", func() {
+ m.Combo("").Get(admin.ListQuotaRules).
+ Post(bind(api.CreateQuotaRuleOptions{}), admin.CreateQuotaRule)
+ m.Combo("/{quotarule}", context.QuotaRuleAssignmentAPI()).
+ Get(admin.GetQuotaRule).
+ Patch(bind(api.EditQuotaRuleOptions{}), admin.EditQuotaRule).
+ Delete(admin.DeleteQuotaRule)
+ })
+ m.Group("/groups", func() {
+ m.Combo("").Get(admin.ListQuotaGroups).
+ Post(bind(api.CreateQuotaGroupOptions{}), admin.CreateQuotaGroup)
+ m.Group("/{quotagroup}", func() {
+ m.Combo("").Get(admin.GetQuotaGroup).
+ Delete(admin.DeleteQuotaGroup)
+ m.Group("/rules", func() {
+ m.Combo("/{quotarule}", context.QuotaRuleAssignmentAPI()).
+ Put(admin.AddRuleToQuotaGroup).
+ Delete(admin.RemoveRuleFromQuotaGroup)
+ })
+ m.Group("/users", func() {
+ m.Get("", admin.ListUsersInQuotaGroup)
+ m.Combo("/{username}", context.UserAssignmentAPI()).
+ Put(admin.AddUserToQuotaGroup).
+ Delete(admin.RemoveUserFromQuotaGroup)
+ })
+ }, context.QuotaGroupAssignmentAPI())
+ })
+ })
+ }
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryAdmin), reqToken(), reqSiteAdmin())
+
+ m.Group("/topics", func() {
+ m.Get("/search", repo.TopicSearch)
+ }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryRepository))
+ }, sudo())
+
+ return m
+}
diff --git a/routers/api/v1/misc/gitignore.go b/routers/api/v1/misc/gitignore.go
new file mode 100644
index 0000000..dffd771
--- /dev/null
+++ b/routers/api/v1/misc/gitignore.go
@@ -0,0 +1,56 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/options"
+ repo_module "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/services/context"
+)
+
+// Shows a list of all Gitignore templates
+func ListGitignoresTemplates(ctx *context.APIContext) {
+ // swagger:operation GET /gitignore/templates miscellaneous listGitignoresTemplates
+ // ---
+ // summary: Returns a list of all gitignore templates
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitignoreTemplateList"
+ ctx.JSON(http.StatusOK, repo_module.Gitignores)
+}
+
+// Shows information about a gitignore template
+func GetGitignoreTemplateInfo(ctx *context.APIContext) {
+ // swagger:operation GET /gitignore/templates/{name} miscellaneous getGitignoreTemplateInfo
+ // ---
+ // summary: Returns information about a gitignore template
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: name
+ // in: path
+ // description: name of the template
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitignoreTemplateInfo"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ name := util.PathJoinRelX(ctx.Params("name"))
+
+ text, err := options.Gitignore(name)
+ if err != nil {
+ ctx.NotFound()
+ return
+ }
+
+ ctx.JSON(http.StatusOK, &structs.GitignoreTemplateInfo{Name: name, Source: string(text)})
+}
diff --git a/routers/api/v1/misc/label_templates.go b/routers/api/v1/misc/label_templates.go
new file mode 100644
index 0000000..cc11f37
--- /dev/null
+++ b/routers/api/v1/misc/label_templates.go
@@ -0,0 +1,60 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "net/http"
+
+ repo_module "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// Shows a list of all Label templates
+func ListLabelTemplates(ctx *context.APIContext) {
+ // swagger:operation GET /label/templates miscellaneous listLabelTemplates
+ // ---
+ // summary: Returns a list of all label templates
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelTemplateList"
+ result := make([]string, len(repo_module.LabelTemplateFiles))
+ for i := range repo_module.LabelTemplateFiles {
+ result[i] = repo_module.LabelTemplateFiles[i].DisplayName
+ }
+
+ ctx.JSON(http.StatusOK, result)
+}
+
+// Shows all labels in a template
+func GetLabelTemplate(ctx *context.APIContext) {
+ // swagger:operation GET /label/templates/{name} miscellaneous getLabelTemplateInfo
+ // ---
+ // summary: Returns all labels in a template
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: name
+ // in: path
+ // description: name of the template
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelTemplateInfo"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ name := util.PathJoinRelX(ctx.Params("name"))
+
+ labels, err := repo_module.LoadTemplateLabelsByDisplayName(name)
+ if err != nil {
+ ctx.NotFound()
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabelTemplateList(labels))
+}
diff --git a/routers/api/v1/misc/licenses.go b/routers/api/v1/misc/licenses.go
new file mode 100644
index 0000000..2a980f5
--- /dev/null
+++ b/routers/api/v1/misc/licenses.go
@@ -0,0 +1,76 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "code.gitea.io/gitea/modules/options"
+ repo_module "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/services/context"
+)
+
+// Returns a list of all License templates
+func ListLicenseTemplates(ctx *context.APIContext) {
+ // swagger:operation GET /licenses miscellaneous listLicenseTemplates
+ // ---
+ // summary: Returns a list of all license templates
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LicenseTemplateList"
+ response := make([]api.LicensesTemplateListEntry, len(repo_module.Licenses))
+ for i, license := range repo_module.Licenses {
+ response[i] = api.LicensesTemplateListEntry{
+ Key: license,
+ Name: license,
+ URL: fmt.Sprintf("%sapi/v1/licenses/%s", setting.AppURL, url.PathEscape(license)),
+ }
+ }
+ ctx.JSON(http.StatusOK, response)
+}
+
+// Returns information about a license template
+func GetLicenseTemplateInfo(ctx *context.APIContext) {
+ // swagger:operation GET /licenses/{name} miscellaneous getLicenseTemplateInfo
+ // ---
+ // summary: Returns information about a license template
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: name
+ // in: path
+ // description: name of the license
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LicenseTemplateInfo"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ name := util.PathJoinRelX(ctx.Params("name"))
+
+ text, err := options.License(name)
+ if err != nil {
+ ctx.NotFound()
+ return
+ }
+
+ response := api.LicenseTemplateInfo{
+ Key: name,
+ Name: name,
+ URL: fmt.Sprintf("%sapi/v1/licenses/%s", setting.AppURL, url.PathEscape(name)),
+ Body: string(text),
+ // For compatibility with the GitHub API, which adds this text to every license response.
+ Implementation: "Create a text file (typically named LICENSE or LICENSE.txt) in the root of your source code and copy the text of the license into the file",
+ }
+
+ ctx.JSON(http.StatusOK, response)
+}
diff --git a/routers/api/v1/misc/markup.go b/routers/api/v1/misc/markup.go
new file mode 100644
index 0000000..9699c79
--- /dev/null
+++ b/routers/api/v1/misc/markup.go
@@ -0,0 +1,110 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/common"
+ "code.gitea.io/gitea/services/context"
+)
+
+// Markup render markup document to HTML
+func Markup(ctx *context.APIContext) {
+ // swagger:operation POST /markup miscellaneous renderMarkup
+ // ---
+ // summary: Render a markup document as HTML
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/MarkupOption"
+ // consumes:
+ // - application/json
+ // produces:
+ // - text/html
+ // responses:
+ // "200":
+ // "$ref": "#/responses/MarkupRender"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.MarkupOption)
+
+ if ctx.HasAPIError() {
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.GetErrMsg())
+ return
+ }
+
+ common.RenderMarkup(ctx.Base, ctx.Repo, form.Mode, form.Text, form.Context, form.FilePath, form.Wiki)
+}
+
+// Markdown render markdown document to HTML
+func Markdown(ctx *context.APIContext) {
+ // swagger:operation POST /markdown miscellaneous renderMarkdown
+ // ---
+ // summary: Render a markdown document as HTML
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/MarkdownOption"
+ // consumes:
+ // - application/json
+ // produces:
+ // - text/html
+ // responses:
+ // "200":
+ // "$ref": "#/responses/MarkdownRender"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.MarkdownOption)
+
+ if ctx.HasAPIError() {
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.GetErrMsg())
+ return
+ }
+
+ mode := "markdown"
+ if form.Mode == "comment" || form.Mode == "gfm" {
+ mode = form.Mode
+ }
+
+ common.RenderMarkup(ctx.Base, ctx.Repo, mode, form.Text, form.Context, "", form.Wiki)
+}
+
+// MarkdownRaw render raw markdown HTML
+func MarkdownRaw(ctx *context.APIContext) {
+ // swagger:operation POST /markdown/raw miscellaneous renderMarkdownRaw
+ // ---
+ // summary: Render raw markdown as HTML
+ // parameters:
+ // - name: body
+ // in: body
+ // description: Request body to render
+ // required: true
+ // schema:
+ // type: string
+ // consumes:
+ // - text/plain
+ // produces:
+ // - text/html
+ // responses:
+ // "200":
+ // "$ref": "#/responses/MarkdownRender"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ defer ctx.Req.Body.Close()
+ if err := markdown.RenderRaw(&markup.RenderContext{
+ Ctx: ctx,
+ }, ctx.Req.Body, ctx.Resp); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+}
diff --git a/routers/api/v1/misc/markup_test.go b/routers/api/v1/misc/markup_test.go
new file mode 100644
index 0000000..5236fd0
--- /dev/null
+++ b/routers/api/v1/misc/markup_test.go
@@ -0,0 +1,184 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ go_context "context"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/contexttest"
+
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ AppURL = "http://localhost:3000/"
+ Repo = "gogits/gogs"
+ FullURL = AppURL + Repo + "/"
+)
+
+func testRenderMarkup(t *testing.T, mode, filePath, text, responseBody string, responseCode int) {
+ setting.AppURL = AppURL
+ options := api.MarkupOption{
+ Mode: mode,
+ Text: text,
+ Context: Repo,
+ Wiki: true,
+ FilePath: filePath,
+ }
+ ctx, resp := contexttest.MockAPIContext(t, "POST /api/v1/markup")
+ web.SetForm(ctx, &options)
+ Markup(ctx)
+ assert.Equal(t, responseBody, resp.Body.String())
+ assert.Equal(t, responseCode, resp.Code)
+ resp.Body.Reset()
+}
+
+func testRenderMarkdown(t *testing.T, mode, text, responseBody string, responseCode int) {
+ setting.AppURL = AppURL
+ options := api.MarkdownOption{
+ Mode: mode,
+ Text: text,
+ Context: Repo,
+ Wiki: true,
+ }
+ ctx, resp := contexttest.MockAPIContext(t, "POST /api/v1/markdown")
+ web.SetForm(ctx, &options)
+ Markdown(ctx)
+ assert.Equal(t, responseBody, resp.Body.String())
+ assert.Equal(t, responseCode, resp.Code)
+ resp.Body.Reset()
+}
+
+func TestAPI_RenderGFM(t *testing.T) {
+ markup.Init(&markup.ProcessorHelper{
+ IsUsernameMentionable: func(ctx go_context.Context, username string) bool {
+ return username == "r-lyeh"
+ },
+ })
+
+ testCasesCommon := []string{
+ // dear imgui wiki markdown extract: special wiki syntax
+ `Wiki! Enjoy :)
+- [[Links, Language bindings, Engine bindings|Links]]
+- [[Tips]]
+- Bezier widget (by @r-lyeh) https://github.com/ocornut/imgui/issues/786`,
+ // rendered
+ `<p>Wiki! Enjoy :)</p>
+<ul>
+<li><a href="` + FullURL + `wiki/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
+<li><a href="` + FullURL + `wiki/Tips" rel="nofollow">Tips</a></li>
+<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="https://github.com/ocornut/imgui/issues/786" rel="nofollow">https://github.com/ocornut/imgui/issues/786</a></li>
+</ul>
+`,
+ // Guard wiki sidebar: special syntax
+ `[[Guardfile-DSL / Configuring-Guard|Guardfile-DSL---Configuring-Guard]]`,
+ // rendered
+ `<p><a href="` + FullURL + `wiki/Guardfile-DSL---Configuring-Guard" rel="nofollow">Guardfile-DSL / Configuring-Guard</a></p>
+`,
+ // special syntax
+ `[[Name|Link]]`,
+ // rendered
+ `<p><a href="` + FullURL + `wiki/Link" rel="nofollow">Name</a></p>
+`,
+ // empty
+ ``,
+ // rendered
+ ``,
+ }
+
+ testCasesDocument := []string{
+ // wine-staging wiki home extract: special wiki syntax, images
+ `## What is Wine Staging?
+**Wine Staging** on website [wine-staging.com](http://wine-staging.com).
+
+## Quick Links
+Here are some links to the most important topics. You can find the full list of pages at the sidebar.
+
+[[Configuration]]
+[[images/icon-bug.png]]
+`,
+ // rendered
+ `<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
+<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
+<h2 id="user-content-quick-links">Quick Links</h2>
+<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
+<p><a href="` + FullURL + `wiki/Configuration" rel="nofollow">Configuration</a>
+<a href="` + FullURL + `wiki/raw/images/icon-bug.png" rel="nofollow"><img src="` + FullURL + `wiki/raw/images/icon-bug.png" title="icon-bug.png" alt="images/icon-bug.png"/></a></p>
+`,
+ }
+
+ for i := 0; i < len(testCasesCommon); i += 2 {
+ text := testCasesCommon[i]
+ response := testCasesCommon[i+1]
+ testRenderMarkdown(t, "gfm", text, response, http.StatusOK)
+ testRenderMarkup(t, "gfm", "", text, response, http.StatusOK)
+ testRenderMarkdown(t, "comment", text, response, http.StatusOK)
+ testRenderMarkup(t, "comment", "", text, response, http.StatusOK)
+ testRenderMarkup(t, "file", "path/test.md", text, response, http.StatusOK)
+ }
+
+ for i := 0; i < len(testCasesDocument); i += 2 {
+ text := testCasesDocument[i]
+ response := testCasesDocument[i+1]
+ testRenderMarkdown(t, "gfm", text, response, http.StatusOK)
+ testRenderMarkup(t, "gfm", "", text, response, http.StatusOK)
+ testRenderMarkup(t, "file", "path/test.md", text, response, http.StatusOK)
+ }
+
+ testRenderMarkup(t, "file", "path/test.unknown", "## Test", "Unsupported render extension: .unknown\n", http.StatusUnprocessableEntity)
+ testRenderMarkup(t, "unknown", "", "## Test", "Unknown mode: unknown\n", http.StatusUnprocessableEntity)
+}
+
+var simpleCases = []string{
+ // Guard wiki sidebar: special syntax
+ `[[Guardfile-DSL / Configuring-Guard|Guardfile-DSL---Configuring-Guard]]`,
+ // rendered
+ `<p>[[Guardfile-DSL / Configuring-Guard|Guardfile-DSL---Configuring-Guard]]</p>
+`,
+ // special syntax
+ `[[Name|Link]]`,
+ // rendered
+ `<p>[[Name|Link]]</p>
+`,
+ // empty
+ ``,
+ // rendered
+ ``,
+}
+
+func TestAPI_RenderSimple(t *testing.T) {
+ setting.AppURL = AppURL
+ options := api.MarkdownOption{
+ Mode: "markdown",
+ Text: "",
+ Context: Repo,
+ }
+ ctx, resp := contexttest.MockAPIContext(t, "POST /api/v1/markdown")
+ for i := 0; i < len(simpleCases); i += 2 {
+ options.Text = simpleCases[i]
+ web.SetForm(ctx, &options)
+ Markdown(ctx)
+ assert.Equal(t, simpleCases[i+1], resp.Body.String())
+ resp.Body.Reset()
+ }
+}
+
+func TestAPI_RenderRaw(t *testing.T) {
+ setting.AppURL = AppURL
+ ctx, resp := contexttest.MockAPIContext(t, "POST /api/v1/markdown")
+ for i := 0; i < len(simpleCases); i += 2 {
+ ctx.Req.Body = io.NopCloser(strings.NewReader(simpleCases[i]))
+ MarkdownRaw(ctx)
+ assert.Equal(t, simpleCases[i+1], resp.Body.String())
+ resp.Body.Reset()
+ }
+}
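
The tests above call the handlers directly; over HTTP, the raw variant is just a text/plain POST with the markdown in the request body. A minimal sketch, assuming a placeholder instance URL:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// MarkdownRaw reads the request body verbatim, so no JSON envelope is needed.
	resp, err := http.Post("https://forgejo.example.com/api/v1/markdown/raw", // placeholder instance
		"text/plain", strings.NewReader("## Test\n\n[[Name|Link]] stays literal in raw mode"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}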
diff --git a/routers/api/v1/misc/nodeinfo.go b/routers/api/v1/misc/nodeinfo.go
new file mode 100644
index 0000000..9c2a0db
--- /dev/null
+++ b/routers/api/v1/misc/nodeinfo.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "net/http"
+ "time"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+)
+
+const cacheKeyNodeInfoUsage = "API_NodeInfoUsage"
+
+// NodeInfo returns the NodeInfo for the Gitea instance to allow for federation
+func NodeInfo(ctx *context.APIContext) {
+ // swagger:operation GET /nodeinfo miscellaneous getNodeInfo
+ // ---
+ // summary: Returns the nodeinfo of the Gitea application
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/NodeInfo"
+
+ nodeInfoUsage := structs.NodeInfoUsage{}
+ if setting.Federation.ShareUserStatistics {
+ var cached bool
+ nodeInfoUsage, cached = ctx.Cache.Get(cacheKeyNodeInfoUsage).(structs.NodeInfoUsage)
+
+ if !cached {
+ usersTotal := int(user_model.CountUsers(ctx, nil))
+ now := time.Now()
+ timeOneMonthAgo := now.AddDate(0, -1, 0).Unix()
+ timeHaveYearAgo := now.AddDate(0, -6, 0).Unix()
+ usersActiveMonth := int(user_model.CountUsers(ctx, &user_model.CountUserFilter{LastLoginSince: &timeOneMonthAgo}))
+ usersActiveHalfyear := int(user_model.CountUsers(ctx, &user_model.CountUserFilter{LastLoginSince: &timeHaveYearAgo}))
+
+ allIssues, _ := issues_model.CountIssues(ctx, &issues_model.IssuesOptions{})
+ allComments, _ := issues_model.CountComments(ctx, &issues_model.FindCommentsOptions{})
+
+ nodeInfoUsage = structs.NodeInfoUsage{
+ Users: structs.NodeInfoUsageUsers{
+ Total: usersTotal,
+ ActiveMonth: usersActiveMonth,
+ ActiveHalfyear: usersActiveHalfyear,
+ },
+ LocalPosts: int(allIssues),
+ LocalComments: int(allComments),
+ }
+
+ if err := ctx.Cache.Put(cacheKeyNodeInfoUsage, nodeInfoUsage, 180); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ }
+ }
+
+ nodeInfo := &structs.NodeInfo{
+ Version: "2.1",
+ Software: structs.NodeInfoSoftware{
+ Name: "forgejo",
+ Version: setting.AppVer,
+ Repository: "https://codeberg.org/forgejo/forgejo.git",
+ Homepage: "https://forgejo.org/",
+ },
+ Protocols: []string{"activitypub"},
+ Services: structs.NodeInfoServices{
+ Inbound: []string{},
+ Outbound: []string{"rss2.0"},
+ },
+ OpenRegistrations: setting.Service.ShowRegistrationButton,
+ Usage: nodeInfoUsage,
+ }
+ ctx.JSON(http.StatusOK, nodeInfo)
+}
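
The shape of the response above can be inspected with an unauthenticated GET; the sketch below decodes into a generic map instead of assuming exact JSON tags, and the instance URL is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://forgejo.example.com/api/v1/nodeinfo") // placeholder instance
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	// Usage statistics are only filled in when setting.Federation.ShareUserStatistics is enabled.
	fmt.Println(info["software"], info["usage"])
}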
diff --git a/routers/api/v1/misc/signing.go b/routers/api/v1/misc/signing.go
new file mode 100644
index 0000000..24a46c1
--- /dev/null
+++ b/routers/api/v1/misc/signing.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "fmt"
+ "net/http"
+
+ asymkey_service "code.gitea.io/gitea/services/asymkey"
+ "code.gitea.io/gitea/services/context"
+)
+
+// SigningKey returns the public key of the default signing key if it exists
+func SigningKey(ctx *context.APIContext) {
+ // swagger:operation GET /signing-key.gpg miscellaneous getSigningKey
+ // ---
+ // summary: Get default signing-key.gpg
+ // produces:
+ // - text/plain
+ // responses:
+ // "200":
+ // description: "GPG armored public key"
+ // schema:
+ // type: string
+
+ // swagger:operation GET /repos/{owner}/{repo}/signing-key.gpg repository repoSigningKey
+ // ---
+ // summary: Get signing-key.gpg for given repository
+ // produces:
+ // - text/plain
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // description: "GPG armored public key"
+ // schema:
+ // type: string
+
+ path := ""
+ if ctx.Repo != nil && ctx.Repo.Repository != nil {
+ path = ctx.Repo.Repository.RepoPath()
+ }
+
+ content, err := asymkey_service.PublicSigningKey(ctx, path)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "gpg export", err)
+ return
+ }
+ _, err = ctx.Write([]byte(content))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "gpg export", fmt.Errorf("error writing key content: %w", err))
+ }
+}
diff --git a/routers/api/v1/misc/version.go b/routers/api/v1/misc/version.go
new file mode 100644
index 0000000..e3b43a0
--- /dev/null
+++ b/routers/api/v1/misc/version.go
@@ -0,0 +1,25 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package misc
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+)
+
+// Version shows the version of the Gitea server
+func Version(ctx *context.APIContext) {
+ // swagger:operation GET /version miscellaneous getVersion
+ // ---
+ // summary: Returns the version of the Gitea application
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ServerVersion"
+ ctx.JSON(http.StatusOK, &structs.ServerVersion{Version: setting.AppVer})
+}
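
A matching client call is a one-liner; this sketch assumes a placeholder instance and that the body follows the ServerVersion shape returned above.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://forgejo.example.com/api/v1/version") // placeholder instance
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var v struct {
		Version string `json:"version"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println("server version:", v.Version)
}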
diff --git a/routers/api/v1/notify/notifications.go b/routers/api/v1/notify/notifications.go
new file mode 100644
index 0000000..46b3c7f
--- /dev/null
+++ b/routers/api/v1/notify/notifications.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package notify
+
+import (
+ "net/http"
+ "strings"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+)
+
+// NewAvailable check if unread notifications exist
+func NewAvailable(ctx *context.APIContext) {
+ // swagger:operation GET /notifications/new notification notifyNewAvailable
+ // ---
+ // summary: Check if unread notifications exist
+ // responses:
+ // "200":
+ // "$ref": "#/responses/NotificationCount"
+
+ total, err := db.Count[activities_model.Notification](ctx, activities_model.FindNotificationOptions{
+ UserID: ctx.Doer.ID,
+ Status: []activities_model.NotificationStatus{activities_model.NotificationStatusUnread},
+ })
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "db.Count[activities_model.Notification]", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, api.NotificationCount{New: total})
+}
+
+func getFindNotificationOptions(ctx *context.APIContext) *activities_model.FindNotificationOptions {
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return nil
+ }
+ opts := &activities_model.FindNotificationOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ UserID: ctx.Doer.ID,
+ UpdatedBeforeUnix: before,
+ UpdatedAfterUnix: since,
+ }
+ if !ctx.FormBool("all") {
+ statuses := ctx.FormStrings("status-types")
+ opts.Status = statusStringsToNotificationStatuses(statuses, []string{"unread", "pinned"})
+ }
+
+ subjectTypes := ctx.FormStrings("subject-type")
+ if len(subjectTypes) != 0 {
+ opts.Source = subjectToSource(subjectTypes)
+ }
+
+ return opts
+}
+
+func subjectToSource(value []string) (result []activities_model.NotificationSource) {
+ for _, v := range value {
+ switch strings.ToLower(v) {
+ case "issue":
+ result = append(result, activities_model.NotificationSourceIssue)
+ case "pull":
+ result = append(result, activities_model.NotificationSourcePullRequest)
+ case "commit":
+ result = append(result, activities_model.NotificationSourceCommit)
+ case "repository":
+ result = append(result, activities_model.NotificationSourceRepository)
+ }
+ }
+ return result
+}
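
All notification routes act on ctx.Doer, so a client has to authenticate; a sketch polling the unread counter with a personal access token (URL and token are placeholders, and the JSON key is assumed from the NotificationCount response):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"https://forgejo.example.com/api/v1/notifications/new", nil) // placeholder instance
	req.Header.Set("Authorization", "token REPLACE_ME")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var count struct {
		New int64 `json:"new"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&count); err != nil {
		panic(err)
	}
	fmt.Println("unread notifications:", count.New)
}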
diff --git a/routers/api/v1/notify/repo.go b/routers/api/v1/notify/repo.go
new file mode 100644
index 0000000..1744426
--- /dev/null
+++ b/routers/api/v1/notify/repo.go
@@ -0,0 +1,227 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package notify
+
+import (
+ "net/http"
+ "strings"
+ "time"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+func statusStringToNotificationStatus(status string) activities_model.NotificationStatus {
+ switch strings.ToLower(strings.TrimSpace(status)) {
+ case "unread":
+ return activities_model.NotificationStatusUnread
+ case "read":
+ return activities_model.NotificationStatusRead
+ case "pinned":
+ return activities_model.NotificationStatusPinned
+ default:
+ return 0
+ }
+}
+
+func statusStringsToNotificationStatuses(statuses, defaultStatuses []string) []activities_model.NotificationStatus {
+ if len(statuses) == 0 {
+ statuses = defaultStatuses
+ }
+ results := make([]activities_model.NotificationStatus, 0, len(statuses))
+ for _, status := range statuses {
+ notificationStatus := statusStringToNotificationStatus(status)
+ if notificationStatus > 0 {
+ results = append(results, notificationStatus)
+ }
+ }
+ return results
+}
+
+// ListRepoNotifications list the user's notification threads on a specific repo
+func ListRepoNotifications(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/notifications notification notifyGetRepoList
+ // ---
+ // summary: List the user's notification threads on a specific repo
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: all
+ // in: query
+ // description: If true, show notifications marked as read. Default value is false
+ // type: boolean
+ // - name: status-types
+ // in: query
+ // description: "Show notifications with the provided status types. Options are: unread, read and/or pinned. Defaults to unread & pinned"
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // - name: subject-type
+ // in: query
+ // description: "filter notifications by subject type"
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // enum: [issue,pull,commit,repository]
+ // - name: since
+ // in: query
+ // description: Only show notifications updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: Only show notifications updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/NotificationThreadList"
+ opts := getFindNotificationOptions(ctx)
+ if ctx.Written() {
+ return
+ }
+ opts.RepoID = ctx.Repo.Repository.ID
+
+ totalCount, err := db.Count[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ nl, err := db.Find[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ err = activities_model.NotificationList(nl).LoadAttributes(ctx)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(totalCount)
+
+ ctx.JSON(http.StatusOK, convert.ToNotifications(ctx, nl))
+}
+
+// ReadRepoNotifications mark notification threads as read on a specific repo
+func ReadRepoNotifications(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/notifications notification notifyReadRepoList
+ // ---
+ // summary: Mark notification threads as read, pinned or unread on a specific repo
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: all
+ // in: query
+ // description: If true, mark all notifications on this repo. Default value is false
+ // type: string
+ // required: false
+ // - name: status-types
+ // in: query
+ // description: "Mark notifications with the provided status types. Options are: unread, read and/or pinned. Defaults to unread."
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // required: false
+ // - name: to-status
+ // in: query
+ // description: Status to mark notifications as. Defaults to read.
+ // type: string
+ // required: false
+ // - name: last_read_at
+ // in: query
+ // description: Describes the last point that notifications were checked. Anything updated since this time will not be updated.
+ // type: string
+ // format: date-time
+ // required: false
+ // responses:
+ // "205":
+ // "$ref": "#/responses/NotificationThreadList"
+
+ lastRead := int64(0)
+ qLastRead := ctx.FormTrim("last_read_at")
+ if len(qLastRead) > 0 {
+ tmpLastRead, err := time.Parse(time.RFC3339, qLastRead)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "Parse", err)
+ return
+ }
+ if !tmpLastRead.IsZero() {
+ lastRead = tmpLastRead.Unix()
+ }
+ }
+
+ opts := &activities_model.FindNotificationOptions{
+ UserID: ctx.Doer.ID,
+ RepoID: ctx.Repo.Repository.ID,
+ UpdatedBeforeUnix: lastRead,
+ }
+
+ if !ctx.FormBool("all") {
+ statuses := ctx.FormStrings("status-types")
+ opts.Status = statusStringsToNotificationStatuses(statuses, []string{"unread"})
+ }
+ nl, err := db.Find[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ targetStatus := statusStringToNotificationStatus(ctx.FormString("to-status"))
+ if targetStatus == 0 {
+ targetStatus = activities_model.NotificationStatusRead
+ }
+
+ changed := make([]*structs.NotificationThread, 0, len(nl))
+
+ for _, n := range nl {
+ notif, err := activities_model.SetNotificationStatus(ctx, n.ID, ctx.Doer, targetStatus)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ _ = notif.LoadAttributes(ctx)
+ changed = append(changed, convert.ToNotificationThread(ctx, notif))
+ }
+ ctx.JSON(http.StatusResetContent, changed)
+}
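
ReadRepoNotifications takes everything through query parameters and answers 205 Reset Content with the changed threads; a sketch marking a repo's unread threads as read (owner, repo and token are placeholders):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"
)

func main() {
	q := url.Values{}
	q.Set("last_read_at", time.Now().Format(time.RFC3339)) // only touch threads updated before now
	q.Set("to-status", "read")

	endpoint := "https://forgejo.example.com/api/v1/repos/some-owner/some-repo/notifications?" + q.Encode()
	req, _ := http.NewRequest(http.MethodPut, endpoint, nil)
	req.Header.Set("Authorization", "token REPLACE_ME")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 205, body lists the threads whose status changed
}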
diff --git a/routers/api/v1/notify/threads.go b/routers/api/v1/notify/threads.go
new file mode 100644
index 0000000..8e12d35
--- /dev/null
+++ b/routers/api/v1/notify/threads.go
@@ -0,0 +1,118 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package notify
+
+import (
+ "fmt"
+ "net/http"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetThread get notification by ID
+func GetThread(ctx *context.APIContext) {
+ // swagger:operation GET /notifications/threads/{id} notification notifyGetThread
+ // ---
+ // summary: Get notification thread by ID
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of notification thread
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/NotificationThread"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ n := getThread(ctx)
+ if n == nil {
+ return
+ }
+ if err := n.LoadAttributes(ctx); err != nil && !issues_model.IsErrCommentNotExist(err) {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToNotificationThread(ctx, n))
+}
+
+// ReadThread mark notification as read by ID
+func ReadThread(ctx *context.APIContext) {
+ // swagger:operation PATCH /notifications/threads/{id} notification notifyReadThread
+ // ---
+ // summary: Mark notification thread as read by ID
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of notification thread
+ // type: string
+ // required: true
+ // - name: to-status
+ // in: query
+ // description: Status to mark notifications as
+ // type: string
+ // default: read
+ // required: false
+ // responses:
+ // "205":
+ // "$ref": "#/responses/NotificationThread"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ n := getThread(ctx)
+ if n == nil {
+ return
+ }
+
+ targetStatus := statusStringToNotificationStatus(ctx.FormString("to-status"))
+ if targetStatus == 0 {
+ targetStatus = activities_model.NotificationStatusRead
+ }
+
+ notif, err := activities_model.SetNotificationStatus(ctx, n.ID, ctx.Doer, targetStatus)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if err = notif.LoadAttributes(ctx); err != nil && !issues_model.IsErrCommentNotExist(err) {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusResetContent, convert.ToNotificationThread(ctx, notif))
+}
+
+func getThread(ctx *context.APIContext) *activities_model.Notification {
+ n, err := activities_model.GetNotificationByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if db.IsErrNotExist(err) {
+ ctx.Error(http.StatusNotFound, "GetNotificationByID", err)
+ } else {
+ ctx.InternalServerError(err)
+ }
+ return nil
+ }
+ if n.UserID != ctx.Doer.ID && !ctx.Doer.IsAdmin {
+ ctx.Error(http.StatusForbidden, "GetNotificationByID", fmt.Errorf("only user itself and admin are allowed to read/change this thread %d", n.ID))
+ return nil
+ }
+ return n
+}
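
A single thread can be moved to any of the three statuses via PATCH; a sketch pinning one thread (the id and token are placeholders):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	endpoint := "https://forgejo.example.com/api/v1/notifications/threads/42?to-status=pinned" // placeholders
	req, _ := http.NewRequest(http.MethodPatch, endpoint, nil)
	req.Header.Set("Authorization", "token REPLACE_ME")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// 205 on success, 403 if the thread belongs to someone else, 404 if it does not exist.
	fmt.Println(resp.StatusCode)
}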
diff --git a/routers/api/v1/notify/user.go b/routers/api/v1/notify/user.go
new file mode 100644
index 0000000..879f484
--- /dev/null
+++ b/routers/api/v1/notify/user.go
@@ -0,0 +1,175 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package notify
+
+import (
+ "net/http"
+ "time"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListNotifications list the user's notification threads
+func ListNotifications(ctx *context.APIContext) {
+ // swagger:operation GET /notifications notification notifyGetList
+ // ---
+ // summary: List the user's notification threads
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: all
+ // in: query
+ // description: If true, show notifications marked as read. Default value is false
+ // type: boolean
+ // - name: status-types
+ // in: query
+ // description: "Show notifications with the provided status types. Options are: unread, read and/or pinned. Defaults to unread & pinned."
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // - name: subject-type
+ // in: query
+ // description: "filter notifications by subject type"
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // enum: [issue,pull,commit,repository]
+ // - name: since
+ // in: query
+ // description: Only show notifications updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: Only show notifications updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/NotificationThreadList"
+ opts := getFindNotificationOptions(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ totalCount, err := db.Count[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ nl, err := db.Find[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ err = activities_model.NotificationList(nl).LoadAttributes(ctx)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(totalCount)
+ ctx.JSON(http.StatusOK, convert.ToNotifications(ctx, nl))
+}
+
+// ReadNotifications mark notification threads as read, unread, or pinned
+func ReadNotifications(ctx *context.APIContext) {
+ // swagger:operation PUT /notifications notification notifyReadList
+ // ---
+ // summary: Mark notification threads as read, pinned or unread
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: last_read_at
+ // in: query
+ // description: Describes the last point that notifications were checked. Anything updated since this time will not be updated.
+ // type: string
+ // format: date-time
+ // required: false
+ // - name: all
+ // in: query
+ // description: If true, mark all notifications. Default value is false
+ // type: string
+ // required: false
+ // - name: status-types
+ // in: query
+ // description: "Mark notifications with the provided status types. Options are: unread, read and/or pinned. Defaults to unread."
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: string
+ // required: false
+ // - name: to-status
+ // in: query
+ // description: Status to mark notifications as. Defaults to read.
+ // type: string
+ // required: false
+ // responses:
+ // "205":
+ // "$ref": "#/responses/NotificationThreadList"
+
+ lastRead := int64(0)
+ qLastRead := ctx.FormTrim("last_read_at")
+ if len(qLastRead) > 0 {
+ tmpLastRead, err := time.Parse(time.RFC3339, qLastRead)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "Parse", err)
+ return
+ }
+ if !tmpLastRead.IsZero() {
+ lastRead = tmpLastRead.Unix()
+ }
+ }
+ opts := &activities_model.FindNotificationOptions{
+ UserID: ctx.Doer.ID,
+ UpdatedBeforeUnix: lastRead,
+ }
+ if !ctx.FormBool("all") {
+ statuses := ctx.FormStrings("status-types")
+ opts.Status = statusStringsToNotificationStatuses(statuses, []string{"unread"})
+ }
+ nl, err := db.Find[activities_model.Notification](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ targetStatus := statusStringToNotificationStatus(ctx.FormString("to-status"))
+ if targetStatus == 0 {
+ targetStatus = activities_model.NotificationStatusRead
+ }
+
+ changed := make([]*structs.NotificationThread, 0, len(nl))
+
+ for _, n := range nl {
+ notif, err := activities_model.SetNotificationStatus(ctx, n.ID, ctx.Doer, targetStatus)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ _ = notif.LoadAttributes(ctx)
+ changed = append(changed, convert.ToNotificationThread(ctx, notif))
+ }
+
+ ctx.JSON(http.StatusResetContent, changed)
+}
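
status-types and subject-type are declared with collectionFormat multi, i.e. the parameter is simply repeated in the query string, which url.Values.Add handles directly. A sketch with a placeholder token; the total is read from the count header set by ctx.SetTotalCountHeader, assumed here to be X-Total-Count:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("status-types", "unread")
	q.Add("status-types", "pinned")
	q.Add("subject-type", "issue")
	q.Set("limit", "10")

	req, _ := http.NewRequest(http.MethodGet,
		"https://forgejo.example.com/api/v1/notifications?"+q.Encode(), nil) // placeholder instance
	req.Header.Set("Authorization", "token REPLACE_ME")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println("total:", resp.Header.Get("X-Total-Count"))
	fmt.Println(string(body))
}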
diff --git a/routers/api/v1/org/action.go b/routers/api/v1/org/action.go
new file mode 100644
index 0000000..03a1fa8
--- /dev/null
+++ b/routers/api/v1/org/action.go
@@ -0,0 +1,473 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ secret_model "code.gitea.io/gitea/models/secret"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+ secret_service "code.gitea.io/gitea/services/secrets"
+)
+
+// ListActionsSecrets list an organization's actions secrets
+func (Action) ListActionsSecrets(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/actions/secrets organization orgListActionsSecrets
+ // ---
+ // summary: List an organization's actions secrets
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/SecretList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := &secret_model.FindSecretsOptions{
+ OwnerID: ctx.Org.Organization.ID,
+ ListOptions: utils.GetListOptions(ctx),
+ }
+
+ secrets, count, err := db.FindAndCount[secret_model.Secret](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiSecrets := make([]*api.Secret, len(secrets))
+ for k, v := range secrets {
+ apiSecrets[k] = &api.Secret{
+ Name: v.Name,
+ Created: v.CreatedUnix.AsTime(),
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiSecrets)
+}
+
+// CreateOrUpdateSecret create or update one secret of the organization
+func (Action) CreateOrUpdateSecret(ctx *context.APIContext) {
+ // swagger:operation PUT /orgs/{org}/actions/secrets/{secretname} organization updateOrgSecret
+ // ---
+ // summary: Create or Update a secret value in an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of organization
+ // type: string
+ // required: true
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateOrUpdateSecretOption"
+ // responses:
+ // "201":
+ // description: response when creating a secret
+ // "204":
+ // description: response when updating a secret
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.CreateOrUpdateSecretOption)
+
+ _, created, err := secret_service.CreateOrUpdateSecret(ctx, ctx.Org.Organization.ID, 0, ctx.Params("secretname"), opt.Data)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateOrUpdateSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "CreateOrUpdateSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateOrUpdateSecret", err)
+ }
+ return
+ }
+
+ if created {
+ ctx.Status(http.StatusCreated)
+ } else {
+ ctx.Status(http.StatusNoContent)
+ }
+}
+
+// DeleteSecret delete one secret of the organization
+func (Action) DeleteSecret(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/actions/secrets/{secretname} organization deleteOrgSecret
+ // ---
+ // summary: Delete a secret in an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of organization
+ // type: string
+ // required: true
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: delete one secret of the organization
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := secret_service.DeleteSecretByName(ctx, ctx.Org.Organization.ID, 0, ctx.Params("secretname"))
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteSecret", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-a-registration-token-for-an-organization
+// GetRegistrationToken returns the token to register org runners
+func (Action) GetRegistrationToken(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/actions/runners/registration-token organization orgGetRunnerRegistrationToken
+ // ---
+ // summary: Get an organization's actions runner registration token
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RegistrationToken"
+
+ shared.GetRegistrationToken(ctx, ctx.Org.Organization.ID, 0)
+}
+
+// ListVariables list org-level variables
+func (Action) ListVariables(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/actions/variables organization getOrgVariablesList
+ // ---
+ // summary: Get an org-level variables list
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/VariableList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ vars, count, err := db.FindAndCount[actions_model.ActionVariable](ctx, &actions_model.FindVariablesOpts{
+ OwnerID: ctx.Org.Organization.ID,
+ ListOptions: utils.GetListOptions(ctx),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindVariables", err)
+ return
+ }
+
+ variables := make([]*api.ActionVariable, len(vars))
+ for i, v := range vars {
+ variables[i] = &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ Data: v.Data,
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, variables)
+}
+
+// GetVariable get an org-level variable
+func (Action) GetVariable(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/actions/variables/{variablename} organization getOrgVariable
+ // ---
+ // summary: Get an org-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActionVariable"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ctx.Org.Organization.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ variable := &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ Data: v.Data,
+ }
+
+ ctx.JSON(http.StatusOK, variable)
+}
+
+// DeleteVariable delete an org-level variable
+func (Action) DeleteVariable(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/actions/variables/{variablename} organization deleteOrgVariable
+ // ---
+ // summary: Delete an org-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActionVariable"
+ // "201":
+ // description: response when deleting a variable
+ // "204":
+ // description: response when deleting a variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := actions_service.DeleteVariableByName(ctx, ctx.Org.Organization.ID, 0, ctx.Params("variablename")); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteVariableByName", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteVariableByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteVariableByName", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateVariable create an org-level variable
+func (Action) CreateVariable(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/actions/variables/{variablename} organization createOrgVariable
+ // ---
+ // summary: Create an org-level variable
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateVariableOption"
+ // responses:
+ // "201":
+ // description: response when creating an org-level variable
+ // "204":
+ // description: response when creating an org-level variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.CreateVariableOption)
+
+ ownerID := ctx.Org.Organization.ID
+ variableName := ctx.Params("variablename")
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ownerID,
+ Name: variableName,
+ })
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ return
+ }
+ if v != nil && v.ID > 0 {
+ ctx.Error(http.StatusConflict, "VariableNameAlreadyExists", util.NewAlreadyExistErrorf("variable name %s already exists", variableName))
+ return
+ }
+
+ if _, err := actions_service.CreateVariable(ctx, ownerID, 0, variableName, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UpdateVariable update an org-level variable
+func (Action) UpdateVariable(ctx *context.APIContext) {
+ // swagger:operation PUT /orgs/{org}/actions/variables/{variablename} organization updateOrgVariable
+ // ---
+ // summary: Update an org-level variable
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateVariableOption"
+ // responses:
+ // "201":
+ // description: response when updating an org-level variable
+ // "204":
+ // description: response when updating an org-level variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.UpdateVariableOption)
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ctx.Org.Organization.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ if opt.Name == "" {
+ opt.Name = ctx.Params("variablename")
+ }
+ if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+var _ actions_service.API = new(Action)
+
+// Action implements actions_service.API
+type Action struct{}
+
+// NewAction creates a new Action service
+func NewAction() actions_service.API {
+ return Action{}
+}
diff --git a/routers/api/v1/org/avatar.go b/routers/api/v1/org/avatar.go
new file mode 100644
index 0000000..f11eb6c
--- /dev/null
+++ b/routers/api/v1/org/avatar.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "encoding/base64"
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+// UpdateAvatar updates the Avatar of an Organisation
+func UpdateAvatar(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/avatar organization orgUpdateAvatar
+ // ---
+ // summary: Update Avatar
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateUserAvatarOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.UpdateUserAvatarOption)
+
+ content, err := base64.StdEncoding.DecodeString(form.Image)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "DecodeImage", err)
+ return
+ }
+
+ err = user_service.UploadAvatar(ctx, ctx.Org.Organization.AsUser(), content)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UploadAvatar", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteAvatar deletes the Avatar of an Organisation
+func DeleteAvatar(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/avatar organization orgDeleteAvatar
+ // ---
+ // summary: Delete Avatar
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ err := user_service.DeleteAvatar(ctx, ctx.Org.Organization.AsUser())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteAvatar", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
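
The handler base64-decodes the image before passing it to UploadAvatar, so the client encodes the file itself; a sketch with placeholder org, file path and token (the "image" field name is assumed from UpdateUserAvatarOption):

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	raw, err := os.ReadFile("avatar.png") // placeholder path
	if err != nil {
		panic(err)
	}
	payload, _ := json.Marshal(map[string]string{
		"image": base64.StdEncoding.EncodeToString(raw),
	})

	req, _ := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/orgs/my-org/avatar", bytes.NewReader(payload)) // placeholders
	req.Header.Set("Authorization", "token REPLACE_ME")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 204 on success, 400 if the payload is not valid base64
}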
diff --git a/routers/api/v1/org/hook.go b/routers/api/v1/org/hook.go
new file mode 100644
index 0000000..c1dc051
--- /dev/null
+++ b/routers/api/v1/org/hook.go
@@ -0,0 +1,189 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+// ListHooks list an organization's webhooks
+func ListHooks(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/hooks organization orgListHooks
+ // ---
+ // summary: List an organization's webhooks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/HookList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ utils.ListOwnerHooks(
+ ctx,
+ ctx.ContextUser,
+ )
+}
+
+// GetHook get an organization's hook by id
+func GetHook(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/hooks/{id} organization orgGetHook
+ // ---
+ // summary: Get a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ hook, err := utils.GetOwnerHook(ctx, ctx.ContextUser.ID, ctx.ParamsInt64("id"))
+ if err != nil {
+ return
+ }
+
+ apiHook, err := webhook_service.ToHook(ctx.ContextUser.HomeLink(), hook)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiHook)
+}
+
+// CreateHook create a hook for an organization
+func CreateHook(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/hooks organization orgCreateHook
+ // ---
+ // summary: Create a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateHookOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ utils.AddOwnerHook(
+ ctx,
+ ctx.ContextUser,
+ web.GetForm(ctx).(*api.CreateHookOption),
+ )
+}
+
+// EditHook modify a hook of an organization
+func EditHook(ctx *context.APIContext) {
+ // swagger:operation PATCH /orgs/{org}/hooks/{id} organization orgEditHook
+ // ---
+ // summary: Update a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to update
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditHookOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ utils.EditOwnerHook(
+ ctx,
+ ctx.ContextUser,
+ web.GetForm(ctx).(*api.EditHookOption),
+ ctx.ParamsInt64("id"),
+ )
+}
+
+// DeleteHook delete a hook of an organization
+func DeleteHook(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/hooks/{id} organization orgDeleteHook
+ // ---
+ // summary: Delete a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ utils.DeleteOwnerHook(
+ ctx,
+ ctx.ContextUser,
+ ctx.ParamsInt64("id"),
+ )
+}
diff --git a/routers/api/v1/org/label.go b/routers/api/v1/org/label.go
new file mode 100644
index 0000000..b5ec54c
--- /dev/null
+++ b/routers/api/v1/org/label.go
@@ -0,0 +1,258 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/modules/label"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListLabels list all the labels of an organization
+func ListLabels(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/labels organization orgListLabels
+ // ---
+ // summary: List an organization's labels
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ labels, err := issues_model.GetLabelsByOrgID(ctx, ctx.Org.Organization.ID, ctx.FormString("sort"), utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsByOrgID", err)
+ return
+ }
+
+ count, err := issues_model.CountLabelsByOrgID(ctx, ctx.Org.Organization.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, convert.ToLabelList(labels, nil, ctx.Org.Organization.AsUser()))
+}
+
+// CreateLabel create a label for an organization
+func CreateLabel(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/labels organization orgCreateLabel
+ // ---
+ // summary: Create a label for an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateLabelOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.CreateLabelOption)
+ form.Color = strings.Trim(form.Color, " ")
+ color, err := label.NormalizeColor(form.Color)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "Color", err)
+ return
+ }
+ form.Color = color
+
+ label := &issues_model.Label{
+ Name: form.Name,
+ Exclusive: form.Exclusive,
+ Color: form.Color,
+ OrgID: ctx.Org.Organization.ID,
+ Description: form.Description,
+ }
+ if err := issues_model.NewLabel(ctx, label); err != nil {
+ ctx.Error(http.StatusInternalServerError, "NewLabel", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToLabel(label, nil, ctx.Org.Organization.AsUser()))
+}
+
+// GetLabel get label by organization and label id
+func GetLabel(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/labels/{id} organization orgGetLabel
+ // ---
+ // summary: Get a single label
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ var (
+ label *issues_model.Label
+ err error
+ )
+ strID := ctx.Params(":id")
+ if intID, err2 := strconv.ParseInt(strID, 10, 64); err2 != nil {
+ label, err = issues_model.GetLabelInOrgByName(ctx, ctx.Org.Organization.ID, strID)
+ } else {
+ label, err = issues_model.GetLabelInOrgByID(ctx, ctx.Org.Organization.ID, intID)
+ }
+ if err != nil {
+ if issues_model.IsErrOrgLabelNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetLabelByOrgID", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabel(label, nil, ctx.Org.Organization.AsUser()))
+}
+
+// EditLabel modify a label for an Organization
+func EditLabel(ctx *context.APIContext) {
+ // swagger:operation PATCH /orgs/{org}/labels/{id} organization orgEditLabel
+ // ---
+ // summary: Update a label
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditLabelOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.EditLabelOption)
+ l, err := issues_model.GetLabelInOrgByID(ctx, ctx.Org.Organization.ID, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if issues_model.IsErrOrgLabelNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetLabelByRepoID", err)
+ }
+ return
+ }
+
+ if form.Name != nil {
+ l.Name = *form.Name
+ }
+ if form.Exclusive != nil {
+ l.Exclusive = *form.Exclusive
+ }
+ if form.Color != nil {
+ color, err := label.NormalizeColor(*form.Color)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "Color", err)
+ return
+ }
+ l.Color = color
+ }
+ if form.Description != nil {
+ l.Description = *form.Description
+ }
+ l.SetArchived(form.IsArchived != nil && *form.IsArchived)
+ if err := issues_model.UpdateLabel(ctx, l); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateLabel", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabel(l, nil, ctx.Org.Organization.AsUser()))
+}
+
+// DeleteLabel delete a label for an organization
+func DeleteLabel(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/labels/{id} organization orgDeleteLabel
+ // ---
+ // summary: Delete a label
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := issues_model.DeleteLabel(ctx, ctx.Org.Organization.ID, ctx.ParamsInt64(":id")); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteLabel", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
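
Creating an org label is a plain JSON POST; colors go through label.NormalizeColor, so malformed values come back as 422. A sketch with placeholder org and token; field names are assumed from the CreateLabelOption definition:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"name":        "kind/bug",
		"color":       "#ee0701",
		"description": "Something is broken",
	})

	req, _ := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/orgs/my-org/labels", bytes.NewReader(payload)) // placeholders
	req.Header.Set("Authorization", "token REPLACE_ME")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 201 with the created label, 422 on a bad color
}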
diff --git a/routers/api/v1/org/member.go b/routers/api/v1/org/member.go
new file mode 100644
index 0000000..fb66d4c
--- /dev/null
+++ b/routers/api/v1/org/member.go
@@ -0,0 +1,325 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "net/http"
+ "net/url"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/user"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// listMembers list an organization's members
+func listMembers(ctx *context.APIContext, publicOnly bool) {
+ opts := &organization.FindOrgMembersOpts{
+ OrgID: ctx.Org.Organization.ID,
+ PublicOnly: publicOnly,
+ ListOptions: utils.GetListOptions(ctx),
+ }
+
+ count, err := organization.CountOrgMembers(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ members, _, err := organization.FindOrgMembers(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiMembers := make([]*api.User, len(members))
+ for i, member := range members {
+ apiMembers[i] = convert.ToUser(ctx, member, ctx.Doer)
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiMembers)
+}
+
+// ListMembers list an organization's members
+func ListMembers(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/members organization orgListMembers
+ // ---
+ // summary: List an organization's members
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
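+ // Non-public members are only visible to organization members and instance admins.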
+ publicOnly := true
+ if ctx.Doer != nil {
+ isMember, err := ctx.Org.Organization.IsOrgMember(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrgMember", err)
+ return
+ }
+ publicOnly = !isMember && !ctx.Doer.IsAdmin
+ }
+ listMembers(ctx, publicOnly)
+}
+
+// ListPublicMembers lists an organization's public members
+func ListPublicMembers(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/public_members organization orgListPublicMembers
+ // ---
+ // summary: List an organization's public members
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listMembers(ctx, true)
+}
+
+// IsMember checks whether a user is a member of an organization
+func IsMember(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/members/{username} organization orgIsMember
+ // ---
+ // summary: Check if a user is a member of an organization
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: user is a member
+ // "303":
+ // description: redirection to /orgs/{org}/public_members/{username}
+ // "404":
+ // description: user is not a member
+
+ userToCheck := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if ctx.Doer != nil {
+ userIsMember, err := ctx.Org.Organization.IsOrgMember(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrgMember", err)
+ return
+ } else if userIsMember || ctx.Doer.IsAdmin {
+ userToCheckIsMember, err := ctx.Org.Organization.IsOrgMember(ctx, userToCheck.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrgMember", err)
+ } else if userToCheckIsMember {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+ return
+ } else if ctx.Doer.ID == userToCheck.ID {
+ ctx.NotFound()
+ return
+ }
+ }
+
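+ // Requesters who cannot see private membership (anonymous users, or non-members asking about someone else) are redirected to the public membership endpoint.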
+ redirectURL := setting.AppSubURL + "/api/v1/orgs/" + url.PathEscape(ctx.Org.Organization.Name) + "/public_members/" + url.PathEscape(userToCheck.Name)
+ ctx.Redirect(redirectURL)
+}
+
+// IsPublicMember checks whether a user is a public member of an organization
+func IsPublicMember(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/public_members/{username} organization orgIsPublicMember
+ // ---
+ // summary: Check if a user is a public member of an organization
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: user is a public member
+ // "404":
+ // description: user is not a public member
+
+ userToCheck := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ is, err := organization.IsPublicMembership(ctx, ctx.Org.Organization.ID, userToCheck.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsPublicMembership", err)
+ return
+ }
+ if is {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+}
+
+// PublicizeMember makes a member's membership public
+func PublicizeMember(ctx *context.APIContext) {
+ // swagger:operation PUT /orgs/{org}/public_members/{username} organization orgPublicizeMember
+ // ---
+ // summary: Publicize a user's membership
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: membership publicized
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ userToPublicize := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if userToPublicize.ID != ctx.Doer.ID {
+ ctx.Error(http.StatusForbidden, "", "Cannot publicize another member")
+ return
+ }
+ err := organization.ChangeOrgUserStatus(ctx, ctx.Org.Organization.ID, userToPublicize.ID, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeOrgUserStatus", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// ConcealMember hides a member's membership from the public
+func ConcealMember(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/public_members/{username} organization orgConcealMember
+ // ---
+ // summary: Conceal a user's membership
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ userToConceal := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if userToConceal.ID != ctx.Doer.ID {
+ ctx.Error(http.StatusForbidden, "", "Cannot conceal another member")
+ return
+ }
+ err := organization.ChangeOrgUserStatus(ctx, ctx.Org.Organization.ID, userToConceal.ID, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeOrgUserStatus", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteMember removes a member from an organization
+func DeleteMember(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org}/members/{username} organization orgDeleteMember
+ // ---
+ // summary: Remove a member from an organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: member removed
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ member := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if err := models.RemoveOrgUser(ctx, ctx.Org.Organization.ID, member.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "RemoveOrgUser", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/org/org.go b/routers/api/v1/org/org.go
new file mode 100644
index 0000000..3623b85
--- /dev/null
+++ b/routers/api/v1/org/org.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "fmt"
+ "net/http"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/user"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/org"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+func listUserOrgs(ctx *context.APIContext, u *user_model.User) {
+ listOptions := utils.GetListOptions(ctx)
+ showPrivate := ctx.IsSigned && (ctx.Doer.IsAdmin || ctx.Doer.ID == u.ID)
+
+ opts := organization.FindOrgOptions{
+ ListOptions: listOptions,
+ UserID: u.ID,
+ IncludePrivate: showPrivate,
+ }
+ orgs, maxResults, err := db.FindAndCount[organization.Organization](ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "db.FindAndCount[organization.Organization]", err)
+ return
+ }
+
+ apiOrgs := make([]*api.Organization, len(orgs))
+ for i := range orgs {
+ apiOrgs[i] = convert.ToOrganization(ctx, orgs[i])
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &apiOrgs)
+}
+
+// ListMyOrgs lists the current user's organizations
+func ListMyOrgs(ctx *context.APIContext) {
+ // swagger:operation GET /user/orgs organization orgListCurrentUserOrgs
+ // ---
+ // summary: List the current user's organizations
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OrganizationList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listUserOrgs(ctx, ctx.Doer)
+}
+
+// ListUserOrgs lists a user's organizations
+func ListUserOrgs(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/orgs organization orgListUserOrgs
+ // ---
+ // summary: List a user's organizations
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OrganizationList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listUserOrgs(ctx, ctx.ContextUser)
+}
+
+// GetUserOrgsPermissions gets a user's permissions in an organization
+func GetUserOrgsPermissions(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/orgs/{org}/permissions organization orgGetUserPermissions
+ // ---
+ // summary: Get user permissions in organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OrganizationPermissions"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ var o *user_model.User
+ if o = user.GetUserByParamsName(ctx, ":org"); o == nil {
+ return
+ }
+
+ op := api.OrganizationPermissions{}
+
+ if !organization.HasOrgOrUserVisible(ctx, o, ctx.ContextUser) {
+ ctx.NotFound("HasOrgOrUserVisible", nil)
+ return
+ }
+
+ org := organization.OrgFromUser(o)
+ authorizeLevel, err := org.GetOrgUserMaxAuthorizeLevel(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetOrgUserMaxAuthorizeLevel", err)
+ return
+ }
+
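+ // Map the user's highest access mode in the organization onto the coarse permission flags.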
+ if authorizeLevel > perm.AccessModeNone {
+ op.CanRead = true
+ }
+ if authorizeLevel > perm.AccessModeRead {
+ op.CanWrite = true
+ }
+ if authorizeLevel > perm.AccessModeWrite {
+ op.IsAdmin = true
+ }
+ if authorizeLevel > perm.AccessModeAdmin {
+ op.IsOwner = true
+ }
+
+ op.CanCreateRepository, err = org.CanCreateOrgRepo(ctx, ctx.ContextUser.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CanCreateOrgRepo", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, op)
+}
+
+// GetAll returns the list of all public organizations
+func GetAll(ctx *context.APIContext) {
+ // swagger:operation GET /orgs organization orgGetAll
+ // ---
+ // summary: Get list of organizations
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OrganizationList"
+
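+ // Anonymous requesters see only public organizations; signed-in users (without a public-only token scope) also see limited ones, and instance admins additionally see private ones.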
+ vMode := []api.VisibleType{api.VisibleTypePublic}
+ if ctx.IsSigned && !ctx.PublicOnly {
+ vMode = append(vMode, api.VisibleTypeLimited)
+ if ctx.Doer.IsAdmin {
+ vMode = append(vMode, api.VisibleTypePrivate)
+ }
+ }
+
+ listOptions := utils.GetListOptions(ctx)
+
+ publicOrgs, maxResults, err := user_model.SearchUsers(ctx, &user_model.SearchUserOptions{
+ Actor: ctx.Doer,
+ ListOptions: listOptions,
+ Type: user_model.UserTypeOrganization,
+ OrderBy: db.SearchOrderByAlphabetically,
+ Visible: vMode,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchOrganizations", err)
+ return
+ }
+ orgs := make([]*api.Organization, len(publicOrgs))
+ for i := range publicOrgs {
+ orgs[i] = convert.ToOrganization(ctx, organization.OrgFromUser(publicOrgs[i]))
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &orgs)
+}
+
+// Create creates an organization
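+//
+// A minimal request might look like this (hypothetical host and token; the
+// JSON fields follow the CreateOrgOption definition):
+//
+//   curl -X POST https://forgejo.example.com/api/v1/orgs \
+//     -H "Authorization: token <token>" \
+//     -H "Content-Type: application/json" \
+//     -d '{"username": "my-org", "visibility": "public"}'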
+func Create(ctx *context.APIContext) {
+ // swagger:operation POST /orgs organization orgCreate
+ // ---
+ // summary: Create an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: organization
+ // in: body
+ // required: true
+ // schema: { "$ref": "#/definitions/CreateOrgOption" }
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Organization"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.CreateOrgOption)
+ if !ctx.Doer.CanCreateOrganization() {
+ ctx.Error(http.StatusForbidden, "Create organization not allowed", nil)
+ return
+ }
+
+ visibility := api.VisibleTypePublic
+ if form.Visibility != "" {
+ visibility = api.VisibilityModes[form.Visibility]
+ }
+
+ org := &organization.Organization{
+ Name: form.UserName,
+ FullName: form.FullName,
+ Email: form.Email,
+ Description: form.Description,
+ Website: form.Website,
+ Location: form.Location,
+ IsActive: true,
+ Type: user_model.UserTypeOrganization,
+ Visibility: visibility,
+ RepoAdminChangeTeamAccess: form.RepoAdminChangeTeamAccess,
+ }
+ if err := organization.CreateOrganization(ctx, org, ctx.Doer); err != nil {
+ if user_model.IsErrUserAlreadyExist(err) ||
+ db.IsErrNameReserved(err) ||
+ db.IsErrNameCharsNotAllowed(err) ||
+ db.IsErrNamePatternNotAllowed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateOrganization", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToOrganization(ctx, org))
+}
+
+// Get returns an organization
+func Get(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org} organization orgGet
+ // ---
+ // summary: Get an organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Organization"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !organization.HasOrgOrUserVisible(ctx, ctx.Org.Organization.AsUser(), ctx.Doer) {
+ ctx.NotFound("HasOrgOrUserVisible", nil)
+ return
+ }
+
+ org := convert.ToOrganization(ctx, ctx.Org.Organization)
+
+ // Don't show the email address when the requester is not logged in
+ if ctx.Doer == nil {
+ org.Email = ""
+ }
+
+ ctx.JSON(http.StatusOK, org)
+}
+
+// Edit changes an organization's information
+func Edit(ctx *context.APIContext) {
+ // swagger:operation PATCH /orgs/{org} organization orgEdit
+ // ---
+ // summary: Edit an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization to edit
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/EditOrgOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Organization"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditOrgOption)
+
+ if form.Email != "" {
+ if err := user_service.ReplacePrimaryEmailAddress(ctx, ctx.Org.Organization.AsUser(), form.Email); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReplacePrimaryEmailAddress", err)
+ return
+ }
+ }
+
+ opts := &user_service.UpdateOptions{
+ FullName: optional.Some(form.FullName),
+ Description: optional.Some(form.Description),
+ Website: optional.Some(form.Website),
+ Location: optional.Some(form.Location),
+ Visibility: optional.FromNonDefault(api.VisibilityModes[form.Visibility]),
+ RepoAdminChangeTeamAccess: optional.FromPtr(form.RepoAdminChangeTeamAccess),
+ }
+ if err := user_service.UpdateUser(ctx, ctx.Org.Organization.AsUser(), opts); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateUser", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToOrganization(ctx, ctx.Org.Organization))
+}
+
+// Delete deletes an organization
+func Delete(ctx *context.APIContext) {
+ // swagger:operation DELETE /orgs/{org} organization orgDelete
+ // ---
+ // summary: Delete an organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: organization that is to be deleted
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := org.DeleteOrganization(ctx, ctx.Org.Organization, false); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteOrganization", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
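+// ListOrgActivityFeeds lists an organization's activity feeds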
+func ListOrgActivityFeeds(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/activities/feeds organization orgListActivityFeeds
+ // ---
+ // summary: List an organization's activity feeds
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the org
+ // type: string
+ // required: true
+ // - name: date
+ // in: query
+ // description: the date of the activities to be found
+ // type: string
+ // format: date
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityFeedsList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
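+ // Private activity is only included for instance admins and organization members.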
+ includePrivate := false
+ if ctx.IsSigned {
+ if ctx.Doer.IsAdmin {
+ includePrivate = true
+ } else {
+ org := organization.OrgFromUser(ctx.ContextUser)
+ isMember, err := org.IsOrgMember(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrgMember", err)
+ return
+ }
+ includePrivate = isMember
+ }
+ }
+
+ listOptions := utils.GetListOptions(ctx)
+
+ opts := activities_model.GetFeedsOptions{
+ RequestedUser: ctx.ContextUser,
+ Actor: ctx.Doer,
+ IncludePrivate: includePrivate,
+ Date: ctx.FormString("date"),
+ ListOptions: listOptions,
+ }
+
+ feeds, count, err := activities_model.GetFeeds(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetFeeds", err)
+ return
+ }
+ ctx.SetTotalCountHeader(count)
+
+ ctx.JSON(http.StatusOK, convert.ToActivities(ctx, feeds, ctx.Doer))
+}
+
+// ListBlockedUsers lists the organization's blocked users.
+func ListBlockedUsers(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/list_blocked organization orgListBlockedUsers
+ // ---
+ // summary: List the organization's blocked users
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the org
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BlockedUserList"
+
+ utils.ListUserBlockedUsers(ctx, ctx.ContextUser)
+}
+
+// BlockUser blocks a user from the organization.
+func BlockUser(ctx *context.APIContext) {
+ // swagger:operation PUT /orgs/{org}/block/{username} organization orgBlockUser
+ // ---
+ // summary: Blocks a user from the organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the org
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization, not a user", ctx.ContextUser.Name))
+ return
+ }
+
+ utils.BlockUser(ctx, ctx.Org.Organization.AsUser(), ctx.ContextUser)
+}
+
+// UnblockUser unblocks a user from the organization.
+func UnblockUser(ctx *context.APIContext) {
+ // swagger:operation PUT /orgs/{org}/unblock/{username} organization orgUnblockUser
+ // ---
+ // summary: Unblock a user from the organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the org
+ // type: string
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization, not a user", ctx.ContextUser.Name))
+ return
+ }
+
+ utils.UnblockUser(ctx, ctx.Org.Organization.AsUser(), ctx.ContextUser)
+}
diff --git a/routers/api/v1/org/quota.go b/routers/api/v1/org/quota.go
new file mode 100644
index 0000000..57c41f5
--- /dev/null
+++ b/routers/api/v1/org/quota.go
@@ -0,0 +1,155 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/services/context"
+)
+
+// GetQuota returns the quota information for a given organization
+func GetQuota(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/quota organization orgGetQuota
+ // ---
+ // summary: Get quota information for an organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaInfo"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ shared.GetQuota(ctx, ctx.Org.Organization.ID)
+}
+
+// CheckQuota returns whether the organization in context is over the subject quota
+func CheckQuota(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/quota/check organization orgCheckQuota
+ // ---
+ // summary: Check if the organization is over quota for a given subject
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/boolean"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ shared.CheckQuota(ctx, ctx.Org.Organization.ID)
+}
+
+// ListQuotaAttachments lists attachments affecting the organization's quota
+func ListQuotaAttachments(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/quota/attachments organization orgListQuotaAttachments
+ // ---
+ // summary: List the attachments affecting the organization's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedAttachmentList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ shared.ListQuotaAttachments(ctx, ctx.Org.Organization.ID)
+}
+
+// ListQuotaPackages lists packages affecting the organization's quota
+func ListQuotaPackages(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/quota/packages organization orgListQuotaPackages
+ // ---
+ // summary: List the packages affecting the organization's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedPackageList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ shared.ListQuotaPackages(ctx, ctx.Org.Organization.ID)
+}
+
+// ListQuotaArtifacts lists artifacts affecting the organization's quota
+func ListQuotaArtifacts(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/quota/artifacts organization orgListQuotaArtifacts
+ // ---
+ // summary: List the artifacts affecting the organization's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedArtifactList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ shared.ListQuotaArtifacts(ctx, ctx.Org.Organization.ID)
+}
diff --git a/routers/api/v1/org/team.go b/routers/api/v1/org/team.go
new file mode 100644
index 0000000..bf28d54
--- /dev/null
+++ b/routers/api/v1/org/team.go
@@ -0,0 +1,891 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package org
+
+import (
+ "errors"
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ unit_model "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/log"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/user"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ org_service "code.gitea.io/gitea/services/org"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListTeams lists all the teams of an organization
+func ListTeams(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/teams organization orgListTeams
+ // ---
+ // summary: List an organization's teams
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TeamList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ teams, count, err := organization.SearchTeam(ctx, &organization.SearchTeamOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ OrgID: ctx.Org.Organization.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadTeams", err)
+ return
+ }
+
+ apiTeams, err := convert.ToTeams(ctx, teams, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ConvertToTeams", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiTeams)
+}
+
+// ListUserTeams lists all the teams a user belongs to
+func ListUserTeams(ctx *context.APIContext) {
+ // swagger:operation GET /user/teams user userListTeams
+ // ---
+ // summary: List all the teams a user belongs to
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TeamList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ teams, count, err := organization.SearchTeam(ctx, &organization.SearchTeamOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ UserID: ctx.Doer.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserTeams", err)
+ return
+ }
+
+ apiTeams, err := convert.ToTeams(ctx, teams, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ConvertToTeams", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiTeams)
+}
+
+// GetTeam returns a team
+func GetTeam(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id} organization orgGetTeam
+ // ---
+ // summary: Get a team
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Team"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ apiTeam, err := convert.ToTeam(ctx, ctx.Org.Team, true)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiTeam)
+}
+
+func attachTeamUnits(team *organization.Team, units []string) {
+ unitTypes, _ := unit_model.FindUnitTypes(units...)
+ team.Units = make([]*organization.TeamUnit, 0, len(units))
+ for _, tp := range unitTypes {
+ team.Units = append(team.Units, &organization.TeamUnit{
+ OrgID: team.OrgID,
+ Type: tp,
+ AccessMode: team.AccessMode,
+ })
+ }
+}
+
+func convertUnitsMap(unitsMap map[string]string) map[unit_model.Type]perm.AccessMode {
+ res := make(map[unit_model.Type]perm.AccessMode, len(unitsMap))
+ for unitKey, p := range unitsMap {
+ res[unit_model.TypeFromKey(unitKey)] = perm.ParseAccessMode(p)
+ }
+ return res
+}
+
+func attachTeamUnitsMap(team *organization.Team, unitsMap map[string]string) {
+ team.Units = make([]*organization.TeamUnit, 0, len(unitsMap))
+ for unitKey, p := range unitsMap {
+ team.Units = append(team.Units, &organization.TeamUnit{
+ OrgID: team.OrgID,
+ Type: unit_model.TypeFromKey(unitKey),
+ AccessMode: perm.ParseAccessMode(p),
+ })
+ }
+}
+
+func attachAdminTeamUnits(team *organization.Team) {
+ team.Units = make([]*organization.TeamUnit, 0, len(unit_model.AllRepoUnitTypes))
+ for _, ut := range unit_model.AllRepoUnitTypes {
+ up := perm.AccessModeAdmin
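+ // External tracker and external wiki units only support read access.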
+ if ut == unit_model.TypeExternalTracker || ut == unit_model.TypeExternalWiki {
+ up = perm.AccessModeRead
+ }
+ team.Units = append(team.Units, &organization.TeamUnit{
+ OrgID: team.OrgID,
+ Type: ut,
+ AccessMode: up,
+ })
+ }
+}
+
+// CreateTeam creates a team
+func CreateTeam(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/teams organization orgCreateTeam
+ // ---
+ // summary: Create a team
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateTeamOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Team"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.CreateTeamOption)
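+ // When the requested permission is below admin and a units map is provided, the effective access mode is the minimum across the given units.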
+ p := perm.ParseAccessMode(form.Permission)
+ if p < perm.AccessModeAdmin && len(form.UnitsMap) > 0 {
+ p = unit_model.MinUnitAccessMode(convertUnitsMap(form.UnitsMap))
+ }
+ team := &organization.Team{
+ OrgID: ctx.Org.Organization.ID,
+ Name: form.Name,
+ Description: form.Description,
+ IncludesAllRepositories: form.IncludesAllRepositories,
+ CanCreateOrgRepo: form.CanCreateOrgRepo,
+ AccessMode: p,
+ }
+
+ if team.AccessMode < perm.AccessModeAdmin {
+ if len(form.UnitsMap) > 0 {
+ attachTeamUnitsMap(team, form.UnitsMap)
+ } else if len(form.Units) > 0 {
+ attachTeamUnits(team, form.Units)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "getTeamUnits", errors.New("units permission should not be empty"))
+ return
+ }
+ } else {
+ attachAdminTeamUnits(team)
+ }
+
+ if err := models.NewTeam(ctx, team); err != nil {
+ if organization.IsErrTeamAlreadyExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "NewTeam", err)
+ }
+ return
+ }
+
+ apiTeam, err := convert.ToTeam(ctx, team, true)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, apiTeam)
+}
+
+// EditTeam edits a team
+func EditTeam(ctx *context.APIContext) {
+ // swagger:operation PATCH /teams/{id} organization orgEditTeam
+ // ---
+ // summary: Edit a team
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team to edit
+ // type: integer
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditTeamOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Team"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditTeamOption)
+ team := ctx.Org.Team
+ if err := team.LoadUnits(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if form.CanCreateOrgRepo != nil {
+ team.CanCreateOrgRepo = team.IsOwnerTeam() || *form.CanCreateOrgRepo
+ }
+
+ if len(form.Name) > 0 {
+ team.Name = form.Name
+ }
+
+ if form.Description != nil {
+ team.Description = *form.Description
+ }
+
+ isAuthChanged := false
+ isIncludeAllChanged := false
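+ // The owners team keeps its permissions; access mode changes only apply to other teams.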
+ if !team.IsOwnerTeam() && len(form.Permission) != 0 {
+ // Validate permission level.
+ p := perm.ParseAccessMode(form.Permission)
+ if p < perm.AccessModeAdmin && len(form.UnitsMap) > 0 {
+ p = unit_model.MinUnitAccessMode(convertUnitsMap(form.UnitsMap))
+ }
+
+ if team.AccessMode != p {
+ isAuthChanged = true
+ team.AccessMode = p
+ }
+
+ if form.IncludesAllRepositories != nil {
+ isIncludeAllChanged = true
+ team.IncludesAllRepositories = *form.IncludesAllRepositories
+ }
+ }
+
+ if team.AccessMode < perm.AccessModeAdmin {
+ if len(form.UnitsMap) > 0 {
+ attachTeamUnitsMap(team, form.UnitsMap)
+ } else if len(form.Units) > 0 {
+ attachTeamUnits(team, form.Units)
+ }
+ } else {
+ attachAdminTeamUnits(team)
+ }
+
+ if err := models.UpdateTeam(ctx, team, isAuthChanged, isIncludeAllChanged); err != nil {
+ ctx.Error(http.StatusInternalServerError, "EditTeam", err)
+ return
+ }
+
+ apiTeam, err := convert.ToTeam(ctx, team)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiTeam)
+}
+
+// DeleteTeam deletes a team
+func DeleteTeam(ctx *context.APIContext) {
+ // swagger:operation DELETE /teams/{id} organization orgDeleteTeam
+ // ---
+ // summary: Delete a team
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // description: team deleted
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := models.DeleteTeam(ctx, ctx.Org.Team); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteTeam", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetTeamMembers returns a team's members
+func GetTeamMembers(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id}/members organization orgListTeamMembers
+ // ---
+ // summary: List a team's members
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
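+ // Only organization members and instance admins may list a team's members.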
+ isMember, err := organization.IsOrganizationMember(ctx, ctx.Org.Team.OrgID, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrganizationMember", err)
+ return
+ } else if !isMember && !ctx.Doer.IsAdmin {
+ ctx.NotFound()
+ return
+ }
+
+ teamMembers, err := organization.GetTeamMembers(ctx, &organization.SearchMembersOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ TeamID: ctx.Org.Team.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTeamMembers", err)
+ return
+ }
+
+ members := make([]*api.User, len(teamMembers))
+ for i, member := range teamMembers {
+ members[i] = convert.ToUser(ctx, member, ctx.Doer)
+ }
+
+ ctx.SetTotalCountHeader(int64(ctx.Org.Team.NumMembers))
+ ctx.JSON(http.StatusOK, members)
+}
+
+// GetTeamMember returns a particular member of a team
+func GetTeamMember(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id}/members/{username} organization orgListTeamMember
+ // ---
+ // summary: List a particular member of team
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the member to list
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/User"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ u := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ teamID := ctx.ParamsInt64("teamid")
+ isTeamMember, err := organization.IsUserInTeams(ctx, u.ID, []int64{teamID})
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsUserInTeams", err)
+ return
+ } else if !isTeamMember {
+ ctx.NotFound()
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToUser(ctx, u, ctx.Doer))
+}
+
+// AddTeamMember adds a member to a team
+func AddTeamMember(ctx *context.APIContext) {
+ // swagger:operation PUT /teams/{id}/members/{username} organization orgAddTeamMember
+ // ---
+ // summary: Add a team member
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user to add
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ u := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if err := models.AddTeamMember(ctx, ctx.Org.Team, u.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "AddMember", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// RemoveTeamMember removes a member from a team
+func RemoveTeamMember(ctx *context.APIContext) {
+ // swagger:operation DELETE /teams/{id}/members/{username} organization orgRemoveTeamMember
+ // ---
+ // summary: Remove a team member
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: username
+ // in: path
+ // description: username of the user to remove
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ u := user.GetUserByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ if err := models.RemoveTeamMember(ctx, ctx.Org.Team, u.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "RemoveTeamMember", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetTeamRepos returns a team's repositories
+func GetTeamRepos(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id}/repos organization orgListTeamRepos
+ // ---
+ // summary: List a team's repos
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ team := ctx.Org.Team
+ teamRepos, err := organization.GetTeamRepositories(ctx, &organization.SearchTeamRepoOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ TeamID: team.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTeamRepos", err)
+ return
+ }
+ repos := make([]*api.Repository, len(teamRepos))
+ for i, repo := range teamRepos {
+ permission, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTeamRepos", err)
+ return
+ }
+ repos[i] = convert.ToRepo(ctx, repo, permission)
+ }
+ ctx.SetTotalCountHeader(int64(team.NumRepos))
+ ctx.JSON(http.StatusOK, repos)
+}
+
+// GetTeamRepo returns a particular repository of a team
+func GetTeamRepo(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id}/repos/{org}/{repo} organization orgListTeamRepo
+ // ---
+ // summary: List a particular repo of team
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: org
+ // in: path
+ // description: organization that owns the repo to list
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to list
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Repository"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := getRepositoryByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ if !organization.HasTeamRepo(ctx, ctx.Org.Team.OrgID, ctx.Org.Team.ID, repo.ID) {
+ ctx.NotFound()
+ return
+ }
+
+ permission, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTeamRepos", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToRepo(ctx, repo, permission))
+}
+
+// getRepositoryByParams gets a repository by a team's organization ID and repo name
+func getRepositoryByParams(ctx *context.APIContext) *repo_model.Repository {
+ repo, err := repo_model.GetRepositoryByName(ctx, ctx.Org.Team.OrgID, ctx.Params(":reponame"))
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByName", err)
+ }
+ return nil
+ }
+ return repo
+}
+
+// AddTeamRepository adds a repository to a team
+func AddTeamRepository(ctx *context.APIContext) {
+ // swagger:operation PUT /teams/{id}/repos/{org}/{repo} organization orgAddTeamRepository
+ // ---
+ // summary: Add a repository to a team
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: org
+ // in: path
+ // description: organization that owns the repo to add
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to add
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := getRepositoryByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if access, err := access_model.AccessLevel(ctx, ctx.Doer, repo); err != nil {
+ ctx.Error(http.StatusInternalServerError, "AccessLevel", err)
+ return
+ } else if access < perm.AccessModeAdmin {
+ ctx.Error(http.StatusForbidden, "", "Must have admin-level access to the repository")
+ return
+ }
+ if err := org_service.TeamAddRepository(ctx, ctx.Org.Team, repo); err != nil {
+ ctx.Error(http.StatusInternalServerError, "TeamAddRepository", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// RemoveTeamRepository api for removing a repository from a team
+func RemoveTeamRepository(ctx *context.APIContext) {
+ // swagger:operation DELETE /teams/{id}/repos/{org}/{repo} organization orgRemoveTeamRepository
+ // ---
+ // summary: Remove a repository from a team
+ // description: This does not delete the repository; it only removes the
+ // repository from the team.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: org
+ // in: path
+ // description: organization that owns the repo to remove
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to remove
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := getRepositoryByParams(ctx)
+ if ctx.Written() {
+ return
+ }
+ if access, err := access_model.AccessLevel(ctx, ctx.Doer, repo); err != nil {
+ ctx.Error(http.StatusInternalServerError, "AccessLevel", err)
+ return
+ } else if access < perm.AccessModeAdmin {
+ ctx.Error(http.StatusForbidden, "", "Must have admin-level access to the repository")
+ return
+ }
+ if err := repo_service.RemoveRepositoryFromTeam(ctx, ctx.Org.Team, repo.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "RemoveRepository", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// SearchTeam searches for teams within an organization
+func SearchTeam(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/teams/search organization teamSearch
+ // ---
+ // summary: Search for teams within an organization
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: q
+ // in: query
+ // description: keywords to search
+ // type: string
+ // - name: include_desc
+ // in: query
+ // description: include search within team description (defaults to true)
+ // type: boolean
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // description: "SearchResults of a successful search"
+ // schema:
+ // type: object
+ // properties:
+ // ok:
+ // type: boolean
+ // data:
+ // type: array
+ // items:
+ // "$ref": "#/definitions/Team"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listOptions := utils.GetListOptions(ctx)
+
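+ // include_desc defaults to true when the query parameter is omitted.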
+ opts := &organization.SearchTeamOptions{
+ Keyword: ctx.FormTrim("q"),
+ OrgID: ctx.Org.Organization.ID,
+ IncludeDesc: ctx.FormString("include_desc") == "" || ctx.FormBool("include_desc"),
+ ListOptions: listOptions,
+ }
+
+ // Only admins are allowed to search for all teams
+ if !ctx.Doer.IsAdmin {
+ opts.UserID = ctx.Doer.ID
+ }
+
+ teams, maxResults, err := organization.SearchTeam(ctx, opts)
+ if err != nil {
+ log.Error("SearchTeam failed: %v", err)
+ ctx.JSON(http.StatusInternalServerError, map[string]any{
+ "ok": false,
+ "error": "SearchTeam internal failure",
+ })
+ return
+ }
+
+ apiTeams, err := convert.ToTeams(ctx, teams, false)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, map[string]any{
+ "ok": true,
+ "data": apiTeams,
+ })
+}
+
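+// ListTeamActivityFeeds lists a team's activity feeds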
+func ListTeamActivityFeeds(ctx *context.APIContext) {
+ // swagger:operation GET /teams/{id}/activities/feeds organization orgListTeamActivityFeeds
+ // ---
+ // summary: List a team's activity feeds
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the team
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: date
+ // in: query
+ // description: the date of the activities to be found
+ // type: string
+ // format: date
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityFeedsList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ opts := activities_model.GetFeedsOptions{
+ RequestedTeam: ctx.Org.Team,
+ Actor: ctx.Doer,
+ IncludePrivate: true,
+ Date: ctx.FormString("date"),
+ ListOptions: listOptions,
+ }
+
+ feeds, count, err := activities_model.GetFeeds(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetFeeds", err)
+ return
+ }
+ ctx.SetTotalCountHeader(count)
+
+ ctx.JSON(http.StatusOK, convert.ToActivities(ctx, feeds, ctx.Doer))
+}
diff --git a/routers/api/v1/packages/package.go b/routers/api/v1/packages/package.go
new file mode 100644
index 0000000..b38aa13
--- /dev/null
+++ b/routers/api/v1/packages/package.go
@@ -0,0 +1,215 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+// ListPackages gets all packages of an owner
+func ListPackages(ctx *context.APIContext) {
+ // swagger:operation GET /packages/{owner} package listPackages
+ // ---
+ // summary: Gets all packages of an owner
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the packages
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: type
+ // in: query
+ // description: package type filter
+ // type: string
+ // enum: [alpine, cargo, chef, composer, conan, conda, container, cran, debian, generic, go, helm, maven, npm, nuget, pub, pypi, rpm, rubygems, swift, vagrant]
+ // - name: q
+ // in: query
+ // description: name filter
+ // type: string
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PackageList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ packageType := ctx.FormTrim("type")
+ query := ctx.FormTrim("q")
+
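+ // Internal package versions are excluded from the listing.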
+ pvs, count, err := packages.SearchVersions(ctx, &packages.PackageSearchOptions{
+ OwnerID: ctx.Package.Owner.ID,
+ Type: packages.Type(packageType),
+ Name: packages.SearchValue{Value: query},
+ IsInternal: optional.Some(false),
+ Paginator: &listOptions,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchVersions", err)
+ return
+ }
+
+ pds, err := packages.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetPackageDescriptors", err)
+ return
+ }
+
+ apiPackages := make([]*api.Package, 0, len(pds))
+ for _, pd := range pds {
+ apiPackage, err := convert.ToPackage(ctx, pd, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "Error converting package for api", err)
+ return
+ }
+ apiPackages = append(apiPackages, apiPackage)
+ }
+
+ ctx.SetLinkHeader(int(count), listOptions.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiPackages)
+}
+
+// GetPackage gets a package
+func GetPackage(ctx *context.APIContext) {
+ // swagger:operation GET /packages/{owner}/{type}/{name}/{version} package getPackage
+ // ---
+ // summary: Gets a package
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the package
+ // type: string
+ // required: true
+ // - name: type
+ // in: path
+ // description: type of the package
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of the package
+ // type: string
+ // required: true
+ // - name: version
+ // in: path
+ // description: version of the package
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Package"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ apiPackage, err := convert.ToPackage(ctx, ctx.Package.Descriptor, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "Error converting package for api", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiPackage)
+}
+
+// DeletePackage deletes a package
+func DeletePackage(ctx *context.APIContext) {
+ // swagger:operation DELETE /packages/{owner}/{type}/{name}/{version} package deletePackage
+ // ---
+ // summary: Delete a package
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the package
+ // type: string
+ // required: true
+ // - name: type
+ // in: path
+ // description: type of the package
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of the package
+ // type: string
+ // required: true
+ // - name: version
+ // in: path
+ // description: version of the package
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := packages_service.RemovePackageVersion(ctx, ctx.Doer, ctx.Package.Descriptor.Version)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "RemovePackageVersion", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListPackageFiles gets all files of a package
+func ListPackageFiles(ctx *context.APIContext) {
+ // swagger:operation GET /packages/{owner}/{type}/{name}/{version}/files package listPackageFiles
+ // ---
+ // summary: Gets all files of a package
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the package
+ // type: string
+ // required: true
+ // - name: type
+ // in: path
+ // description: type of the package
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of the package
+ // type: string
+ // required: true
+ // - name: version
+ // in: path
+ // description: version of the package
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PackageFileList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ apiPackageFiles := make([]*api.PackageFile, 0, len(ctx.Package.Descriptor.Files))
+ for _, pfd := range ctx.Package.Descriptor.Files {
+ apiPackageFiles = append(apiPackageFiles, convert.ToPackageFile(pfd))
+ }
+
+ ctx.JSON(http.StatusOK, apiPackageFiles)
+}
diff --git a/routers/api/v1/repo/action.go b/routers/api/v1/repo/action.go
new file mode 100644
index 0000000..0c7506b
--- /dev/null
+++ b/routers/api/v1/repo/action.go
@@ -0,0 +1,653 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ secret_model "code.gitea.io/gitea/models/secret"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ secret_service "code.gitea.io/gitea/services/secrets"
+)
+
+// ListActionsSecrets lists a repo's actions secrets
+func (Action) ListActionsSecrets(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/actions/secrets repository repoListActionsSecrets
+ // ---
+	// summary: List a repo's actions secrets
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repository
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/SecretList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+
+ opts := &secret_model.FindSecretsOptions{
+ RepoID: repo.ID,
+ ListOptions: utils.GetListOptions(ctx),
+ }
+
+ secrets, count, err := db.FindAndCount[secret_model.Secret](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiSecrets := make([]*api.Secret, len(secrets))
+ for k, v := range secrets {
+ apiSecrets[k] = &api.Secret{
+ Name: v.Name,
+ Created: v.CreatedUnix.AsTime(),
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiSecrets)
+}
+
+// CreateOrUpdateSecret creates or updates one secret of the repository
+func (Action) CreateOrUpdateSecret(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/actions/secrets/{secretname} repository updateRepoSecret
+ // ---
+ // summary: Create or Update a secret value in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repository
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateOrUpdateSecretOption"
+ // responses:
+ // "201":
+ // description: response when creating a secret
+ // "204":
+ // description: response when updating a secret
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+
+ opt := web.GetForm(ctx).(*api.CreateOrUpdateSecretOption)
+
+ _, created, err := secret_service.CreateOrUpdateSecret(ctx, 0, repo.ID, ctx.Params("secretname"), opt.Data)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateOrUpdateSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "CreateOrUpdateSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateOrUpdateSecret", err)
+ }
+ return
+ }
+
+ if created {
+ ctx.Status(http.StatusCreated)
+ } else {
+ ctx.Status(http.StatusNoContent)
+ }
+}
+
+// DeleteSecret deletes one secret of the repository
+func (Action) DeleteSecret(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/actions/secrets/{secretname} repository deleteRepoSecret
+ // ---
+ // summary: Delete a secret in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repository
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+	//     description: delete one secret of the repository
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+
+ err := secret_service.DeleteSecretByName(ctx, 0, repo.ID, ctx.Params("secretname"))
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteSecret", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetVariable gets a repo-level variable
+func (Action) GetVariable(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/actions/variables/{variablename} repository getRepoVariable
+ // ---
+ // summary: Get a repo-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: name of the owner
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActionVariable"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ RepoID: ctx.Repo.Repository.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ variable := &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ Data: v.Data,
+ }
+
+ ctx.JSON(http.StatusOK, variable)
+}
+
+// DeleteVariable deletes a repo-level variable
+func (Action) DeleteVariable(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/actions/variables/{variablename} repository deleteRepoVariable
+ // ---
+ // summary: Delete a repo-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: name of the owner
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActionVariable"
+ // "201":
+ // description: response when deleting a variable
+ // "204":
+ // description: response when deleting a variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := actions_service.DeleteVariableByName(ctx, 0, ctx.Repo.Repository.ID, ctx.Params("variablename")); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteVariableByName", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteVariableByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteVariableByName", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateVariable creates a repo-level variable
+func (Action) CreateVariable(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/actions/variables/{variablename} repository createRepoVariable
+ // ---
+ // summary: Create a repo-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: name of the owner
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateVariableOption"
+ // responses:
+ // "201":
+ // description: response when creating a repo-level variable
+ // "204":
+ // description: response when creating a repo-level variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.CreateVariableOption)
+
+ repoID := ctx.Repo.Repository.ID
+ variableName := ctx.Params("variablename")
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ RepoID: repoID,
+ Name: variableName,
+ })
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ return
+ }
+ if v != nil && v.ID > 0 {
+ ctx.Error(http.StatusConflict, "VariableNameAlreadyExists", util.NewAlreadyExistErrorf("variable name %s already exists", variableName))
+ return
+ }
+
+ if _, err := actions_service.CreateVariable(ctx, 0, repoID, variableName, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UpdateVariable updates a repo-level variable
+func (Action) UpdateVariable(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/actions/variables/{variablename} repository updateRepoVariable
+ // ---
+ // summary: Update a repo-level variable
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: name of the owner
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateVariableOption"
+ // responses:
+ // "201":
+ // description: response when updating a repo-level variable
+ // "204":
+ // description: response when updating a repo-level variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.UpdateVariableOption)
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ RepoID: ctx.Repo.Repository.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ if opt.Name == "" {
+ opt.Name = ctx.Params("variablename")
+ }
+ if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListVariables lists repo-level variables
+func (Action) ListVariables(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/actions/variables repository getRepoVariablesList
+ // ---
+ // summary: Get repo-level variables list
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: name of the owner
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repository
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/VariableList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ vars, count, err := db.FindAndCount[actions_model.ActionVariable](ctx, &actions_model.FindVariablesOpts{
+ RepoID: ctx.Repo.Repository.ID,
+ ListOptions: utils.GetListOptions(ctx),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindVariables", err)
+ return
+ }
+
+ variables := make([]*api.ActionVariable, len(vars))
+ for i, v := range vars {
+ variables[i] = &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, variables)
+}
+
+// GetRegistrationToken returns the token to register repo runners
+func (Action) GetRegistrationToken(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/actions/runners/registration-token repository repoGetRunnerRegistrationToken
+ // ---
+ // summary: Get a repository's actions runner registration token
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RegistrationToken"
+
+ shared.GetRegistrationToken(ctx, 0, ctx.Repo.Repository.ID)
+}
+
+var _ actions_service.API = new(Action)
+
+// Action implements actions_service.API
+type Action struct{}
+
+// NewAction creates a new Action service
+func NewAction() actions_service.API {
+ return Action{}
+}
+
+// ListActionTasks lists all the action tasks of a repository
+func ListActionTasks(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/actions/tasks repository ListActionTasks
+ // ---
+ // summary: List a repository's action tasks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results, default maximum page size is 50
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TasksList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/conflict"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ tasks, total, err := db.FindAndCount[actions_model.ActionTask](ctx, &actions_model.FindTaskOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListActionTasks", err)
+ return
+ }
+
+ res := new(api.ActionTaskResponse)
+ res.TotalCount = total
+
+ res.Entries = make([]*api.ActionTask, len(tasks))
+ for i := range tasks {
+ convertedTask, err := convert.ToActionTask(ctx, tasks[i])
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ToActionTask", err)
+ return
+ }
+ res.Entries[i] = convertedTask
+ }
+
+ ctx.JSON(http.StatusOK, &res)
+}
+
+// DispatchWorkflow dispatches a workflow
+func DispatchWorkflow(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/actions/workflows/{workflowname}/dispatches repository DispatchWorkflow
+ // ---
+ // summary: Dispatches a workflow
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: workflowname
+ // in: path
+ // description: name of the workflow
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/DispatchWorkflowOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.DispatchWorkflowOption)
+ name := ctx.Params("workflowname")
+
+ if len(opt.Ref) == 0 {
+ ctx.Error(http.StatusBadRequest, "ref", "ref is empty")
+ return
+ } else if len(name) == 0 {
+ ctx.Error(http.StatusBadRequest, "workflowname", "workflow name is empty")
+ return
+ }
+
+ workflow, err := actions_service.GetWorkflowFromCommit(ctx.Repo.GitRepo, opt.Ref, name)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetWorkflowFromCommit", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetWorkflowFromCommit", err)
+ }
+ return
+ }
+
+ inputGetter := func(key string) string {
+ return opt.Inputs[key]
+ }
+
+ if err := workflow.Dispatch(ctx, inputGetter, ctx.Repo.Repository, ctx.Doer); err != nil {
+ if actions_service.IsInputRequiredErr(err) {
+ ctx.Error(http.StatusBadRequest, "workflow.Dispatch", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "workflow.Dispatch", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusNoContent, nil)
+}
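For illustration only (not part of this patch): DispatchWorkflow above expects a JSON body and answers 204 No Content on success. The minimal Go client sketch below posts such a dispatch; the instance URL, repository, workflow file name, input name and token are placeholders, and the "ref"/"inputs" field names are assumed from the DispatchWorkflowOption definition referenced in the swagger comment.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed field names: "ref" and "inputs"; values are placeholders.
	payload, err := json.Marshal(map[string]any{
		"ref":    "main",
		"inputs": map[string]string{"greeting": "hello"},
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/some-owner/some-repo/actions/workflows/build.yaml/dispatches",
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token <personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A 204 status indicates the dispatch was accepted by the handler above.
	fmt.Println(resp.Status)
}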
diff --git a/routers/api/v1/repo/avatar.go b/routers/api/v1/repo/avatar.go
new file mode 100644
index 0000000..698337f
--- /dev/null
+++ b/routers/api/v1/repo/avatar.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "encoding/base64"
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// UpdateAvatar updates the avatar of a repository
+func UpdateAvatar(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/avatar repository repoUpdateAvatar
+ // ---
+ // summary: Update avatar
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateRepoAvatarOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.UpdateRepoAvatarOption)
+
+ content, err := base64.StdEncoding.DecodeString(form.Image)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "DecodeImage", err)
+ return
+ }
+
+	err = repo_service.UploadAvatar(ctx, ctx.Repo.Repository, content)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "UploadAvatar", err)
+		return
+	}
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteAvatar deletes the avatar of a repository
+func DeleteAvatar(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/avatar repository repoDeleteAvatar
+ // ---
+ // summary: Delete avatar
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+	err := repo_service.DeleteAvatar(ctx, ctx.Repo.Repository)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "DeleteAvatar", err)
+		return
+	}
+
+ ctx.Status(http.StatusNoContent)
+}
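For illustration only (not part of this patch): UpdateAvatar above base64-decodes the submitted image before calling UploadAvatar. The minimal Go client sketch below builds such a request; the instance URL, repository, file name and token are placeholders, and the "image" JSON field name is assumed from the UpdateRepoAvatarOption definition referenced in the swagger comment.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Placeholder image file; any small PNG/JPEG works.
	raw, err := os.ReadFile("avatar.png")
	if err != nil {
		panic(err)
	}

	// The handler base64-decodes the (assumed) "image" field before storing it.
	body, err := json.Marshal(map[string]string{
		"image": base64.StdEncoding.EncodeToString(raw),
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/some-owner/some-repo/avatar",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token <personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success
}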
diff --git a/routers/api/v1/repo/blob.go b/routers/api/v1/repo/blob.go
new file mode 100644
index 0000000..3b11666
--- /dev/null
+++ b/routers/api/v1/repo/blob.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/services/context"
+ files_service "code.gitea.io/gitea/services/repository/files"
+)
+
+// GetBlob gets the blob of a repository file.
+func GetBlob(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/blobs/{sha} repository GetBlob
+ // ---
+ // summary: Gets the blob of a repository.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: sha of the commit
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitBlobResponse"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := ctx.Params("sha")
+ if len(sha) == 0 {
+ ctx.Error(http.StatusBadRequest, "", "sha not provided")
+ return
+ }
+
+ if blob, err := files_service.GetBlobBySHA(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, sha); err != nil {
+ ctx.Error(http.StatusBadRequest, "", err)
+ } else {
+ ctx.JSON(http.StatusOK, blob)
+ }
+}
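For illustration only (not part of this patch): GetBlob above returns a GitBlobResponse whose content is typically base64 encoded. The minimal Go client sketch below fetches and decodes a blob; the instance URL, repository, SHA and token are placeholders, and the lower-case JSON field names are assumptions based on the GitBlobResponse schema referenced in the swagger comment.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder instance, repository and blob SHA.
	url := "https://forgejo.example.com/api/v1/repos/some-owner/some-repo/git/blobs/0123456789abcdef0123456789abcdef01234567"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token <personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Field names assumed from the GitBlobResponse schema.
	var blob struct {
		Content  string `json:"content"`
		Encoding string `json:"encoding"`
		Size     int64  `json:"size"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&blob); err != nil {
		panic(err)
	}

	data := []byte(blob.Content)
	if blob.Encoding == "base64" {
		if data, err = base64.StdEncoding.DecodeString(blob.Content); err != nil {
			panic(err)
		}
	}
	fmt.Printf("blob size %d bytes, decoded %d bytes\n", blob.Size, len(data))
}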
diff --git a/routers/api/v1/repo/branch.go b/routers/api/v1/repo/branch.go
new file mode 100644
index 0000000..a468fd9
--- /dev/null
+++ b/routers/api/v1/repo/branch.go
@@ -0,0 +1,1019 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/organization"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/optional"
+ repo_module "code.gitea.io/gitea/modules/repository"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ pull_service "code.gitea.io/gitea/services/pull"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// GetBranch gets a branch of a repository
+func GetBranch(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/branches/{branch} repository repoGetBranch
+ // ---
+ // summary: Retrieve a specific branch from a repository, including its effective branch protection
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: branch
+ // in: path
+ // description: branch to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Branch"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ branchName := ctx.Params("*")
+
+ branch, err := ctx.Repo.GitRepo.GetBranch(branchName)
+ if err != nil {
+ if git.IsErrBranchNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetBranch", err)
+ }
+ return
+ }
+
+ c, err := branch.GetCommit()
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommit", err)
+ return
+ }
+
+ branchProtection, err := git_model.GetFirstMatchProtectedBranchRule(ctx, ctx.Repo.Repository.ID, branchName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranchProtection", err)
+ return
+ }
+
+ br, err := convert.ToBranch(ctx, ctx.Repo.Repository, branch.Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToBranch", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, br)
+}
+
+// DeleteBranch deletes a branch of a repository
+func DeleteBranch(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/branches/{branch} repository repoDeleteBranch
+ // ---
+ // summary: Delete a specific branch from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: branch
+ // in: path
+ // description: branch to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.Error(http.StatusNotFound, "", "Git Repository is empty.")
+ return
+ }
+
+ if ctx.Repo.Repository.IsMirror {
+ ctx.Error(http.StatusForbidden, "", "Git Repository is a mirror.")
+ return
+ }
+
+ branchName := ctx.Params("*")
+
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.Error(http.StatusForbidden, "", "Git Repository is empty.")
+ return
+ }
+
+ // check whether branches of this repository has been synced
+ totalNumOfBranches, err := db.Count[git_model.Branch](ctx, git_model.FindBranchOptions{
+ RepoID: ctx.Repo.Repository.ID,
+ IsDeletedBranch: optional.Some(false),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CountBranches", err)
+ return
+ }
+ if totalNumOfBranches == 0 { // sync branches immediately because non-empty repository should have at least 1 branch
+ _, err = repo_module.SyncRepoBranches(ctx, ctx.Repo.Repository.ID, 0)
+ if err != nil {
+ ctx.ServerError("SyncRepoBranches", err)
+ return
+ }
+ }
+
+ if ctx.Repo.Repository.IsMirror {
+		ctx.Error(http.StatusForbidden, "IsMirrored", fmt.Errorf("cannot delete branch of a mirror repository"))
+ return
+ }
+
+ if err := repo_service.DeleteBranch(ctx, ctx.Doer, ctx.Repo.Repository, ctx.Repo.GitRepo, branchName); err != nil {
+ switch {
+ case git.IsErrBranchNotExist(err):
+ ctx.NotFound(err)
+ case errors.Is(err, repo_service.ErrBranchIsDefault):
+ ctx.Error(http.StatusForbidden, "DefaultBranch", fmt.Errorf("can not delete default branch"))
+ case errors.Is(err, git_model.ErrBranchIsProtected):
+ ctx.Error(http.StatusForbidden, "IsProtectedBranch", fmt.Errorf("branch protected"))
+ default:
+ ctx.Error(http.StatusInternalServerError, "DeleteBranch", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateBranch creates a branch for a user's repository
+func CreateBranch(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/branches repository repoCreateBranch
+ // ---
+ // summary: Create a branch
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateBranchRepoOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Branch"
+ // "403":
+	//     description: The repository is archived or a mirror.
+ // "404":
+ // description: The old branch does not exist.
+ // "409":
+ // description: The branch with the same name already exists.
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.Error(http.StatusNotFound, "", "Git Repository is empty.")
+ return
+ }
+
+ if ctx.Repo.Repository.IsMirror {
+ ctx.Error(http.StatusForbidden, "", "Git Repository is a mirror.")
+ return
+ }
+
+ opt := web.GetForm(ctx).(*api.CreateBranchRepoOption)
+
+ var oldCommit *git.Commit
+ var err error
+
+ if len(opt.OldRefName) > 0 {
+ oldCommit, err = ctx.Repo.GitRepo.GetCommit(opt.OldRefName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommit", err)
+ return
+ }
+ } else if len(opt.OldBranchName) > 0 { //nolint
+ if ctx.Repo.GitRepo.IsBranchExist(opt.OldBranchName) { //nolint
+ oldCommit, err = ctx.Repo.GitRepo.GetBranchCommit(opt.OldBranchName) //nolint
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranchCommit", err)
+ return
+ }
+ } else {
+ ctx.Error(http.StatusNotFound, "", "The old branch does not exist")
+ return
+ }
+ } else {
+ oldCommit, err = ctx.Repo.GitRepo.GetBranchCommit(ctx.Repo.Repository.DefaultBranch)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranchCommit", err)
+ return
+ }
+ }
+
+ err = repo_service.CreateNewBranchFromCommit(ctx, ctx.Doer, ctx.Repo.Repository, ctx.Repo.GitRepo, oldCommit.ID.String(), opt.BranchName)
+ if err != nil {
+ if git_model.IsErrBranchNotExist(err) {
+ ctx.Error(http.StatusNotFound, "", "The old branch does not exist")
+ } else if models.IsErrTagAlreadyExists(err) {
+			ctx.Error(http.StatusConflict, "", "A tag with the same name already exists.")
+ } else if git_model.IsErrBranchAlreadyExists(err) || git.IsErrPushOutOfDate(err) {
+ ctx.Error(http.StatusConflict, "", "The branch already exists.")
+ } else if git_model.IsErrBranchNameConflict(err) {
+ ctx.Error(http.StatusConflict, "", "The branch with the same name already exists.")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateNewBranchFromCommit", err)
+ }
+ return
+ }
+
+ branch, err := ctx.Repo.GitRepo.GetBranch(opt.BranchName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranch", err)
+ return
+ }
+
+ commit, err := branch.GetCommit()
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommit", err)
+ return
+ }
+
+ branchProtection, err := git_model.GetFirstMatchProtectedBranchRule(ctx, ctx.Repo.Repository.ID, branch.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranchProtection", err)
+ return
+ }
+
+ br, err := convert.ToBranch(ctx, ctx.Repo.Repository, branch.Name, commit, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToBranch", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, br)
+}
+
+// ListBranches lists all the branches of a repository
+func ListBranches(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/branches repository repoListBranches
+ // ---
+ // summary: List a repository's branches
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BranchList"
+
+ var totalNumOfBranches int64
+ var apiBranches []*api.Branch
+
+ listOptions := utils.GetListOptions(ctx)
+
+ if !ctx.Repo.Repository.IsEmpty {
+ if ctx.Repo.GitRepo == nil {
+ ctx.Error(http.StatusInternalServerError, "Load git repository failed", nil)
+ return
+ }
+
+ branchOpts := git_model.FindBranchOptions{
+ ListOptions: listOptions,
+ RepoID: ctx.Repo.Repository.ID,
+ IsDeletedBranch: optional.Some(false),
+ }
+ var err error
+ totalNumOfBranches, err = db.Count[git_model.Branch](ctx, branchOpts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CountBranches", err)
+ return
+ }
+ if totalNumOfBranches == 0 { // sync branches immediately because non-empty repository should have at least 1 branch
+ totalNumOfBranches, err = repo_module.SyncRepoBranches(ctx, ctx.Repo.Repository.ID, 0)
+ if err != nil {
+ ctx.ServerError("SyncRepoBranches", err)
+ return
+ }
+ }
+
+ rules, err := git_model.FindRepoProtectedBranchRules(ctx, ctx.Repo.Repository.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindMatchedProtectedBranchRules", err)
+ return
+ }
+
+ branches, err := db.Find[git_model.Branch](ctx, branchOpts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranches", err)
+ return
+ }
+
+ apiBranches = make([]*api.Branch, 0, len(branches))
+ for i := range branches {
+ c, err := ctx.Repo.GitRepo.GetBranchCommit(branches[i].Name)
+ if err != nil {
+ // Skip if this branch doesn't exist anymore.
+ if git.IsErrNotExist(err) {
+ totalNumOfBranches--
+ continue
+ }
+ ctx.Error(http.StatusInternalServerError, "GetCommit", err)
+ return
+ }
+
+ branchProtection := rules.GetFirstMatched(branches[i].Name)
+ apiBranch, err := convert.ToBranch(ctx, ctx.Repo.Repository, branches[i].Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToBranch", err)
+ return
+ }
+ apiBranches = append(apiBranches, apiBranch)
+ }
+ }
+
+ ctx.SetLinkHeader(int(totalNumOfBranches), listOptions.PageSize)
+ ctx.SetTotalCountHeader(totalNumOfBranches)
+ ctx.JSON(http.StatusOK, apiBranches)
+}
+
+// GetBranchProtection gets a branch protection
+func GetBranchProtection(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection
+ // ---
+ // summary: Get a specific branch protection for the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of protected branch
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BranchProtection"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+ bpName := ctx.Params(":name")
+ bp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranchByID", err)
+ return
+ }
+ if bp == nil || bp.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToBranchProtection(ctx, bp, repo))
+}
+
+// ListBranchProtections lists branch protections for a repo
+func ListBranchProtections(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/branch_protections repository repoListBranchProtection
+ // ---
+ // summary: List branch protections for a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BranchProtectionList"
+
+ repo := ctx.Repo.Repository
+ bps, err := git_model.FindRepoProtectedBranchRules(ctx, repo.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranches", err)
+ return
+ }
+ apiBps := make([]*api.BranchProtection, len(bps))
+ for i := range bps {
+ apiBps[i] = convert.ToBranchProtection(ctx, bps[i], repo)
+ }
+
+ ctx.JSON(http.StatusOK, apiBps)
+}
+
+// CreateBranchProtection creates a branch protection for a repo
+func CreateBranchProtection(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection
+ // ---
+	// summary: Create a branch protection for a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateBranchProtectionOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/BranchProtection"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := web.GetForm(ctx).(*api.CreateBranchProtectionOption)
+ repo := ctx.Repo.Repository
+
+ ruleName := form.RuleName
+ if ruleName == "" {
+ ruleName = form.BranchName //nolint
+ }
+ if len(ruleName) == 0 {
+ ctx.Error(http.StatusBadRequest, "both rule_name and branch_name are empty", "both rule_name and branch_name are empty")
+ return
+ }
+
+ isPlainRule := !git_model.IsRuleNameSpecial(ruleName)
+ var isBranchExist bool
+ if isPlainRule {
+ isBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)
+ }
+
+ protectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectBranchOfRepoByName", err)
+ return
+ } else if protectBranch != nil {
+		ctx.Error(http.StatusForbidden, "Create branch protection", "Branch protection already exists")
+ return
+ }
+
+ var requiredApprovals int64
+ if form.RequiredApprovals > 0 {
+ requiredApprovals = form.RequiredApprovals
+ }
+
+ whitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ mergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ approvalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ var whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64
+ if repo.Owner.IsOrganization() {
+ whitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.PushWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ mergeWhitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.MergeWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ approvalsWhitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.ApprovalsWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ }
+
+ protectBranch = &git_model.ProtectedBranch{
+ RepoID: ctx.Repo.Repository.ID,
+ RuleName: ruleName,
+ CanPush: form.EnablePush,
+ EnableWhitelist: form.EnablePush && form.EnablePushWhitelist,
+ EnableMergeWhitelist: form.EnableMergeWhitelist,
+ WhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,
+ EnableStatusCheck: form.EnableStatusCheck,
+ StatusCheckContexts: form.StatusCheckContexts,
+ EnableApprovalsWhitelist: form.EnableApprovalsWhitelist,
+ RequiredApprovals: requiredApprovals,
+ BlockOnRejectedReviews: form.BlockOnRejectedReviews,
+ BlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,
+ DismissStaleApprovals: form.DismissStaleApprovals,
+ IgnoreStaleApprovals: form.IgnoreStaleApprovals,
+ RequireSignedCommits: form.RequireSignedCommits,
+ ProtectedFilePatterns: form.ProtectedFilePatterns,
+ UnprotectedFilePatterns: form.UnprotectedFilePatterns,
+ BlockOnOutdatedBranch: form.BlockOnOutdatedBranch,
+ ApplyToAdmins: form.ApplyToAdmins,
+ }
+
+ err = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{
+ UserIDs: whitelistUsers,
+ TeamIDs: whitelistTeams,
+ MergeUserIDs: mergeWhitelistUsers,
+ MergeTeamIDs: mergeWhitelistTeams,
+ ApprovalsUserIDs: approvalsWhitelistUsers,
+ ApprovalsTeamIDs: approvalsWhitelistTeams,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateProtectBranch", err)
+ return
+ }
+
+ if isBranchExist {
+ if err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CheckPRsForBaseBranch", err)
+ return
+ }
+ } else {
+ if !isPlainRule {
+ if ctx.Repo.GitRepo == nil {
+ ctx.Repo.GitRepo, err = gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ return
+ }
+ defer func() {
+ ctx.Repo.GitRepo.Close()
+ ctx.Repo.GitRepo = nil
+ }()
+ }
+ // FIXME: since we only need to recheck files protected rules, we could improve this
+ matchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindAllMatchedBranches", err)
+ return
+ }
+
+ for _, branchName := range matchedBranches {
+ if err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CheckPRsForBaseBranch", err)
+ return
+ }
+ }
+ }
+ }
+
+ // Reload from db to get all whitelists
+ bp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranchByID", err)
+ return
+ }
+ if bp == nil || bp.RepoID != ctx.Repo.Repository.ID {
+ ctx.Error(http.StatusInternalServerError, "New branch protection not found", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToBranchProtection(ctx, bp, repo))
+}
+
+// EditBranchProtection edits a branch protection for a repo
+func EditBranchProtection(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection
+ // ---
+	// summary: Edit a branch protection for a repository. Only fields that are set will be changed
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of protected branch
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditBranchProtectionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BranchProtection"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ form := web.GetForm(ctx).(*api.EditBranchProtectionOption)
+ repo := ctx.Repo.Repository
+ bpName := ctx.Params(":name")
+ protectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranchByID", err)
+ return
+ }
+ if protectBranch == nil || protectBranch.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if form.EnablePush != nil {
+ if !*form.EnablePush {
+ protectBranch.CanPush = false
+ protectBranch.EnableWhitelist = false
+ protectBranch.WhitelistDeployKeys = false
+ } else {
+ protectBranch.CanPush = true
+ if form.EnablePushWhitelist != nil {
+ if !*form.EnablePushWhitelist {
+ protectBranch.EnableWhitelist = false
+ protectBranch.WhitelistDeployKeys = false
+ } else {
+ protectBranch.EnableWhitelist = true
+ if form.PushWhitelistDeployKeys != nil {
+ protectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys
+ }
+ }
+ }
+ }
+ }
+
+ if form.EnableMergeWhitelist != nil {
+ protectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist
+ }
+
+ if form.EnableStatusCheck != nil {
+ protectBranch.EnableStatusCheck = *form.EnableStatusCheck
+ }
+
+ if form.StatusCheckContexts != nil {
+ protectBranch.StatusCheckContexts = form.StatusCheckContexts
+ }
+
+ if form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {
+ protectBranch.RequiredApprovals = *form.RequiredApprovals
+ }
+
+ if form.EnableApprovalsWhitelist != nil {
+ protectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist
+ }
+
+ if form.BlockOnRejectedReviews != nil {
+ protectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews
+ }
+
+ if form.BlockOnOfficialReviewRequests != nil {
+ protectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests
+ }
+
+ if form.DismissStaleApprovals != nil {
+ protectBranch.DismissStaleApprovals = *form.DismissStaleApprovals
+ }
+
+ if form.IgnoreStaleApprovals != nil {
+ protectBranch.IgnoreStaleApprovals = *form.IgnoreStaleApprovals
+ }
+
+ if form.RequireSignedCommits != nil {
+ protectBranch.RequireSignedCommits = *form.RequireSignedCommits
+ }
+
+ if form.ProtectedFilePatterns != nil {
+ protectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns
+ }
+
+ if form.UnprotectedFilePatterns != nil {
+ protectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns
+ }
+
+ if form.BlockOnOutdatedBranch != nil {
+ protectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch
+ }
+
+ if form.ApplyToAdmins != nil {
+ protectBranch.ApplyToAdmins = *form.ApplyToAdmins
+ }
+
+ var whitelistUsers []int64
+ if form.PushWhitelistUsernames != nil {
+ whitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ } else {
+ whitelistUsers = protectBranch.WhitelistUserIDs
+ }
+ var mergeWhitelistUsers []int64
+ if form.MergeWhitelistUsernames != nil {
+ mergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ } else {
+ mergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs
+ }
+ var approvalsWhitelistUsers []int64
+ if form.ApprovalsWhitelistUsernames != nil {
+ approvalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ } else {
+ approvalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs
+ }
+
+ var whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64
+ if repo.Owner.IsOrganization() {
+ if form.PushWhitelistTeams != nil {
+ whitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.PushWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ } else {
+ whitelistTeams = protectBranch.WhitelistTeamIDs
+ }
+ if form.MergeWhitelistTeams != nil {
+ mergeWhitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.MergeWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ } else {
+ mergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs
+ }
+ if form.ApprovalsWhitelistTeams != nil {
+ approvalsWhitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.ApprovalsWhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ } else {
+ approvalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs
+ }
+ }
+
+ err = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{
+ UserIDs: whitelistUsers,
+ TeamIDs: whitelistTeams,
+ MergeUserIDs: mergeWhitelistUsers,
+ MergeTeamIDs: mergeWhitelistTeams,
+ ApprovalsUserIDs: approvalsWhitelistUsers,
+ ApprovalsTeamIDs: approvalsWhitelistTeams,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateProtectBranch", err)
+ return
+ }
+
+ isPlainRule := !git_model.IsRuleNameSpecial(bpName)
+ var isBranchExist bool
+ if isPlainRule {
+ isBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)
+ }
+
+ if isBranchExist {
+ if err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CheckPrsForBaseBranch", err)
+ return
+ }
+ } else {
+ if !isPlainRule {
+ if ctx.Repo.GitRepo == nil {
+ ctx.Repo.GitRepo, err = gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ return
+ }
+ defer func() {
+ ctx.Repo.GitRepo.Close()
+ ctx.Repo.GitRepo = nil
+ }()
+ }
+
+ // FIXME: since we only need to recheck files protected rules, we could improve this
+ matchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindAllMatchedBranches", err)
+ return
+ }
+
+ for _, branchName := range matchedBranches {
+ if err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CheckPrsForBaseBranch", err)
+ return
+ }
+ }
+ }
+ }
+
+ // Reload from db to ensure get all whitelists
+ bp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranchBy", err)
+ return
+ }
+ if bp == nil || bp.RepoID != ctx.Repo.Repository.ID {
+ ctx.Error(http.StatusInternalServerError, "New branch protection not found", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToBranchProtection(ctx, bp, repo))
+}
+
+// DeleteBranchProtection deletes a branch protection for a repo
+func DeleteBranchProtection(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection
+ // ---
+ // summary: Delete a specific branch protection for the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: name of protected branch
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+ bpName := ctx.Params(":name")
+ bp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedBranchByID", err)
+ return
+ }
+ if bp == nil || bp.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository, bp.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteProtectedBranch", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
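For illustration only (not part of this patch): ListBranches above paginates via the page and limit query parameters and reports the total through the X-Total-Count header set by SetTotalCountHeader. The minimal Go client sketch below lists one page of branches; the instance URL, repository and token are placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder instance and repository; page/limit exercise the list options.
	url := "https://forgejo.example.com/api/v1/repos/some-owner/some-repo/branches?page=1&limit=20"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token <personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode generically; the handler returns an array of Branch objects and
	// sets the overall total in the X-Total-Count header.
	var branches []map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&branches); err != nil {
		panic(err)
	}
	fmt.Printf("%d branch(es) on this page, total %s\n",
		len(branches), resp.Header.Get("X-Total-Count"))
}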
diff --git a/routers/api/v1/repo/collaborators.go b/routers/api/v1/repo/collaborators.go
new file mode 100644
index 0000000..a43a21a
--- /dev/null
+++ b/routers/api/v1/repo/collaborators.go
@@ -0,0 +1,370 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ repo_module "code.gitea.io/gitea/modules/repository"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListCollaborators lists a repository's collaborators
+func ListCollaborators(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/collaborators repository repoListCollaborators
+ // ---
+ // summary: List a repository's collaborators
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ count, err := db.Count[repo_model.Collaboration](ctx, repo_model.FindCollaborationOptions{
+ RepoID: ctx.Repo.Repository.ID,
+ })
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ collaborators, err := repo_model.GetCollaborators(ctx, ctx.Repo.Repository.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListCollaborators", err)
+ return
+ }
+
+ users := make([]*api.User, len(collaborators))
+ for i, collaborator := range collaborators {
+ users[i] = convert.ToUser(ctx, collaborator.User, ctx.Doer)
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, users)
+}
+
+// IsCollaborator checks if a user is a collaborator of a repository
+func IsCollaborator(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/collaborators/{collaborator} repository repoCheckCollaborator
+ // ---
+ // summary: Check if a user is a collaborator of a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: collaborator
+ // in: path
+ // description: username of the collaborator
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ user, err := user_model.GetUserByName(ctx, ctx.Params(":collaborator"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+ isColab, err := repo_model.IsCollaborator(ctx, ctx.Repo.Repository.ID, user.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsCollaborator", err)
+ return
+ }
+ if isColab {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+}
+
+// AddCollaborator adds a collaborator to a repository
+func AddCollaborator(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/collaborators/{collaborator} repository repoAddCollaborator
+ // ---
+ // summary: Add a collaborator to a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: collaborator
+ // in: path
+ // description: username of the collaborator to add
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/AddCollaboratorOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ form := web.GetForm(ctx).(*api.AddCollaboratorOption)
+
+ collaborator, err := user_model.GetUserByName(ctx, ctx.Params(":collaborator"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+
+ if !collaborator.IsActive {
+ ctx.Error(http.StatusInternalServerError, "InactiveCollaborator", errors.New("collaborator's account is inactive"))
+ return
+ }
+
+ if err := repo_module.AddCollaborator(ctx, ctx.Repo.Repository, collaborator); err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "AddCollaborator", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "AddCollaborator", err)
+ }
+ return
+ }
+
+ if form.Permission != nil {
+ if err := repo_model.ChangeCollaborationAccessMode(ctx, ctx.Repo.Repository, collaborator.ID, perm.ParseAccessMode(*form.Permission)); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeCollaborationAccessMode", err)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
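+
+// Illustrative request body (not part of the handler): AddCollaboratorOption carries
+// an optional permission. The JSON field name and the accepted values ("read",
+// "write", "admin") are assumptions based on perm.ParseAccessMode, not confirmed here:
+//
+//	PUT /api/v1/repos/owner/repo/collaborators/some-user
+//	{"permission": "write"}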
+
+// DeleteCollaborator delete a collaborator from a repository
+func DeleteCollaborator(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/collaborators/{collaborator} repository repoDeleteCollaborator
+ // ---
+ // summary: Delete a collaborator from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: collaborator
+ // in: path
+ // description: username of the collaborator to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ collaborator, err := user_model.GetUserByName(ctx, ctx.Params(":collaborator"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+
+ if err := repo_service.DeleteCollaboration(ctx, ctx.Repo.Repository, collaborator.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteCollaboration", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetRepoPermissions gets repository permissions for a user
+func GetRepoPermissions(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/collaborators/{collaborator}/permission repository repoGetRepoPermissions
+ // ---
+ // summary: Get repository permissions for a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: collaborator
+ // in: path
+ // description: username of the collaborator
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepoCollaboratorPermission"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ if !ctx.Doer.IsAdmin && ctx.Doer.LoginName != ctx.Params(":collaborator") && !ctx.IsUserRepoAdmin() {
+ ctx.Error(http.StatusForbidden, "User", "Only admins can query all permissions, repo admins can query all repo permissions, collaborators can query only their own")
+ return
+ }
+
+ collaborator, err := user_model.GetUserByName(ctx, ctx.Params(":collaborator"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusNotFound, "GetUserByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+
+ permission, err := access_model.GetUserRepoPermission(ctx, ctx.Repo.Repository, collaborator)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToUserAndPermission(ctx, collaborator, ctx.ContextUser, permission.AccessMode))
+}
+
+// GetReviewers return all users that can be requested to review in this repo
+func GetReviewers(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/reviewers repository repoGetReviewers
+ // ---
+ // summary: Return all users that can be requested to review in this repo
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ reviewers, err := repo_model.GetReviewers(ctx, ctx.Repo.Repository, ctx.Doer.ID, 0)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetReviewers", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToUsers(ctx, ctx.Doer, reviewers))
+}
+
+// GetAssignees return all users that have write access and can be assigned to issues
+func GetAssignees(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/assignees repository repoGetAssignees
+ // ---
+ // summary: Return all users that have write access and can be assigned to issues
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ assignees, err := repo_model.GetRepoAssignees(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetRepoAssignees", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToUsers(ctx, ctx.Doer, assignees))
+}
diff --git a/routers/api/v1/repo/commits.go b/routers/api/v1/repo/commits.go
new file mode 100644
index 0000000..c5e8cf9
--- /dev/null
+++ b/routers/api/v1/repo/commits.go
@@ -0,0 +1,376 @@
+// Copyright 2018 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "strconv"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetSingleCommit get a commit via sha
+func GetSingleCommit(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/commits/{sha} repository repoGetSingleCommit
+ // ---
+ // summary: Get a single commit from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: a git ref or commit sha
+ // type: string
+ // required: true
+ // - name: stat
+ // in: query
+ // description: include diff stats for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: verification
+ // in: query
+ // description: include verification for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: files
+ // in: query
+ // description: include a list of affected files for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Commit"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := ctx.Params(":sha")
+ if !git.IsValidRefPattern(sha) {
+ ctx.Error(http.StatusUnprocessableEntity, "no valid ref or sha", fmt.Sprintf("no valid ref or sha: %s", sha))
+ return
+ }
+
+ getCommit(ctx, sha, convert.ParseCommitOptions(ctx))
+}
+
+func getCommit(ctx *context.APIContext, identifier string, toCommitOpts convert.ToCommitOptions) {
+ commit, err := ctx.Repo.GitRepo.GetCommit(identifier)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(identifier)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "gitRepo.GetCommit", err)
+ return
+ }
+
+ json, err := convert.ToCommit(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, commit, nil, toCommitOpts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "toCommit", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, json)
+}
+
+// GetAllCommits get all commits of a repository
+func GetAllCommits(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/commits repository repoGetAllCommits
+ // ---
+ // summary: Get a list of all commits from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: query
+ // description: SHA or branch to start listing commits from (usually 'master')
+ // type: string
+ // - name: path
+ // in: query
+ // description: filepath of a file/dir
+ // type: string
+ // - name: stat
+ // in: query
+ // description: include diff stats for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: verification
+ // in: query
+ // description: include verification for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: files
+ // in: query
+ // description: include a list of affected files for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results (ignored if used with 'path')
+ // type: integer
+ // - name: not
+ // in: query
+ // description: commits that match the given specifier will not be listed.
+ // type: string
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommitList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/EmptyRepository"
+
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.JSON(http.StatusConflict, api.APIError{
+ Message: "Git Repository is empty.",
+ URL: setting.API.SwaggerURL,
+ })
+ return
+ }
+
+ listOptions := utils.GetListOptions(ctx)
+ if listOptions.Page <= 0 {
+ listOptions.Page = 1
+ }
+
+ if listOptions.PageSize > setting.Git.CommitsRangeSize {
+ listOptions.PageSize = setting.Git.CommitsRangeSize
+ }
+
+ sha := ctx.FormString("sha")
+ path := ctx.FormString("path")
+ not := ctx.FormString("not")
+
+ var (
+ commitsCountTotal int64
+ commits []*git.Commit
+ err error
+ )
+
+ if len(path) == 0 {
+ var baseCommit *git.Commit
+ if len(sha) == 0 {
+ // no sha supplied - use default branch
+ head, err := ctx.Repo.GitRepo.GetHEADBranch()
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetHEADBranch", err)
+ return
+ }
+
+ baseCommit, err = ctx.Repo.GitRepo.GetBranchCommit(head.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBranchCommit", err)
+ return
+ }
+ } else {
+ // get commit specified by sha
+ baseCommit, err = ctx.Repo.GitRepo.GetCommit(sha)
+ if err != nil {
+ ctx.NotFoundOrServerError("GetCommit", git.IsErrNotExist, err)
+ return
+ }
+ }
+
+ // Total commit count
+ commitsCountTotal, err = git.CommitsCount(ctx.Repo.GitRepo.Ctx, git.CommitsCountOptions{
+ RepoPath: ctx.Repo.GitRepo.Path,
+ Not: not,
+ Revision: []string{baseCommit.ID.String()},
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommitsCount", err)
+ return
+ }
+
+ // Query commits
+ commits, err = baseCommit.CommitsByRange(listOptions.Page, listOptions.PageSize, not)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CommitsByRange", err)
+ return
+ }
+ } else {
+ if len(sha) == 0 {
+ sha = ctx.Repo.Repository.DefaultBranch
+ }
+
+ commitsCountTotal, err = git.CommitsCount(ctx,
+ git.CommitsCountOptions{
+ RepoPath: ctx.Repo.GitRepo.Path,
+ Not: not,
+ Revision: []string{sha},
+ RelPath: []string{path},
+ })
+
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FileCommitsCount", err)
+ return
+ } else if commitsCountTotal == 0 {
+ ctx.NotFound("FileCommitsCount", nil)
+ return
+ }
+
+ commits, err = ctx.Repo.GitRepo.CommitsByFileAndRange(
+ git.CommitsByFileAndRangeOptions{
+ Revision: sha,
+ File: path,
+ Not: not,
+ Page: listOptions.Page,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CommitsByFileAndRange", err)
+ return
+ }
+ }
+
+ pageCount := int(math.Ceil(float64(commitsCountTotal) / float64(listOptions.PageSize)))
+ userCache := make(map[string]*user_model.User)
+ apiCommits := make([]*api.Commit, len(commits))
+
+ for i, commit := range commits {
+ // Create json struct
+ apiCommits[i], err = convert.ToCommit(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, commit, userCache, convert.ParseCommitOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "toCommit", err)
+ return
+ }
+ }
+
+ ctx.SetLinkHeader(int(commitsCountTotal), listOptions.PageSize)
+ ctx.SetTotalCountHeader(commitsCountTotal)
+
+ // kept for backwards compatibility
+ ctx.RespHeader().Set("X-Page", strconv.Itoa(listOptions.Page))
+ ctx.RespHeader().Set("X-PerPage", strconv.Itoa(listOptions.PageSize))
+ ctx.RespHeader().Set("X-Total", strconv.FormatInt(commitsCountTotal, 10))
+ ctx.RespHeader().Set("X-PageCount", strconv.Itoa(pageCount))
+ ctx.RespHeader().Set("X-HasMore", strconv.FormatBool(listOptions.Page < pageCount))
+ ctx.AppendAccessControlExposeHeaders("X-Page", "X-PerPage", "X-Total", "X-PageCount", "X-HasMore")
+
+ ctx.JSON(http.StatusOK, &apiCommits)
+}
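+
+// Illustrative usage (not part of the handler): the listing can be narrowed with the
+// sha, path and not query parameters and paginated with page/limit; the values below
+// are placeholders, not taken from this file:
+//
+//	GET /api/v1/repos/owner/repo/commits?sha=main&path=docs/&not=v1.0.0&page=2&limit=30
+//	# pagination state is exposed via the X-Page, X-PerPage, X-Total,
+//	# X-PageCount and X-HasMore response headers set above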
+
+// DownloadCommitDiffOrPatch render a commit's raw diff or patch
+func DownloadCommitDiffOrPatch(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/commits/{sha}.{diffType} repository repoDownloadCommitDiffOrPatch
+ // ---
+ // summary: Get a commit's diff or patch
+ // produces:
+ // - text/plain
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: SHA of the commit to get
+ // type: string
+ // required: true
+ // - name: diffType
+ // in: path
+ // description: whether the output is diff or patch
+ // type: string
+ // enum: [diff, patch]
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/string"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ sha := ctx.Params(":sha")
+ diffType := git.RawDiffType(ctx.Params(":diffType"))
+
+ if err := git.GetRawDiff(ctx.Repo.GitRepo, sha, diffType, ctx.Resp); err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(sha)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DownloadCommitDiffOrPatch", err)
+ return
+ }
+}
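+
+// Illustrative usage (not part of the handler): the diffType path segment selects the
+// output format; the commit SHA below is a placeholder:
+//
+//	GET /api/v1/repos/owner/repo/git/commits/0a1b2c3.diff    # unified diff
+//	GET /api/v1/repos/owner/repo/git/commits/0a1b2c3.patch   # email-style patch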
+
+// GetCommitPullRequest returns the pull request of the commit
+func GetCommitPullRequest(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/commits/{sha}/pull repository repoGetCommitPullRequest
+ // ---
+ // summary: Get the pull request of the commit
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: SHA of the commit to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullRequest"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ pr, err := issues_model.GetPullRequestByMergedCommit(ctx, ctx.Repo.Repository.ID, ctx.Params("ref"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.Error(http.StatusNotFound, "GetPullRequestByMergedCommit", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByMergedCommit", err)
+ }
+ return
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadBaseRepo", err)
+ return
+ }
+ if err = pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIPullRequest(ctx, pr, ctx.Doer))
+}
diff --git a/routers/api/v1/repo/compare.go b/routers/api/v1/repo/compare.go
new file mode 100644
index 0000000..429145c
--- /dev/null
+++ b/routers/api/v1/repo/compare.go
@@ -0,0 +1,99 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "strings"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/gitrepo"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// CompareDiff compare two branches or commits
+func CompareDiff(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/compare/{basehead} repository repoCompareDiff
+ // ---
+ // summary: Get commit comparison information
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: basehead
+ // in: path
+ // description: compare two branches or commits
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Compare"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if ctx.Repo.GitRepo == nil {
+ gitRepo, err := gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ return
+ }
+ ctx.Repo.GitRepo = gitRepo
+ defer gitRepo.Close()
+ }
+
+ infoPath := ctx.Params("*")
+ infos := []string{ctx.Repo.Repository.DefaultBranch, ctx.Repo.Repository.DefaultBranch}
+ if infoPath != "" {
+ infos = strings.SplitN(infoPath, "...", 2)
+ if len(infos) != 2 {
+ if infos = strings.SplitN(infoPath, "..", 2); len(infos) != 2 {
+ infos = []string{ctx.Repo.Repository.DefaultBranch, infoPath}
+ }
+ }
+ }
+
+ _, headGitRepo, ci, _, _ := parseCompareInfo(ctx, api.CreatePullRequestOption{
+ Base: infos[0],
+ Head: infos[1],
+ })
+ if ctx.Written() {
+ return
+ }
+ defer headGitRepo.Close()
+
+ verification := ctx.FormString("verification") == "" || ctx.FormBool("verification")
+ files := ctx.FormString("files") == "" || ctx.FormBool("files")
+
+ apiCommits := make([]*api.Commit, 0, len(ci.Commits))
+ userCache := make(map[string]*user_model.User)
+ for i := 0; i < len(ci.Commits); i++ {
+ apiCommit, err := convert.ToCommit(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, ci.Commits[i], userCache,
+ convert.ToCommitOptions{
+ Stat: true,
+ Verification: verification,
+ Files: files,
+ })
+ if err != nil {
+ ctx.ServerError("toCommit", err)
+ return
+ }
+ apiCommits = append(apiCommits, apiCommit)
+ }
+
+ ctx.JSON(http.StatusOK, &api.Compare{
+ TotalCommits: len(ci.Commits),
+ Commits: apiCommits,
+ })
+}
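+
+// Illustrative basehead values (not part of the handler), matching the parsing above;
+// the branch names are placeholders:
+//
+//	GET /api/v1/repos/owner/repo/compare/main...feature   # explicit base...head
+//	GET /api/v1/repos/owner/repo/compare/main..feature    # ".." is also accepted
+//	GET /api/v1/repos/owner/repo/compare/feature          # base defaults to the default branch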
diff --git a/routers/api/v1/repo/file.go b/routers/api/v1/repo/file.go
new file mode 100644
index 0000000..1fa44d5
--- /dev/null
+++ b/routers/api/v1/repo/file.go
@@ -0,0 +1,1014 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ git_model "code.gitea.io/gitea/models/git"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/httpcache"
+ "code.gitea.io/gitea/modules/lfs"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/common"
+ "code.gitea.io/gitea/services/context"
+ archiver_service "code.gitea.io/gitea/services/repository/archiver"
+ files_service "code.gitea.io/gitea/services/repository/files"
+)
+
+const (
+ giteaObjectTypeHeader = "X-Gitea-Object-Type"
+ forgejoObjectTypeHeader = "X-Forgejo-Object-Type"
+)
+
+// GetRawFile get a file by path on a repository
+func GetRawFile(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/raw/{filepath} repository repoGetRawFile
+ // ---
+ // summary: Get a file from a repository
+ // produces:
+ // - application/octet-stream
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: filepath of the file to get
+ // type: string
+ // required: true
+ // - name: ref
+ // in: query
+ //       description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
+ // type: string
+ // required: false
+ // responses:
+ // 200:
+ // description: Returns raw file content.
+ // schema:
+ // type: file
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.NotFound()
+ return
+ }
+
+ blob, entry, lastModified := getBlobForEntry(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
+ ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
+
+ if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ServeBlob", err)
+ }
+}
+
+// GetRawFileOrLFS get a file by repo's path, redirecting to LFS if necessary.
+func GetRawFileOrLFS(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/media/{filepath} repository repoGetRawFileOrLFS
+ // ---
+ // summary: Get a file or its LFS object from a repository
+ // produces:
+ // - application/octet-stream
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: filepath of the file to get
+ // type: string
+ // required: true
+ // - name: ref
+ // in: query
+ //       description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
+ // type: string
+ // required: false
+ // responses:
+ // 200:
+ // description: Returns raw file content.
+ // schema:
+ // type: file
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.NotFound()
+ return
+ }
+
+ blob, entry, lastModified := getBlobForEntry(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
+ ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
+
+ // LFS Pointer files are at most 1024 bytes - so any blob greater than 1024 bytes cannot be an LFS file
+ if blob.Size() > 1024 {
+ // First handle caching for the blob
+ if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
+ return
+ }
+
+ // OK not cached - serve!
+ if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
+ ctx.ServerError("ServeBlob", err)
+ }
+ return
+ }
+
+ // OK, now the blob is known to be at most 1024 bytes, so we can simply read it in one go (this saves reading it twice)
+ dataRc, err := blob.DataAsync()
+ if err != nil {
+ ctx.ServerError("DataAsync", err)
+ return
+ }
+
+ // FIXME: code from #19689, what if the file is large ... OOM ...
+ buf, err := io.ReadAll(dataRc)
+ if err != nil {
+ _ = dataRc.Close()
+ ctx.ServerError("DataAsync", err)
+ return
+ }
+
+ if err := dataRc.Close(); err != nil {
+ log.Error("Error whilst closing blob %s reader in %-v. Error: %v", blob.ID, ctx.Repo.Repository, err)
+ }
+
+ // Check if the blob represents a pointer
+ pointer, _ := lfs.ReadPointer(bytes.NewReader(buf))
+
+ // if it's not a pointer, just serve the data directly
+ if !pointer.IsValid() {
+ // First handle caching for the blob
+ if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
+ return
+ }
+
+ // OK not cached - serve!
+ common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
+ return
+ }
+
+ // Now check if there is a MetaObject for this pointer
+ meta, err := git_model.GetLFSMetaObjectByOid(ctx, ctx.Repo.Repository.ID, pointer.Oid)
+
+ // If there isn't one, just serve the data directly
+ if err == git_model.ErrLFSObjectNotExist {
+ // Handle caching for the blob SHA (not the LFS object OID)
+ if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
+ return
+ }
+
+ common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
+ return
+ } else if err != nil {
+ ctx.ServerError("GetLFSMetaObjectByOid", err)
+ return
+ }
+
+ // Handle caching for the LFS object OID
+ if httpcache.HandleGenericETagCache(ctx.Req, ctx.Resp, `"`+pointer.Oid+`"`) {
+ return
+ }
+
+ if setting.LFS.Storage.MinioConfig.ServeDirect {
+ // If we have a signed url (S3, object storage), redirect to this directly.
+ u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name())
+ if u != nil && err == nil {
+ ctx.Redirect(u.String())
+ return
+ }
+ }
+
+ lfsDataRc, err := lfs.ReadMetaObject(meta.Pointer)
+ if err != nil {
+ ctx.ServerError("ReadMetaObject", err)
+ return
+ }
+ defer lfsDataRc.Close()
+
+ common.ServeContentByReadSeeker(ctx.Base, ctx.Repo.TreePath, lastModified, lfsDataRc)
+}
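+
+// For reference (not part of the handler): an LFS pointer file, which the code above
+// detects via lfs.ReadPointer, looks roughly like the following; the oid and size
+// values are placeholders:
+//
+//	version https://git-lfs.github.com/spec/v1
+//	oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393
+//	size 12345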
+
+func getBlobForEntry(ctx *context.APIContext) (blob *git.Blob, entry *git.TreeEntry, lastModified *time.Time) {
+ entry, err := ctx.Repo.Commit.GetTreeEntryByPath(ctx.Repo.TreePath)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetTreeEntryByPath", err)
+ }
+ return nil, nil, nil
+ }
+
+ if entry.IsDir() || entry.IsSubModule() {
+ ctx.NotFound("getBlobForEntry", nil)
+ return nil, nil, nil
+ }
+
+ info, _, err := git.Entries([]*git.TreeEntry{entry}).GetCommitsInfo(ctx, ctx.Repo.Commit, path.Dir("/" + ctx.Repo.TreePath)[1:])
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommitsInfo", err)
+ return nil, nil, nil
+ }
+
+ if len(info) == 1 {
+ // use the time of the latest commit touching this entry as the Last-Modified value
+ lastModified = &info[0].Commit.Committer.When
+ }
+ blob = entry.Blob()
+
+ return blob, entry, lastModified
+}
+
+// GetArchive get archive of a repository
+func GetArchive(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/archive/{archive} repository repoGetArchive
+ // ---
+ // summary: Get an archive of a repository
+ // produces:
+ // - application/octet-stream
+ // - application/zip
+ // - application/gzip
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: archive
+ // in: path
+ // description: the git reference for download with attached archive format (e.g. master.zip)
+ // type: string
+ // required: true
+ // responses:
+ // 200:
+ // description: success
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if ctx.Repo.GitRepo == nil {
+ gitRepo, err := gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ return
+ }
+ ctx.Repo.GitRepo = gitRepo
+ defer gitRepo.Close()
+ }
+
+ archiveDownload(ctx)
+}
+
+func archiveDownload(ctx *context.APIContext) {
+ uri := ctx.Params("*")
+ aReq, err := archiver_service.NewRequest(ctx, ctx.Repo.Repository.ID, ctx.Repo.GitRepo, uri)
+ if err != nil {
+ if errors.Is(err, archiver_service.ErrUnknownArchiveFormat{}) {
+ ctx.Error(http.StatusBadRequest, "unknown archive format", err)
+ } else if errors.Is(err, archiver_service.RepoRefNotFoundError{}) {
+ ctx.Error(http.StatusNotFound, "unrecognized reference", err)
+ } else {
+ ctx.ServerError("archiver_service.NewRequest", err)
+ }
+ return
+ }
+
+ archiver, err := aReq.Await(ctx)
+ if err != nil {
+ ctx.ServerError("archiver.Await", err)
+ return
+ }
+
+ download(ctx, aReq.GetArchiveName(), archiver)
+}
+
+func download(ctx *context.APIContext, archiveName string, archiver *repo_model.RepoArchiver) {
+ downloadName := ctx.Repo.Repository.Name + "-" + archiveName
+
+ // Add nix format link header so tarballs lock correctly:
+ // https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
+ ctx.Resp.Header().Add("Link", fmt.Sprintf("<%s/archive/%s.tar.gz?rev=%s>; rel=\"immutable\"",
+ ctx.Repo.Repository.APIURL(),
+ archiver.CommitID, archiver.CommitID))
+
+ rPath := archiver.RelativePath()
+ if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
+ // If we have a signed url (S3, object storage), redirect to this directly.
+ u, err := storage.RepoArchives.URL(rPath, downloadName)
+ if u != nil && err == nil {
+ ctx.Redirect(u.String())
+ return
+ }
+ }
+
+ // Otherwise stream the archive from the repository archive storage
+ fr, err := storage.RepoArchives.Open(rPath)
+ if err != nil {
+ ctx.ServerError("Open", err)
+ return
+ }
+ defer fr.Close()
+
+ contentType := ""
+ switch archiver.Type {
+ case git.ZIP:
+ contentType = "application/zip"
+ case git.TARGZ:
+ // Per RFC6713.
+ contentType = "application/gzip"
+ }
+
+ ctx.ServeContent(fr, &context.ServeHeaderOptions{
+ ContentType: contentType,
+ Filename: downloadName,
+ LastModified: archiver.CreatedUnix.AsLocalTime(),
+ })
+}
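+
+// For reference (not part of the handler): with the format string above, the Link
+// header advertised for Nix tarball locking looks roughly like this (the URL and
+// commit ID are placeholders):
+//
+//	Link: <https://forgejo.example.com/api/v1/repos/owner/repo/archive/<commit-id>.tar.gz?rev=<commit-id>>; rel="immutable"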
+
+// GetEditorconfig get editor config of a repository
+func GetEditorconfig(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/editorconfig/{filepath} repository repoGetEditorConfig
+ // ---
+ // summary: Get the EditorConfig definitions of a file in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: filepath of file to get
+ // type: string
+ // required: true
+ // - name: ref
+ // in: query
+ //       description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
+ // type: string
+ // required: false
+ // responses:
+ // 200:
+ // description: success
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ ec, _, err := ctx.Repo.GetEditorconfig(ctx.Repo.Commit)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetEditorconfig", err)
+ }
+ return
+ }
+
+ fileName := ctx.Params("filename")
+ def, err := ec.GetDefinitionForFilename(fileName)
+ if def == nil {
+ ctx.NotFound(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, def)
+}
+
+// canWriteFiles returns true if repository is editable and user has proper access level.
+func canWriteFiles(ctx *context.APIContext, branch string) bool {
+ return ctx.Repo.CanWriteToBranch(ctx, ctx.Doer, branch) &&
+ !ctx.Repo.Repository.IsMirror &&
+ !ctx.Repo.Repository.IsArchived
+}
+
+// canReadFiles returns true if repository is readable and user has proper access level.
+func canReadFiles(r *context.Repository) bool {
+ return r.Permission.CanRead(unit.TypeCode)
+}
+
+func base64Reader(s string) (io.ReadSeeker, error) {
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewReader(b), nil
+}
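+
+// Illustrative use of base64Reader (not part of the handlers below): the ContentBase64
+// fields of the file options are standard base64-encoded content, e.g.
+//
+//	r, err := base64Reader("aGVsbG8gd29ybGQ=") // reader over "hello world"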
+
+// ChangeFiles handles API call for modifying multiple files
+func ChangeFiles(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/contents repository repoChangeFiles
+ // ---
+ // summary: Modify multiple files in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/ChangeFilesOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/FilesResponse"
+ // "403":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/error"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ apiOpts := web.GetForm(ctx).(*api.ChangeFilesOptions)
+
+ if apiOpts.BranchName == "" {
+ apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
+ }
+
+ var files []*files_service.ChangeRepoFile
+ for _, file := range apiOpts.Files {
+ contentReader, err := base64Reader(file.ContentBase64)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
+ return
+ }
+ changeRepoFile := &files_service.ChangeRepoFile{
+ Operation: file.Operation,
+ TreePath: file.Path,
+ FromTreePath: file.FromPath,
+ ContentReader: contentReader,
+ SHA: file.SHA,
+ }
+ files = append(files, changeRepoFile)
+ }
+
+ opts := &files_service.ChangeRepoFilesOptions{
+ Files: files,
+ Message: apiOpts.Message,
+ OldBranch: apiOpts.BranchName,
+ NewBranch: apiOpts.NewBranchName,
+ Committer: &files_service.IdentityOptions{
+ Name: apiOpts.Committer.Name,
+ Email: apiOpts.Committer.Email,
+ },
+ Author: &files_service.IdentityOptions{
+ Name: apiOpts.Author.Name,
+ Email: apiOpts.Author.Email,
+ },
+ Dates: &files_service.CommitDateOptions{
+ Author: apiOpts.Dates.Author,
+ Committer: apiOpts.Dates.Committer,
+ },
+ Signoff: apiOpts.Signoff,
+ }
+ if opts.Dates.Author.IsZero() {
+ opts.Dates.Author = time.Now()
+ }
+ if opts.Dates.Committer.IsZero() {
+ opts.Dates.Committer = time.Now()
+ }
+
+ if opts.Message == "" {
+ opts.Message = changeFilesCommitMessage(ctx, files)
+ }
+
+ if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
+ handleCreateOrUpdateFileError(ctx, err)
+ } else {
+ ctx.JSON(http.StatusCreated, filesResponse)
+ }
+}
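+
+// Illustrative request body for repoChangeFiles (not part of the handler); the JSON
+// field names are assumptions based on the ChangeFilesOptions binding and the fields
+// read above (Operation, Path, FromPath, ContentBase64, SHA), and the values are
+// placeholders:
+//
+//	POST /api/v1/repos/owner/repo/contents
+//	{
+//	  "branch": "main",
+//	  "message": "Add and update files",
+//	  "files": [
+//	    {"operation": "create", "path": "docs/new.md", "content": "aGVsbG8gd29ybGQ="},
+//	    {"operation": "update", "path": "README.md", "content": "dXBkYXRlZA==", "sha": "<current blob sha>"}
+//	  ]
+//	}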
+
+// CreateFile handles API call for creating a file
+func CreateFile(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/contents/{filepath} repository repoCreateFile
+ // ---
+ // summary: Create a file in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: path of the file to create
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateFileOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/FileResponse"
+ // "403":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/error"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ apiOpts := web.GetForm(ctx).(*api.CreateFileOptions)
+
+ if apiOpts.BranchName == "" {
+ apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
+ }
+
+ contentReader, err := base64Reader(apiOpts.ContentBase64)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
+ return
+ }
+
+ opts := &files_service.ChangeRepoFilesOptions{
+ Files: []*files_service.ChangeRepoFile{
+ {
+ Operation: "create",
+ TreePath: ctx.Params("*"),
+ ContentReader: contentReader,
+ },
+ },
+ Message: apiOpts.Message,
+ OldBranch: apiOpts.BranchName,
+ NewBranch: apiOpts.NewBranchName,
+ Committer: &files_service.IdentityOptions{
+ Name: apiOpts.Committer.Name,
+ Email: apiOpts.Committer.Email,
+ },
+ Author: &files_service.IdentityOptions{
+ Name: apiOpts.Author.Name,
+ Email: apiOpts.Author.Email,
+ },
+ Dates: &files_service.CommitDateOptions{
+ Author: apiOpts.Dates.Author,
+ Committer: apiOpts.Dates.Committer,
+ },
+ Signoff: apiOpts.Signoff,
+ }
+ if opts.Dates.Author.IsZero() {
+ opts.Dates.Author = time.Now()
+ }
+ if opts.Dates.Committer.IsZero() {
+ opts.Dates.Committer = time.Now()
+ }
+
+ if opts.Message == "" {
+ opts.Message = changeFilesCommitMessage(ctx, opts.Files)
+ }
+
+ if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
+ handleCreateOrUpdateFileError(ctx, err)
+ } else {
+ fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
+ ctx.JSON(http.StatusCreated, fileResponse)
+ }
+}
+
+// UpdateFile handles API call for updating a file
+func UpdateFile(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/contents/{filepath} repository repoUpdateFile
+ // ---
+ // summary: Update a file in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: path of the file to update
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/UpdateFileOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/FileResponse"
+ // "403":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/error"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ apiOpts := web.GetForm(ctx).(*api.UpdateFileOptions)
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.Error(http.StatusUnprocessableEntity, "RepoIsEmpty", fmt.Errorf("repo is empty"))
+ return
+ }
+
+ if apiOpts.BranchName == "" {
+ apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
+ }
+
+ contentReader, err := base64Reader(apiOpts.ContentBase64)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
+ return
+ }
+
+ opts := &files_service.ChangeRepoFilesOptions{
+ Files: []*files_service.ChangeRepoFile{
+ {
+ Operation: "update",
+ ContentReader: contentReader,
+ SHA: apiOpts.SHA,
+ FromTreePath: apiOpts.FromPath,
+ TreePath: ctx.Params("*"),
+ },
+ },
+ Message: apiOpts.Message,
+ OldBranch: apiOpts.BranchName,
+ NewBranch: apiOpts.NewBranchName,
+ Committer: &files_service.IdentityOptions{
+ Name: apiOpts.Committer.Name,
+ Email: apiOpts.Committer.Email,
+ },
+ Author: &files_service.IdentityOptions{
+ Name: apiOpts.Author.Name,
+ Email: apiOpts.Author.Email,
+ },
+ Dates: &files_service.CommitDateOptions{
+ Author: apiOpts.Dates.Author,
+ Committer: apiOpts.Dates.Committer,
+ },
+ Signoff: apiOpts.Signoff,
+ }
+ if opts.Dates.Author.IsZero() {
+ opts.Dates.Author = time.Now()
+ }
+ if opts.Dates.Committer.IsZero() {
+ opts.Dates.Committer = time.Now()
+ }
+
+ if opts.Message == "" {
+ opts.Message = changeFilesCommitMessage(ctx, opts.Files)
+ }
+
+ if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
+ handleCreateOrUpdateFileError(ctx, err)
+ } else {
+ fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
+ ctx.JSON(http.StatusOK, fileResponse)
+ }
+}
+
+func handleCreateOrUpdateFileError(ctx *context.APIContext, err error) {
+ if models.IsErrUserCannotCommit(err) || models.IsErrFilePathProtected(err) {
+ ctx.Error(http.StatusForbidden, "Access", err)
+ return
+ }
+ if git_model.IsErrBranchAlreadyExists(err) || models.IsErrFilenameInvalid(err) || models.IsErrSHADoesNotMatch(err) ||
+ models.IsErrFilePathInvalid(err) || models.IsErrRepoFileAlreadyExists(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid", err)
+ return
+ }
+ if git_model.IsErrBranchNotExist(err) || git.IsErrBranchNotExist(err) {
+ ctx.Error(http.StatusNotFound, "BranchDoesNotExist", err)
+ return
+ }
+
+ ctx.Error(http.StatusInternalServerError, "UpdateFile", err)
+}
+
+// createOrUpdateFiles is called from ChangeFiles, CreateFile and UpdateFile to apply the requested changes
+func createOrUpdateFiles(ctx *context.APIContext, opts *files_service.ChangeRepoFilesOptions) (*api.FilesResponse, error) {
+ if !canWriteFiles(ctx, opts.OldBranch) {
+ return nil, repo_model.ErrUserDoesNotHaveAccessToRepo{
+ UserID: ctx.Doer.ID,
+ RepoName: ctx.Repo.Repository.LowerName,
+ }
+ }
+
+ return files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts)
+}
+
+// changeFilesCommitMessage builds a default commit message from the changed file paths when none was provided
+func changeFilesCommitMessage(ctx *context.APIContext, files []*files_service.ChangeRepoFile) string {
+ var (
+ createFiles []string
+ updateFiles []string
+ deleteFiles []string
+ )
+ for _, file := range files {
+ switch file.Operation {
+ case "create":
+ createFiles = append(createFiles, file.TreePath)
+ case "update":
+ updateFiles = append(updateFiles, file.TreePath)
+ case "delete":
+ deleteFiles = append(deleteFiles, file.TreePath)
+ }
+ }
+ message := ""
+ if len(createFiles) != 0 {
+ message += ctx.Locale.TrString("repo.editor.add", strings.Join(createFiles, ", ")+"\n")
+ }
+ if len(updateFiles) != 0 {
+ message += ctx.Locale.TrString("repo.editor.update", strings.Join(updateFiles, ", ")+"\n")
+ }
+ if len(deleteFiles) != 0 {
+ message += ctx.Locale.TrString("repo.editor.delete", strings.Join(deleteFiles, ", "))
+ }
+ return strings.Trim(message, "\n")
+}
+
+// DeleteFile Delete a file in a repository
+func DeleteFile(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/contents/{filepath} repository repoDeleteFile
+ // ---
+ // summary: Delete a file in a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: path of the file to delete
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/DeleteFileOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/FileDeleteResponse"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ apiOpts := web.GetForm(ctx).(*api.DeleteFileOptions)
+ if !canWriteFiles(ctx, apiOpts.BranchName) {
+ ctx.Error(http.StatusForbidden, "DeleteFile", repo_model.ErrUserDoesNotHaveAccessToRepo{
+ UserID: ctx.Doer.ID,
+ RepoName: ctx.Repo.Repository.LowerName,
+ })
+ return
+ }
+
+ if apiOpts.BranchName == "" {
+ apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
+ }
+
+ opts := &files_service.ChangeRepoFilesOptions{
+ Files: []*files_service.ChangeRepoFile{
+ {
+ Operation: "delete",
+ SHA: apiOpts.SHA,
+ TreePath: ctx.Params("*"),
+ },
+ },
+ Message: apiOpts.Message,
+ OldBranch: apiOpts.BranchName,
+ NewBranch: apiOpts.NewBranchName,
+ Committer: &files_service.IdentityOptions{
+ Name: apiOpts.Committer.Name,
+ Email: apiOpts.Committer.Email,
+ },
+ Author: &files_service.IdentityOptions{
+ Name: apiOpts.Author.Name,
+ Email: apiOpts.Author.Email,
+ },
+ Dates: &files_service.CommitDateOptions{
+ Author: apiOpts.Dates.Author,
+ Committer: apiOpts.Dates.Committer,
+ },
+ Signoff: apiOpts.Signoff,
+ }
+ if opts.Dates.Author.IsZero() {
+ opts.Dates.Author = time.Now()
+ }
+ if opts.Dates.Committer.IsZero() {
+ opts.Dates.Committer = time.Now()
+ }
+
+ if opts.Message == "" {
+ opts.Message = changeFilesCommitMessage(ctx, opts.Files)
+ }
+
+ if filesResponse, err := files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts); err != nil {
+ if git.IsErrBranchNotExist(err) || models.IsErrRepoFileDoesNotExist(err) || git.IsErrNotExist(err) {
+ ctx.Error(http.StatusNotFound, "DeleteFile", err)
+ return
+ } else if git_model.IsErrBranchAlreadyExists(err) ||
+ models.IsErrFilenameInvalid(err) ||
+ models.IsErrSHADoesNotMatch(err) ||
+ models.IsErrCommitIDDoesNotMatch(err) ||
+ models.IsErrSHAOrCommitIDNotProvided(err) {
+ ctx.Error(http.StatusBadRequest, "DeleteFile", err)
+ return
+ } else if models.IsErrUserCannotCommit(err) {
+ ctx.Error(http.StatusForbidden, "DeleteFile", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DeleteFile", err)
+ } else {
+ fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
+ ctx.JSON(http.StatusOK, fileResponse) // FIXME on APIv2: return http.StatusNoContent
+ }
+}
+
+// GetContents Get the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
+func GetContents(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/contents/{filepath} repository repoGetContents
+ // ---
+ // summary: Gets the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: filepath
+ // in: path
+ // description: path of the dir, file, symlink or submodule in the repo
+ // type: string
+ // required: true
+ // - name: ref
+ // in: query
+ //       description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
+ // type: string
+ // required: false
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ContentsResponse"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !canReadFiles(ctx.Repo) {
+ ctx.Error(http.StatusInternalServerError, "GetContentsOrList", repo_model.ErrUserDoesNotHaveAccessToRepo{
+ UserID: ctx.Doer.ID,
+ RepoName: ctx.Repo.Repository.LowerName,
+ })
+ return
+ }
+
+ treePath := ctx.Params("*")
+ ref := ctx.FormTrim("ref")
+
+ if fileList, err := files_service.GetContentsOrList(ctx, ctx.Repo.Repository, treePath, ref); err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound("GetContentsOrList", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetContentsOrList", err)
+ } else {
+ ctx.JSON(http.StatusOK, fileList)
+ }
+}
+
+// GetContentsList Get the metadata of all the entries of the root dir
+func GetContentsList(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/contents repository repoGetContentsList
+ // ---
+ // summary: Gets the metadata of all the entries of the root dir
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: ref
+ // in: query
+ //       description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
+ // type: string
+ // required: false
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ContentsListResponse"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ // same as GetContents(), this function is here because swagger fails if path is empty in GetContents() interface
+ GetContents(ctx)
+}
diff --git a/routers/api/v1/repo/flags.go b/routers/api/v1/repo/flags.go
new file mode 100644
index 0000000..ac5cb2e
--- /dev/null
+++ b/routers/api/v1/repo/flags.go
@@ -0,0 +1,245 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+)
+
+func ListFlags(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/flags repository repoListFlags
+ // ---
+ // summary: List a repository's flags
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/StringSlice"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repoFlags, err := ctx.Repo.Repository.ListFlags(ctx)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ flags := make([]string, len(repoFlags))
+ for i := range repoFlags {
+ flags[i] = repoFlags[i].Name
+ }
+
+ ctx.SetTotalCountHeader(int64(len(repoFlags)))
+ ctx.JSON(http.StatusOK, flags)
+}
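+
+// Illustrative usage (not part of the handler): repository flags are plain strings,
+// so the endpoint returns a JSON string array; the flag names are placeholders:
+//
+//	GET /api/v1/repos/owner/repo/flags  ->  200 ["needs-review", "sensitive"]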
+
+func ReplaceAllFlags(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/flags repository repoReplaceAllFlags
+ // ---
+ // summary: Replace all flags of a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/ReplaceFlagsOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ flagsForm := web.GetForm(ctx).(*api.ReplaceFlagsOption)
+
+ if err := ctx.Repo.Repository.ReplaceAllFlags(ctx, flagsForm.Flags); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+func DeleteAllFlags(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/flags repository repoDeleteAllFlags
+ // ---
+ // summary: Remove all flags from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := ctx.Repo.Repository.ReplaceAllFlags(ctx, nil); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+func HasFlag(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/flags/{flag} repository repoCheckFlag
+ // ---
+ // summary: Check if a repository has a given flag
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: flag
+ // in: path
+ // description: name of the flag
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ hasFlag := ctx.Repo.Repository.HasFlag(ctx, ctx.Params(":flag"))
+ if hasFlag {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+}
+
+func AddFlag(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/flags/{flag} repository repoAddFlag
+ // ---
+ // summary: Add a flag to a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: flag
+ // in: path
+ // description: name of the flag
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ flag := ctx.Params(":flag")
+
+ if ctx.Repo.Repository.HasFlag(ctx, flag) {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ if err := ctx.Repo.Repository.AddFlag(ctx, flag); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+func DeleteFlag(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/flags/{flag} repository repoDeleteFlag
+ // ---
+ // summary: Remove a flag from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: flag
+ // in: path
+ // description: name of the flag
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ flag := ctx.Params(":flag")
+
+ if _, err := ctx.Repo.Repository.DeleteFlag(ctx, flag); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/fork.go b/routers/api/v1/repo/fork.go
new file mode 100644
index 0000000..c9dc968
--- /dev/null
+++ b/routers/api/v1/repo/fork.go
@@ -0,0 +1,167 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ quota_model "code.gitea.io/gitea/models/quota"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListForks list a repository's forks
+func ListForks(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/forks repository listForks
+ // ---
+ // summary: List a repository's forks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ forks, total, err := repo_model.GetForks(ctx, ctx.Repo.Repository, ctx.Doer, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetForks", err)
+ return
+ }
+ apiForks := make([]*api.Repository, len(forks))
+ for i, fork := range forks {
+ permission, err := access_model.GetUserRepoPermission(ctx, fork, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ }
+ apiForks[i] = convert.ToRepo(ctx, fork, permission)
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, apiForks)
+}
+
+// CreateFork create a fork of a repo
+func CreateFork(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/forks repository createFork
+ // ---
+ // summary: Fork a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to fork
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to fork
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateForkOption"
+ // responses:
+ // "202":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // description: The repository with the same name already exists.
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
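+ // Illustrative request (organization and fork name are hypothetical):
+ // POST /repos/{owner}/{repo}/forks with body {"organization": "my-org", "name": "my-fork"}
+ // forks the repository into the organization "my-org" under the name "my-fork". Both body
+ // fields are optional; the handler below falls back to the authenticated user and the
+ // base repository name.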
+ form := web.GetForm(ctx).(*api.CreateForkOption)
+ repo := ctx.Repo.Repository
+ var forker *user_model.User // user/org that will own the fork
+ if form.Organization == nil {
+ forker = ctx.Doer
+ } else {
+ org, err := organization.GetOrgByName(ctx, *form.Organization)
+ if err != nil {
+ if organization.IsErrOrgNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetOrgByName", err)
+ }
+ return
+ }
+ isMember, err := org.IsOrgMember(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOrgMember", err)
+ return
+ } else if !isMember {
+ ctx.Error(http.StatusForbidden, "isMemberNot", fmt.Sprintf("User is not a member of the organisation '%s'", org.Name))
+ return
+ }
+ forker = org.AsUser()
+ }
+
+ if !ctx.CheckQuota(quota_model.LimitSubjectSizeReposAll, forker.ID, forker.Name) {
+ return
+ }
+
+ var name string
+ if form.Name == nil {
+ name = repo.Name
+ } else {
+ name = *form.Name
+ }
+
+ fork, err := repo_service.ForkRepositoryAndUpdates(ctx, ctx.Doer, forker, repo_service.ForkRepoOptions{
+ BaseRepo: repo,
+ Name: name,
+ Description: repo.Description,
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrAlreadyExist) || repo_model.IsErrReachLimitOfRepo(err) {
+ ctx.Error(http.StatusConflict, "ForkRepositoryAndUpdates", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "ForkRepositoryAndUpdates", err)
+ }
+ return
+ }
+
+ // TODO change back to 201
+ ctx.JSON(http.StatusAccepted, convert.ToRepo(ctx, fork, access_model.Permission{AccessMode: perm.AccessModeOwner}))
+}
diff --git a/routers/api/v1/repo/git_hook.go b/routers/api/v1/repo/git_hook.go
new file mode 100644
index 0000000..26ae84d
--- /dev/null
+++ b/routers/api/v1/repo/git_hook.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListGitHooks list all Git hooks of a repository
+func ListGitHooks(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/hooks/git repository repoListGitHooks
+ // ---
+ // summary: List the Git hooks in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitHookList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ hooks, err := ctx.Repo.GitRepo.Hooks()
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "Hooks", err)
+ return
+ }
+
+ apiHooks := make([]*api.GitHook, len(hooks))
+ for i := range hooks {
+ apiHooks[i] = convert.ToGitHook(hooks[i])
+ }
+ ctx.JSON(http.StatusOK, &apiHooks)
+}
+
+// GetGitHook get a repo's Git hook by id
+func GetGitHook(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/hooks/git/{id} repository repoGetGitHook
+ // ---
+ // summary: Get a Git hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitHook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ hookID := ctx.Params(":id")
+ hook, err := ctx.Repo.GitRepo.GetHook(hookID)
+ if err != nil {
+ if err == git.ErrNotValidHook {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetHook", err)
+ }
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToGitHook(hook))
+}
+
+// EditGitHook modify a Git hook of a repository
+func EditGitHook(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/hooks/git/{id} repository repoEditGitHook
+ // ---
+ // summary: Edit a Git hook in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to edit
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditGitHookOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitHook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditGitHookOption)
+ hookID := ctx.Params(":id")
+ hook, err := ctx.Repo.GitRepo.GetHook(hookID)
+ if err != nil {
+ if err == git.ErrNotValidHook {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetHook", err)
+ }
+ return
+ }
+
+ hook.Content = form.Content
+ if err = hook.Update(); err != nil {
+ ctx.Error(http.StatusInternalServerError, "hook.Update", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToGitHook(hook))
+}
+
+// DeleteGitHook delete a Git hook of a repository
+func DeleteGitHook(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/hooks/git/{id} repository repoDeleteGitHook
+ // ---
+ // summary: Delete a Git hook in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ hookID := ctx.Params(":id")
+ hook, err := ctx.Repo.GitRepo.GetHook(hookID)
+ if err != nil {
+ if err == git.ErrNotValidHook {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetHook", err)
+ }
+ return
+ }
+
+ hook.Content = ""
+ if err = hook.Update(); err != nil {
+ ctx.Error(http.StatusInternalServerError, "hook.Update", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/git_ref.go b/routers/api/v1/repo/git_ref.go
new file mode 100644
index 0000000..54da5ee
--- /dev/null
+++ b/routers/api/v1/repo/git_ref.go
@@ -0,0 +1,107 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "net/url"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+)
+
+// GetGitAllRefs get a ref or list all the refs of a repository
+func GetGitAllRefs(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/refs repository repoListAllGitRefs
+ // ---
+ // summary: Get specified ref or filtered repository's refs
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // # "$ref": "#/responses/Reference" TODO: swagger doesn't support different output formats by ref
+ // "$ref": "#/responses/ReferenceList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ getGitRefsInternal(ctx, "")
+}
+
+// GetGitRefs get a ref or a filtered list of refs of a repository
+func GetGitRefs(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/refs/{ref} repository repoListGitRefs
+ // ---
+ // summary: Get specified ref or filtered repository's refs
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: ref
+ // in: path
+ // description: part or full name of the ref
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // # "$ref": "#/responses/Reference" TODO: swagger doesn't support different output formats by ref
+ // "$ref": "#/responses/ReferenceList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
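+ // Illustrative requests (paths are examples only): GET /repos/{owner}/{repo}/git/refs/heads
+ // lists all branch references, while GET /repos/{owner}/{repo}/git/refs/tags/v1.0 narrows
+ // the list to references under refs/tags/v1.0.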
+ getGitRefsInternal(ctx, ctx.Params("*"))
+}
+
+func getGitRefsInternal(ctx *context.APIContext, filter string) {
+ refs, lastMethodName, err := utils.GetGitRefs(ctx, filter)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, lastMethodName, err)
+ return
+ }
+
+ if len(refs) == 0 {
+ ctx.NotFound()
+ return
+ }
+
+ apiRefs := make([]*api.Reference, len(refs))
+ for i := range refs {
+ apiRefs[i] = &api.Reference{
+ Ref: refs[i].Name,
+ URL: ctx.Repo.Repository.APIURL() + "/git/" + util.PathEscapeSegments(refs[i].Name),
+ Object: &api.GitObject{
+ SHA: refs[i].Object.String(),
+ Type: refs[i].Type,
+ URL: ctx.Repo.Repository.APIURL() + "/git/" + url.PathEscape(refs[i].Type) + "s/" + url.PathEscape(refs[i].Object.String()),
+ },
+ }
+ }
+ // If a single reference is found and it matches the filter exactly, return it as an object
+ if len(apiRefs) == 1 && apiRefs[0].Ref == filter {
+ ctx.JSON(http.StatusOK, &apiRefs[0])
+ return
+ }
+ ctx.JSON(http.StatusOK, &apiRefs)
+}
diff --git a/routers/api/v1/repo/hook.go b/routers/api/v1/repo/hook.go
new file mode 100644
index 0000000..ffd2313
--- /dev/null
+++ b/routers/api/v1/repo/hook.go
@@ -0,0 +1,308 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ "code.gitea.io/gitea/models/webhook"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+// ListHooks list all hooks of a repository
+func ListHooks(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/hooks repository repoListHooks
+ // ---
+ // summary: List the hooks in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/HookList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := &webhook.ListWebhookOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ }
+
+ hooks, count, err := db.FindAndCount[webhook.Webhook](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiHooks := make([]*api.Hook, len(hooks))
+ for i := range hooks {
+ apiHooks[i], err = webhook_service.ToHook(ctx.Repo.RepoLink, hooks[i])
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &apiHooks)
+}
+
+// GetHook get a repo's hook by id
+func GetHook(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/hooks/{id} repository repoGetHook
+ // ---
+ // summary: Get a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo
+ hookID := ctx.ParamsInt64(":id")
+ hook, err := utils.GetRepoHook(ctx, repo.Repository.ID, hookID)
+ if err != nil {
+ return
+ }
+ apiHook, err := webhook_service.ToHook(repo.RepoLink, hook)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiHook)
+}
+
+// TestHook tests a hook
+func TestHook(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/hooks/{id}/tests repository repoTestHook
+ // ---
+ // summary: Test a push webhook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to test
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: ref
+ // in: query
+ // description: "The name of the commit/branch/tag, indicates which commit will be loaded to the webhook payload."
+ // type: string
+ // required: false
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
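+ // Illustrative call (hook id and ref are examples only):
+ // POST /repos/{owner}/{repo}/hooks/1/tests?ref=refs/heads/main delivers a synthetic push
+ // payload, built from the repository's current head commit, to hook 1.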
+ if ctx.Repo.Commit == nil {
+ // if repo does not have any commits, then don't send a webhook
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ ref := git.BranchPrefix + ctx.Repo.Repository.DefaultBranch
+ if r := ctx.FormTrim("ref"); r != "" {
+ ref = r
+ }
+
+ hookID := ctx.ParamsInt64(":id")
+ hook, err := utils.GetRepoHook(ctx, ctx.Repo.Repository.ID, hookID)
+ if err != nil {
+ return
+ }
+
+ commit := convert.ToPayloadCommit(ctx, ctx.Repo.Repository, ctx.Repo.Commit)
+
+ commitID := ctx.Repo.Commit.ID.String()
+ if err := webhook_service.PrepareWebhook(ctx, hook, webhook_module.HookEventPush, &api.PushPayload{
+ Ref: ref,
+ Before: commitID,
+ After: commitID,
+ CompareURL: setting.AppURL + ctx.Repo.Repository.ComposeCompareURL(commitID, commitID),
+ Commits: []*api.PayloadCommit{commit},
+ TotalCommits: 1,
+ HeadCommit: commit,
+ Repo: convert.ToRepo(ctx, ctx.Repo.Repository, access_model.Permission{AccessMode: perm.AccessModeNone}),
+ Pusher: convert.ToUserWithAccessMode(ctx, ctx.Doer, perm.AccessModeNone),
+ Sender: convert.ToUserWithAccessMode(ctx, ctx.Doer, perm.AccessModeNone),
+ }); err != nil {
+ ctx.Error(http.StatusInternalServerError, "PrepareWebhook: ", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateHook create a hook for a repository
+func CreateHook(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/hooks repository repoCreateHook
+ // ---
+ // summary: Create a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateHookOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ utils.AddRepoHook(ctx, web.GetForm(ctx).(*api.CreateHookOption))
+}
+
+// EditHook modify a hook of a repository
+func EditHook(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/hooks/{id} repository repoEditHook
+ // ---
+ // summary: Edit a hook in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: index of the hook
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditHookOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.EditHookOption)
+ hookID := ctx.ParamsInt64(":id")
+ utils.EditRepoHook(ctx, form, hookID)
+}
+
+// DeleteHook delete a hook of a repository
+func DeleteHook(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/hooks/{id} repository repoDeleteHook
+ // ---
+ // summary: Delete a hook in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the hook to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ if err := webhook.DeleteWebhookByRepoID(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":id")); err != nil {
+ if webhook.IsErrWebhookNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteWebhookByRepoID", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/hook_test.go b/routers/api/v1/repo/hook_test.go
new file mode 100644
index 0000000..a8065e4
--- /dev/null
+++ b/routers/api/v1/repo/hook_test.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/models/webhook"
+ "code.gitea.io/gitea/services/contexttest"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTestHook(t *testing.T) {
+ unittest.PrepareTestEnv(t)
+
+ ctx, _ := contexttest.MockAPIContext(t, "user2/repo1/wiki/_pages")
+ ctx.SetParams(":id", "1")
+ contexttest.LoadUser(t, ctx, 2)
+ contexttest.LoadRepo(t, ctx, 1)
+ contexttest.LoadGitRepo(t, ctx)
+ defer ctx.Repo.GitRepo.Close()
+ contexttest.LoadRepoCommit(t, ctx)
+ TestHook(ctx)
+ assert.EqualValues(t, http.StatusNoContent, ctx.Resp.Status())
+
+ unittest.AssertExistsAndLoadBean(t, &webhook.HookTask{
+ HookID: 1,
+ }, unittest.Cond("is_delivered=?", false))
+}
diff --git a/routers/api/v1/repo/issue.go b/routers/api/v1/repo/issue.go
new file mode 100644
index 0000000..99cd980
--- /dev/null
+++ b/routers/api/v1/repo/issue.go
@@ -0,0 +1,1041 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// SearchIssues searches for issues across the repositories that the user has access to
+func SearchIssues(ctx *context.APIContext) {
+ // swagger:operation GET /repos/issues/search issue issueSearchIssues
+ // ---
+ // summary: Search for issues across the repositories that the user has access to
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: state
+ // in: query
+ // description: whether issue is open or closed
+ // type: string
+ // - name: labels
+ // in: query
+ // description: comma-separated list of labels. Fetch only issues that have any of these labels. Non-existent labels are discarded
+ // type: string
+ // - name: milestones
+ // in: query
+ // description: comma-separated list of milestone names. Fetch only issues that have any of these milestones. Non-existent milestones are discarded
+ // type: string
+ // - name: q
+ // in: query
+ // description: search string
+ // type: string
+ // - name: priority_repo_id
+ // in: query
+ // description: repository to prioritize in the results
+ // type: integer
+ // format: int64
+ // - name: type
+ // in: query
+ // description: filter by type (issues / pulls) if set
+ // type: string
+ // - name: since
+ // in: query
+ // description: Only show issues updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // required: false
+ // - name: before
+ // in: query
+ // description: Only show issues updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // required: false
+ // - name: assigned
+ // in: query
+ // description: filter (issues / pulls) assigned to you, default is false
+ // type: boolean
+ // - name: created
+ // in: query
+ // description: filter (issues / pulls) created by you, default is false
+ // type: boolean
+ // - name: mentioned
+ // in: query
+ // description: filter (issues / pulls) mentioning you, default is false
+ // type: boolean
+ // - name: review_requested
+ // in: query
+ // description: filter pulls requesting your review, default is false
+ // type: boolean
+ // - name: reviewed
+ // in: query
+ // description: filter pulls reviewed by you, default is false
+ // type: boolean
+ // - name: owner
+ // in: query
+ // description: filter by owner
+ // type: string
+ // - name: team
+ // in: query
+ // description: filter by team (requires organization owner parameter to be provided)
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueList"
+
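+ // Illustrative query (values are examples only):
+ // GET /repos/issues/search?state=open&type=issues&labels=bug,question&limit=20 returns up
+ // to 20 open issues, across repositories visible to the caller, that carry either of the
+ // labels "bug" or "question".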
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
+ var isClosed optional.Option[bool]
+ switch ctx.FormString("state") {
+ case "closed":
+ isClosed = optional.Some(true)
+ case "all":
+ isClosed = optional.None[bool]()
+ default:
+ isClosed = optional.Some(false)
+ }
+
+ var (
+ repoIDs []int64
+ allPublic bool
+ )
+ {
+ // find repos user can access (for issue search)
+ opts := &repo_model.SearchRepoOptions{
+ Private: false,
+ AllPublic: true,
+ TopicOnly: false,
+ Collaborate: optional.None[bool](),
+ // This needs to be a column that is not nil in fixtures, otherwise
+ // MySQL will return different results when sorting by null in some cases
+ OrderBy: db.SearchOrderByAlphabetically,
+ Actor: ctx.Doer,
+ }
+ if ctx.IsSigned {
+ opts.Private = !ctx.PublicOnly
+ opts.AllLimited = true
+ }
+ if ctx.FormString("owner") != "" {
+ owner, err := user_model.GetUserByName(ctx, ctx.FormString("owner"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusBadRequest, "Owner not found", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+ opts.OwnerID = owner.ID
+ opts.AllLimited = false
+ opts.AllPublic = false
+ opts.Collaborate = optional.Some(false)
+ }
+ if ctx.FormString("team") != "" {
+ if ctx.FormString("owner") == "" {
+ ctx.Error(http.StatusBadRequest, "", "Owner organisation is required for filtering on team")
+ return
+ }
+ team, err := organization.GetTeam(ctx, opts.OwnerID, ctx.FormString("team"))
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusBadRequest, "Team not found", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetTeam", err)
+ }
+ return
+ }
+ opts.TeamID = team.ID
+ }
+
+ if opts.AllPublic {
+ allPublic = true
+ opts.AllPublic = false // set it to false to avoid returning too many repos; we could filter by the indexer instead
+ }
+ repoIDs, _, err = repo_model.SearchRepositoryIDs(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchRepositoryIDs", err)
+ return
+ }
+ if len(repoIDs) == 0 {
+ // no repos found, don't let the indexer return all repos
+ repoIDs = []int64{0}
+ }
+ }
+
+ keyword := ctx.FormTrim("q")
+ if strings.IndexByte(keyword, 0) >= 0 {
+ keyword = ""
+ }
+
+ var isPull optional.Option[bool]
+ switch ctx.FormString("type") {
+ case "pulls":
+ isPull = optional.Some(true)
+ case "issues":
+ isPull = optional.Some(false)
+ default:
+ isPull = optional.None[bool]()
+ }
+
+ var includedAnyLabels []int64
+ {
+ labels := ctx.FormTrim("labels")
+ var includedLabelNames []string
+ if len(labels) > 0 {
+ includedLabelNames = strings.Split(labels, ",")
+ }
+ includedAnyLabels, err = issues_model.GetLabelIDsByNames(ctx, includedLabelNames)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelIDsByNames", err)
+ return
+ }
+ }
+
+ var includedMilestones []int64
+ {
+ milestones := ctx.FormTrim("milestones")
+ var includedMilestoneNames []string
+ if len(milestones) > 0 {
+ includedMilestoneNames = strings.Split(milestones, ",")
+ }
+ includedMilestones, err = issues_model.GetMilestoneIDsByNames(ctx, includedMilestoneNames)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneIDsByNames", err)
+ return
+ }
+ }
+
+ // this api is also used in UI,
+ // so the default limit is set to fit UI needs
+ limit := ctx.FormInt("limit")
+ if limit == 0 {
+ limit = setting.UI.IssuePagingNum
+ } else if limit > setting.API.MaxResponseItems {
+ limit = setting.API.MaxResponseItems
+ }
+
+ searchOpt := &issue_indexer.SearchOptions{
+ Paginator: &db.ListOptions{
+ PageSize: limit,
+ Page: ctx.FormInt("page"),
+ },
+ Keyword: keyword,
+ RepoIDs: repoIDs,
+ AllPublic: allPublic,
+ IsPull: isPull,
+ IsClosed: isClosed,
+ IncludedAnyLabelIDs: includedAnyLabels,
+ MilestoneIDs: includedMilestones,
+ SortBy: issue_indexer.SortByCreatedDesc,
+ }
+
+ if since != 0 {
+ searchOpt.UpdatedAfterUnix = optional.Some(since)
+ }
+ if before != 0 {
+ searchOpt.UpdatedBeforeUnix = optional.Some(before)
+ }
+
+ if ctx.IsSigned {
+ ctxUserID := ctx.Doer.ID
+ if ctx.FormBool("created") {
+ searchOpt.PosterID = optional.Some(ctxUserID)
+ }
+ if ctx.FormBool("assigned") {
+ searchOpt.AssigneeID = optional.Some(ctxUserID)
+ }
+ if ctx.FormBool("mentioned") {
+ searchOpt.MentionID = optional.Some(ctxUserID)
+ }
+ if ctx.FormBool("review_requested") {
+ searchOpt.ReviewRequestedID = optional.Some(ctxUserID)
+ }
+ if ctx.FormBool("reviewed") {
+ searchOpt.ReviewedID = optional.Some(ctxUserID)
+ }
+ }
+
+ // FIXME: Sorting by priority repo is not supported when searching via the indexer.
+ // This is indeed a regression, but I think it is worth supporting indexer-based filtering first.
+ _ = ctx.FormInt64("priority_repo_id")
+
+ ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchIssues", err)
+ return
+ }
+ issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetIssuesByIDs", err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(total), limit)
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, ctx.Doer, issues))
+}
+
+// ListIssues list the issues of a repository
+func ListIssues(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues issue issueListIssues
+ // ---
+ // summary: List a repository's issues
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: state
+ // in: query
+ // description: whether issue is open or closed
+ // type: string
+ // enum: [closed, open, all]
+ // - name: labels
+ // in: query
+ // description: comma-separated list of labels. Fetch only issues that have any of these labels. Non-existent labels are discarded
+ // type: string
+ // - name: q
+ // in: query
+ // description: search string
+ // type: string
+ // - name: type
+ // in: query
+ // description: filter by type (issues / pulls) if set
+ // type: string
+ // enum: [issues, pulls]
+ // - name: milestones
+ // in: query
+ // description: comma-separated list of milestone names or ids. It uses names and falls back to ids. Fetch only issues that have any of these milestones. Non-existent milestones are discarded
+ // type: string
+ // - name: since
+ // in: query
+ // description: Only show items updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // required: false
+ // - name: before
+ // in: query
+ // description: Only show items updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // required: false
+ // - name: created_by
+ // in: query
+ // description: Only show items which were created by the given user
+ // type: string
+ // - name: assigned_by
+ // in: query
+ // description: Only show items for which the given user is assigned
+ // type: string
+ // - name: mentioned_by
+ // in: query
+ // description: Only show items in which the given user was mentioned
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
+ var isClosed optional.Option[bool]
+ switch ctx.FormString("state") {
+ case "closed":
+ isClosed = optional.Some(true)
+ case "all":
+ isClosed = optional.None[bool]()
+ default:
+ isClosed = optional.Some(false)
+ }
+
+ keyword := ctx.FormTrim("q")
+ if strings.IndexByte(keyword, 0) >= 0 {
+ keyword = ""
+ }
+
+ var labelIDs []int64
+ if split := strings.Split(ctx.FormString("labels"), ","); len(split) > 0 {
+ labelIDs, err = issues_model.GetLabelIDsInRepoByNames(ctx, ctx.Repo.Repository.ID, split)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelIDsInRepoByNames", err)
+ return
+ }
+ }
+
+ var mileIDs []int64
+ if part := strings.Split(ctx.FormString("milestones"), ","); len(part) > 0 {
+ for i := range part {
+ // uses names and falls back to ids
+ // non-existent milestones are discarded
+ mile, err := issues_model.GetMilestoneByRepoIDANDName(ctx, ctx.Repo.Repository.ID, part[i])
+ if err == nil {
+ mileIDs = append(mileIDs, mile.ID)
+ continue
+ }
+ if !issues_model.IsErrMilestoneNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneByRepoIDANDName", err)
+ return
+ }
+ id, err := strconv.ParseInt(part[i], 10, 64)
+ if err != nil {
+ continue
+ }
+ mile, err = issues_model.GetMilestoneByRepoID(ctx, ctx.Repo.Repository.ID, id)
+ if err == nil {
+ mileIDs = append(mileIDs, mile.ID)
+ continue
+ }
+ if issues_model.IsErrMilestoneNotExist(err) {
+ continue
+ }
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneByRepoID", err)
+ }
+ }
+
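+ // For example (illustrative value): milestones=v1.0,7 first tries to resolve "v1.0" and "7"
+ // as milestone names and only falls back to treating "7" as a milestone id if no milestone
+ // with that name exists.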
+ listOptions := utils.GetListOptions(ctx)
+
+ isPull := optional.None[bool]()
+ switch ctx.FormString("type") {
+ case "pulls":
+ isPull = optional.Some(true)
+ case "issues":
+ isPull = optional.Some(false)
+ }
+
+ if isPull.Has() && !ctx.Repo.CanReadIssuesOrPulls(isPull.Value()) {
+ ctx.NotFound()
+ return
+ }
+
+ if !isPull.Has() {
+ canReadIssues := ctx.Repo.CanRead(unit.TypeIssues)
+ canReadPulls := ctx.Repo.CanRead(unit.TypePullRequests)
+ if !canReadIssues && !canReadPulls {
+ ctx.NotFound()
+ return
+ } else if !canReadIssues {
+ isPull = optional.Some(true)
+ } else if !canReadPulls {
+ isPull = optional.Some(false)
+ }
+ }
+
+ // FIXME: we should be more efficient here
+ createdByID := getUserIDForFilter(ctx, "created_by")
+ if ctx.Written() {
+ return
+ }
+ assignedByID := getUserIDForFilter(ctx, "assigned_by")
+ if ctx.Written() {
+ return
+ }
+ mentionedByID := getUserIDForFilter(ctx, "mentioned_by")
+ if ctx.Written() {
+ return
+ }
+
+ searchOpt := &issue_indexer.SearchOptions{
+ Paginator: &listOptions,
+ Keyword: keyword,
+ RepoIDs: []int64{ctx.Repo.Repository.ID},
+ IsPull: isPull,
+ IsClosed: isClosed,
+ SortBy: issue_indexer.SortByCreatedDesc,
+ }
+ if since != 0 {
+ searchOpt.UpdatedAfterUnix = optional.Some(since)
+ }
+ if before != 0 {
+ searchOpt.UpdatedBeforeUnix = optional.Some(before)
+ }
+ if len(labelIDs) == 1 && labelIDs[0] == 0 {
+ searchOpt.NoLabelOnly = true
+ } else {
+ for _, labelID := range labelIDs {
+ if labelID > 0 {
+ searchOpt.IncludedLabelIDs = append(searchOpt.IncludedLabelIDs, labelID)
+ } else {
+ searchOpt.ExcludedLabelIDs = append(searchOpt.ExcludedLabelIDs, -labelID)
+ }
+ }
+ }
+
+ if len(mileIDs) == 1 && mileIDs[0] == db.NoConditionID {
+ searchOpt.MilestoneIDs = []int64{0}
+ } else {
+ searchOpt.MilestoneIDs = mileIDs
+ }
+
+ if createdByID > 0 {
+ searchOpt.PosterID = optional.Some(createdByID)
+ }
+ if assignedByID > 0 {
+ searchOpt.AssigneeID = optional.Some(assignedByID)
+ }
+ if mentionedByID > 0 {
+ searchOpt.MentionID = optional.Some(mentionedByID)
+ }
+
+ ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchIssues", err)
+ return
+ }
+ issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetIssuesByIDs", err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(total), listOptions.PageSize)
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, ctx.Doer, issues))
+}
+
+func getUserIDForFilter(ctx *context.APIContext, queryName string) int64 {
+ userName := ctx.FormString(queryName)
+ if len(userName) == 0 {
+ return 0
+ }
+
+ user, err := user_model.GetUserByName(ctx, userName)
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound(err)
+ return 0
+ }
+
+ if err != nil {
+ ctx.InternalServerError(err)
+ return 0
+ }
+
+ return user.ID
+}
+
+// GetIssue get an issue of a repository
+func GetIssue(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index} issue issueGetIssue
+ // ---
+ // summary: Get an issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Issue"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueWithAttrsByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+ if !ctx.Repo.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIIssue(ctx, ctx.Doer, issue))
+}
+
+// CreateIssue create an issue of a repository
+func CreateIssue(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues issue issueCreateIssue
+ // ---
+ // summary: Create an issue. If using a deadline, only the date will be taken into account, and the time of day ignored.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateIssueOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Issue"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "412":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ form := web.GetForm(ctx).(*api.CreateIssueOption)
+ var deadlineUnix timeutil.TimeStamp
+ if form.Deadline != nil && ctx.Repo.CanWrite(unit.TypeIssues) {
+ deadlineUnix = timeutil.TimeStamp(form.Deadline.Unix())
+ }
+
+ issue := &issues_model.Issue{
+ RepoID: ctx.Repo.Repository.ID,
+ Repo: ctx.Repo.Repository,
+ Title: form.Title,
+ PosterID: ctx.Doer.ID,
+ Poster: ctx.Doer,
+ Content: form.Body,
+ Ref: form.Ref,
+ DeadlineUnix: deadlineUnix,
+ }
+
+ assigneeIDs := make([]int64, 0)
+ var err error
+ if ctx.Repo.CanWrite(unit.TypeIssues) {
+ issue.MilestoneID = form.Milestone
+ assigneeIDs, err = issues_model.MakeIDsFromAPIAssigneesToAdd(ctx, form.Assignee, form.Assignees)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Assignee does not exist: [name: %s]", err))
+ } else {
+ ctx.Error(http.StatusInternalServerError, "AddAssigneeByName", err)
+ }
+ return
+ }
+
+ // Check if the passed assignees are assignable
+ for _, aID := range assigneeIDs {
+ assignee, err := user_model.GetUserByID(ctx, aID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserByID", err)
+ return
+ }
+
+ valid, err := access_model.CanBeAssigned(ctx, assignee, ctx.Repo.Repository, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "canBeAssigned", err)
+ return
+ }
+ if !valid {
+ ctx.Error(http.StatusUnprocessableEntity, "canBeAssigned", repo_model.ErrUserDoesNotHaveAccessToRepo{UserID: aID, RepoName: ctx.Repo.Repository.Name})
+ return
+ }
+ }
+ } else {
+ // setting labels is not allowed if user is not a writer
+ form.Labels = make([]int64, 0)
+ }
+
+ if err := issue_service.NewIssue(ctx, ctx.Repo.Repository, issue, form.Labels, nil, assigneeIDs); err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "BlockedByUser", err)
+ return
+ } else if repo_model.IsErrUserDoesNotHaveAccessToRepo(err) {
+ ctx.Error(http.StatusBadRequest, "UserDoesNotHaveAccessToRepo", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "NewIssue", err)
+ return
+ }
+
+ if form.Closed {
+ if err := issue_service.ChangeStatus(ctx, issue, ctx.Doer, "", true); err != nil {
+ if issues_model.IsErrDependenciesLeft(err) {
+ ctx.Error(http.StatusPreconditionFailed, "DependenciesLeft", "cannot close this issue because it still has open dependencies")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ChangeStatus", err)
+ return
+ }
+ }
+
+ // Refetch from database to assign some automatic values
+ issue, err = issues_model.GetIssueByID(ctx, issue.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByID", err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, issue))
+}
+
+// EditIssue modify an issue of a repository
+func EditIssue(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/{index} issue issueEditIssue
+ // ---
+ // summary: Edit an issue. If using a deadline, only the date will be taken into account, and the time of day ignored.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditIssueOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Issue"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "412":
+ // "$ref": "#/responses/error"
+
+ form := web.GetForm(ctx).(*api.EditIssueOption)
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+ issue.Repo = ctx.Repo.Repository
+ canWrite := ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull)
+
+ err = issue.LoadAttributes(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ if !issue.IsPoster(ctx.Doer.ID) && !canWrite {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ err = issue_service.SetIssueUpdateDate(ctx, issue, form.Updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+
+ if len(form.Title) > 0 {
+ err = issue_service.ChangeTitle(ctx, issue, ctx.Doer, form.Title)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeTitle", err)
+ return
+ }
+ }
+ if form.Body != nil {
+ err = issue_service.ChangeContent(ctx, issue, ctx.Doer, *form.Body, issue.ContentVersion)
+ if err != nil {
+ if errors.Is(err, issues_model.ErrIssueAlreadyChanged) {
+ ctx.Error(http.StatusBadRequest, "ChangeContent", err)
+ return
+ }
+
+ ctx.Error(http.StatusInternalServerError, "ChangeContent", err)
+ return
+ }
+ }
+ if form.Ref != nil {
+ err = issue_service.ChangeIssueRef(ctx, issue, ctx.Doer, *form.Ref)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateRef", err)
+ return
+ }
+ }
+
+ // Update or remove the deadline, only if set and allowed
+ if (form.Deadline != nil || form.RemoveDeadline != nil) && canWrite {
+ var deadlineUnix timeutil.TimeStamp
+
+ if form.RemoveDeadline == nil || !*form.RemoveDeadline {
+ if form.Deadline == nil {
+ ctx.Error(http.StatusBadRequest, "", "The due_date cannot be empty")
+ return
+ }
+ if !form.Deadline.IsZero() {
+ deadline := time.Date(form.Deadline.Year(), form.Deadline.Month(), form.Deadline.Day(),
+ 23, 59, 59, 0, form.Deadline.Location())
+ deadlineUnix = timeutil.TimeStamp(deadline.Unix())
+ }
+ }
+
+ if err := issues_model.UpdateIssueDeadline(ctx, issue, deadlineUnix, ctx.Doer); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateIssueDeadline", err)
+ return
+ }
+ issue.DeadlineUnix = deadlineUnix
+ }
+
+ // Add/delete assignees
+
+ // Deleting is done the GitHub way (quote from their api documentation):
+ // https://developer.github.com/v3/issues/#edit-an-issue
+ // "assignees" (array): Logins for Users to assign to this issue.
+ // Pass one or more user logins to replace the set of assignees on this Issue.
+ // Send an empty array ([]) to clear all assignees from the Issue.
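+ // For example (illustrative values): {"assignees": ["alice", "bob"]} replaces the current
+ // assignee set with alice and bob, while {"assignees": []} clears all assignees.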
+
+ if canWrite && (form.Assignees != nil || form.Assignee != nil) {
+ oneAssignee := ""
+ if form.Assignee != nil {
+ oneAssignee = *form.Assignee
+ }
+
+ err = issue_service.UpdateAssignees(ctx, issue, oneAssignee, form.Assignees, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateAssignees", err)
+ return
+ }
+ }
+
+ if canWrite && form.Milestone != nil &&
+ issue.MilestoneID != *form.Milestone {
+ oldMilestoneID := issue.MilestoneID
+ issue.MilestoneID = *form.Milestone
+ if err = issue_service.ChangeMilestoneAssign(ctx, issue, ctx.Doer, oldMilestoneID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeMilestoneAssign", err)
+ return
+ }
+ }
+ if form.State != nil {
+ if issue.IsPull {
+ if err := issue.LoadPullRequest(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPullRequest", err)
+ return
+ }
+ if issue.PullRequest.HasMerged {
+ ctx.Error(http.StatusPreconditionFailed, "MergedPRState", "cannot change state of this pull request, it was already merged")
+ return
+ }
+ }
+ isClosed := api.StateClosed == api.StateType(*form.State)
+ if issue.IsClosed != isClosed {
+ if err := issue_service.ChangeStatus(ctx, issue, ctx.Doer, "", isClosed); err != nil {
+ if issues_model.IsErrDependenciesLeft(err) {
+ ctx.Error(http.StatusPreconditionFailed, "DependenciesLeft", "cannot close this issue because it still has open dependencies")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ChangeStatus", err)
+ return
+ }
+ }
+ }
+
+ // Refetch from database to assign some automatic values
+ issue, err = issues_model.GetIssueByID(ctx, issue.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if err = issue.LoadMilestone(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, issue))
+}
+
+func DeleteIssue(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index} issue issueDelete
+ // ---
+ // summary: Delete an issue
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of issue to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByID", err)
+ }
+ return
+ }
+
+ if err = issue_service.DeleteIssue(ctx, ctx.Doer, ctx.Repo.GitRepo, issue); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteIssue", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UpdateIssueDeadline updates an issue deadline
+func UpdateIssueDeadline(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/deadline issue issueEditIssueDeadline
+ // ---
+ // summary: Set an issue deadline. If set to null, the deadline is deleted. If using a deadline, only the date will be taken into account, and the time of day ignored.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to create or update a deadline on
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditDeadlineOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/IssueDeadline"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.EditDeadlineOption)
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Error(http.StatusForbidden, "", "Not repo writer")
+ return
+ }
+
+ var deadlineUnix timeutil.TimeStamp
+ var deadline time.Time
+ if form.Deadline != nil && !form.Deadline.IsZero() {
+ deadline = time.Date(form.Deadline.Year(), form.Deadline.Month(), form.Deadline.Day(),
+ 23, 59, 59, 0, time.Local)
+ deadlineUnix = timeutil.TimeStamp(deadline.Unix())
+ }
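+ // For example (illustrative value): a deadline submitted as 2024-05-01T10:00:00Z keeps only
+ // the date portion and is stored as 2024-05-01 23:59:59 in the server's local time zone.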
+
+ if err := issues_model.UpdateIssueDeadline(ctx, issue, deadlineUnix, ctx.Doer); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateIssueDeadline", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, api.IssueDeadline{Deadline: &deadline})
+}
diff --git a/routers/api/v1/repo/issue_attachment.go b/routers/api/v1/repo/issue_attachment.go
new file mode 100644
index 0000000..a972ab0
--- /dev/null
+++ b/routers/api/v1/repo/issue_attachment.go
@@ -0,0 +1,411 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "time"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/attachment"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/context/upload"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// GetIssueAttachment gets a single attachment of the issue
+func GetIssueAttachment(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/assets/{attachment_id} issue issueGetIssueAttachment
+ // ---
+ // summary: Get an issue attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/error"
+
+ issue := getIssueFromContext(ctx)
+ if issue == nil {
+ return
+ }
+
+ attach := getIssueAttachmentSafeRead(ctx, issue)
+ if attach == nil {
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+}
+
+// ListIssueAttachments lists all attachments of the issue
+func ListIssueAttachments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/assets issue issueListIssueAttachments
+ // ---
+ // summary: List issue's attachments
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/AttachmentList"
+ // "404":
+ // "$ref": "#/responses/error"
+
+ issue := getIssueFromContext(ctx)
+ if issue == nil {
+ return
+ }
+
+ if err := issue.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIIssue(ctx, ctx.Doer, issue).Attachments)
+}
+
+// CreateIssueAttachment creates an attachment and saves the given file
+func CreateIssueAttachment(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/assets issue issueCreateIssueAttachment
+ // ---
+ // summary: Create an issue attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - multipart/form-data
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: name
+ // in: query
+ // description: name of the attachment
+ // type: string
+ // required: false
+ // - name: updated_at
+ // in: query
+ // description: time of the attachment's creation. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: attachment
+ // in: formData
+ // description: attachment to upload
+ // type: file
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
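+ // Illustrative upload (token, server and file name are placeholders):
+ // curl -X POST -H "Authorization: token <TOKEN>" -F attachment=@build.log \
+ //   "<server>/api/v1/repos/{owner}/{repo}/issues/1/assets?name=build.log"
+ // The multipart field must be named "attachment"; the optional "name" query parameter
+ // overrides the uploaded file name, as handled below.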
+ issue := getIssueFromContext(ctx)
+ if issue == nil {
+ return
+ }
+
+ if !canUserWriteIssueAttachment(ctx, issue) {
+ return
+ }
+
+ updatedAt := ctx.Req.FormValue("updated_at")
+ if len(updatedAt) != 0 {
+ updated, err := time.Parse(time.RFC3339, updatedAt)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "time.Parse", err)
+ return
+ }
+ err = issue_service.SetIssueUpdateDate(ctx, issue, &updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+ }
+
+ // Get uploaded file from request
+ file, header, err := ctx.Req.FormFile("attachment")
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FormFile", err)
+ return
+ }
+ defer file.Close()
+
+ filename := header.Filename
+ if query := ctx.FormString("name"); query != "" {
+ filename = query
+ }
+
+ attachment, err := attachment.UploadAttachment(ctx, file, setting.Attachment.AllowedTypes, header.Size, &repo_model.Attachment{
+ Name: filename,
+ UploaderID: ctx.Doer.ID,
+ RepoID: ctx.Repo.Repository.ID,
+ IssueID: issue.ID,
+ NoAutoTime: issue.NoAutoTime,
+ CreatedUnix: issue.UpdatedUnix,
+ })
+ if err != nil {
+ if upload.IsErrFileTypeForbidden(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UploadAttachment", err)
+ }
+ return
+ }
+
+ issue.Attachments = append(issue.Attachments, attachment)
+
+ if err := issue_service.ChangeContent(ctx, issue, ctx.Doer, issue.Content, issue.ContentVersion); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeContent", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attachment))
+}
+
+// EditIssueAttachment updates the given attachment
+func EditIssueAttachment(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/{index}/assets/{attachment_id} issue issueEditIssueAttachment
+ // ---
+ // summary: Edit an issue attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditAttachmentOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ attachment := getIssueAttachmentSafeWrite(ctx)
+ if attachment == nil {
+ return
+ }
+
+	// Apply the requested changes to the attachment; the only meaningful change is the name.
+ form := web.GetForm(ctx).(*api.EditAttachmentOptions)
+ if form.Name != "" {
+ attachment.Name = form.Name
+ }
+
+	if err := repo_model.UpdateAttachment(ctx, attachment); err != nil {
+		ctx.Error(http.StatusInternalServerError, "UpdateAttachment", err)
+		return
+	}
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attachment))
+}
+
+// DeleteIssueAttachment delete a given attachment
+func DeleteIssueAttachment(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/assets/{attachment_id} issue issueDeleteIssueAttachment
+ // ---
+ // summary: Delete an issue attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ attachment := getIssueAttachmentSafeWrite(ctx)
+ if attachment == nil {
+ return
+ }
+
+ if err := repo_model.DeleteAttachment(ctx, attachment, true); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteAttachment", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+func getIssueFromContext(ctx *context.APIContext) *issues_model.Issue {
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64("index"))
+ if err != nil {
+ ctx.NotFoundOrServerError("GetIssueByIndex", issues_model.IsErrIssueNotExist, err)
+ return nil
+ }
+
+ issue.Repo = ctx.Repo.Repository
+
+ return issue
+}
+
+func getIssueAttachmentSafeWrite(ctx *context.APIContext) *repo_model.Attachment {
+ issue := getIssueFromContext(ctx)
+ if issue == nil {
+ return nil
+ }
+
+ if !canUserWriteIssueAttachment(ctx, issue) {
+ return nil
+ }
+
+ return getIssueAttachmentSafeRead(ctx, issue)
+}
+
+func getIssueAttachmentSafeRead(ctx *context.APIContext, issue *issues_model.Issue) *repo_model.Attachment {
+ attachment, err := repo_model.GetAttachmentByID(ctx, ctx.ParamsInt64("attachment_id"))
+ if err != nil {
+ ctx.NotFoundOrServerError("GetAttachmentByID", repo_model.IsErrAttachmentNotExist, err)
+ return nil
+ }
+ if !attachmentBelongsToRepoOrIssue(ctx, attachment, issue) {
+ return nil
+ }
+ return attachment
+}
+
+func canUserWriteIssueAttachment(ctx *context.APIContext, issue *issues_model.Issue) bool {
+ canEditIssue := ctx.IsSigned && (ctx.Doer.ID == issue.PosterID || ctx.IsUserRepoAdmin() || ctx.IsUserSiteAdmin() || ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull))
+ if !canEditIssue {
+ ctx.Error(http.StatusForbidden, "", "user should have permission to write issue")
+ return false
+ }
+
+ return true
+}
+
+func attachmentBelongsToRepoOrIssue(ctx *context.APIContext, attachment *repo_model.Attachment, issue *issues_model.Issue) bool {
+ if attachment.RepoID != ctx.Repo.Repository.ID {
+ log.Debug("Requested attachment[%d] does not belong to repo[%-v].", attachment.ID, ctx.Repo.Repository)
+ ctx.NotFound("no such attachment in repo")
+ return false
+ }
+ if attachment.IssueID == 0 {
+ log.Debug("Requested attachment[%d] is not in an issue.", attachment.ID)
+ ctx.NotFound("no such attachment in issue")
+ return false
+ } else if issue != nil && attachment.IssueID != issue.ID {
+ log.Debug("Requested attachment[%d] does not belong to issue[%d, #%d].", attachment.ID, issue.ID, issue.Index)
+ ctx.NotFound("no such attachment in issue")
+ return false
+ }
+ return true
+}
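
A minimal client-side sketch of the upload flow handled by CreateIssueAttachment above, assuming a Forgejo instance at forgejo.example.com, an alice/demo repository, issue #1, a local screenshot.png and a token in FORGEJO_TOKEN (all placeholders, not part of the patch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("screenshot.png") // hypothetical local file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Build the multipart body; the field name matches ctx.Req.FormFile("attachment") above.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("attachment", "screenshot.png")
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(part, f); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// The "name" query parameter overrides the filename, as handled by the endpoint.
	url := "https://forgejo.example.com/api/v1/repos/alice/demo/issues/1/assets?name=bug.png"
	req, err := http.NewRequest(http.MethodPost, url, &body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", "token "+os.Getenv("FORGEJO_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 201 Created with an Attachment JSON body
}
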
diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go
new file mode 100644
index 0000000..1ff755c
--- /dev/null
+++ b/routers/api/v1/repo/issue_comment.go
@@ -0,0 +1,691 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ stdCtx "context"
+ "errors"
+ "net/http"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// ListIssueComments list all the comments of an issue
+func ListIssueComments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/comments issue issueGetComments
+ // ---
+ // summary: List all comments on an issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: since
+ // in: query
+ // description: if provided, only comments updated since the specified time are returned.
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: if provided, only comments updated before the provided time are returned.
+ // type: string
+ // format: date-time
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommentList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+		ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ return
+ }
+ if !ctx.Repo.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+
+ issue.Repo = ctx.Repo.Repository
+
+ opts := &issues_model.FindCommentsOptions{
+ IssueID: issue.ID,
+ Since: since,
+ Before: before,
+ Type: issues_model.CommentTypeComment,
+ }
+
+ comments, err := issues_model.FindComments(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindComments", err)
+ return
+ }
+
+ totalCount, err := issues_model.CountComments(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if err := comments.LoadPosters(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPosters", err)
+ return
+ }
+
+ if err := comments.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+
+ apiComments := make([]*api.Comment, len(comments))
+ for i, comment := range comments {
+ comment.Issue = issue
+ apiComments[i] = convert.ToAPIComment(ctx, ctx.Repo.Repository, comments[i])
+ }
+
+ ctx.SetTotalCountHeader(totalCount)
+ ctx.JSON(http.StatusOK, &apiComments)
+}
+
+// ListIssueCommentsAndTimeline list all the comments and events of an issue
+func ListIssueCommentsAndTimeline(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/timeline issue issueGetCommentsAndTimeline
+ // ---
+ // summary: List all comments and events on an issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: since
+ // in: query
+ // description: if provided, only comments updated since the specified time are returned.
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: before
+ // in: query
+ // description: if provided, only comments updated before the provided time are returned.
+ // type: string
+ // format: date-time
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TimelineList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+		ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ return
+ }
+ issue.Repo = ctx.Repo.Repository
+
+ opts := &issues_model.FindCommentsOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ IssueID: issue.ID,
+ Since: since,
+ Before: before,
+ Type: issues_model.CommentTypeUndefined,
+ }
+
+ comments, err := issues_model.FindComments(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindComments", err)
+ return
+ }
+
+ if err := comments.LoadPosters(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPosters", err)
+ return
+ }
+
+ var apiComments []*api.TimelineComment
+ for _, comment := range comments {
+ if comment.Type != issues_model.CommentTypeCode && isXRefCommentAccessible(ctx, ctx.Doer, comment, issue.RepoID) {
+ comment.Issue = issue
+ apiComments = append(apiComments, convert.ToTimelineComment(ctx, issue.Repo, comment, ctx.Doer))
+ }
+ }
+
+ ctx.SetTotalCountHeader(int64(len(apiComments)))
+ ctx.JSON(http.StatusOK, &apiComments)
+}
+
+func isXRefCommentAccessible(ctx stdCtx.Context, user *user_model.User, c *issues_model.Comment, issueRepoID int64) bool {
+ // Remove comments that the user has no permissions to see
+ if issues_model.CommentTypeIsRef(c.Type) && c.RefRepoID != issueRepoID && c.RefRepoID != 0 {
+ var err error
+ // Set RefRepo for description in template
+ c.RefRepo, err = repo_model.GetRepositoryByID(ctx, c.RefRepoID)
+ if err != nil {
+ return false
+ }
+ perm, err := access_model.GetUserRepoPermission(ctx, c.RefRepo, user)
+ if err != nil {
+ return false
+ }
+ if !perm.CanReadIssuesOrPulls(c.RefIsPull) {
+ return false
+ }
+ }
+ return true
+}
+
+// ListRepoIssueComments returns all issue-comments for a repo
+func ListRepoIssueComments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/comments issue issueGetRepoComments
+ // ---
+ // summary: List all comments in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: since
+ // in: query
+ // description: if provided, only comments updated since the provided time are returned.
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: if provided, only comments updated before the provided time are returned.
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommentList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ before, since, err := context.GetQueryBeforeSince(ctx.Base)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
+ var isPull optional.Option[bool]
+ canReadIssue := ctx.Repo.CanRead(unit.TypeIssues)
+ canReadPull := ctx.Repo.CanRead(unit.TypePullRequests)
+ if canReadIssue && canReadPull {
+ isPull = optional.None[bool]()
+ } else if canReadIssue {
+ isPull = optional.Some(false)
+ } else if canReadPull {
+ isPull = optional.Some(true)
+ } else {
+ ctx.NotFound()
+ return
+ }
+
+ opts := &issues_model.FindCommentsOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ Type: issues_model.CommentTypeComment,
+ Since: since,
+ Before: before,
+ IsPull: isPull,
+ }
+
+ comments, err := issues_model.FindComments(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindComments", err)
+ return
+ }
+
+ totalCount, err := issues_model.CountComments(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if err = comments.LoadPosters(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPosters", err)
+ return
+ }
+
+ apiComments := make([]*api.Comment, len(comments))
+ if err := comments.LoadIssues(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssues", err)
+ return
+ }
+ if err := comments.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+ if _, err := comments.Issues().LoadRepositories(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadRepositories", err)
+ return
+ }
+ for i := range comments {
+ apiComments[i] = convert.ToAPIComment(ctx, ctx.Repo.Repository, comments[i])
+ }
+
+ ctx.SetTotalCountHeader(totalCount)
+ ctx.JSON(http.StatusOK, &apiComments)
+}
+
+// CreateIssueComment create a comment for an issue
+func CreateIssueComment(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/comments issue issueCreateComment
+ // ---
+ // summary: Add a comment to an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateIssueCommentOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Comment"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ form := web.GetForm(ctx).(*api.CreateIssueCommentOption)
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ return
+ }
+
+ if !ctx.Repo.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+
+ if issue.IsLocked && !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) && !ctx.Doer.IsAdmin {
+ ctx.Error(http.StatusForbidden, "CreateIssueComment", errors.New(ctx.Locale.TrString("repo.issues.comment_on_locked")))
+ return
+ }
+
+ err = issue_service.SetIssueUpdateDate(ctx, issue, form.Updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+
+ comment, err := issue_service.CreateIssueComment(ctx, ctx.Doer, ctx.Repo.Repository, issue, form.Body, nil)
+ if err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "CreateIssueComment", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateIssueComment", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIComment(ctx, ctx.Repo.Repository, comment))
+}
+
+// GetIssueComment Get a comment by ID
+func GetIssueComment(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/comments/{id} issue issueGetComment
+ // ---
+ // summary: Get a comment
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Comment"
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ comment := ctx.Comment
+
+ if comment.Type != issues_model.CommentTypeComment {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ if err := comment.LoadPoster(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "comment.LoadPoster", err)
+ return
+ }
+
+ if err := comment.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIComment(ctx, ctx.Repo.Repository, comment))
+}
+
+// EditIssueComment modify a comment of an issue
+func EditIssueComment(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/comments/{id} issue issueEditComment
+ // ---
+ // summary: Edit a comment
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditIssueCommentOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Comment"
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ form := web.GetForm(ctx).(*api.EditIssueCommentOption)
+ editIssueComment(ctx, *form)
+}
+
+// EditIssueCommentDeprecated modify a comment of an issue
+func EditIssueCommentDeprecated(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/{index}/comments/{id} issue issueEditCommentDeprecated
+ // ---
+ // summary: Edit a comment
+ // deprecated: true
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: this parameter is ignored
+ // type: integer
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditIssueCommentOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Comment"
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditIssueCommentOption)
+ editIssueComment(ctx, *form)
+}
+
+func editIssueComment(ctx *context.APIContext, form api.EditIssueCommentOption) {
+ comment := ctx.Comment
+
+ if !ctx.IsSigned || (ctx.Doer.ID != comment.PosterID && !ctx.Repo.CanWriteIssuesOrPulls(comment.Issue.IsPull)) {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ if !comment.Type.HasContentSupport() {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ err := comment.LoadIssue(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+ err = issue_service.SetIssueUpdateDate(ctx, comment.Issue, form.Updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+
+ oldContent := comment.Content
+ comment.Content = form.Body
+ if err := issue_service.UpdateComment(ctx, comment, comment.ContentVersion, ctx.Doer, oldContent); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateComment", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIComment(ctx, ctx.Repo.Repository, comment))
+}
+
+// DeleteIssueComment delete a comment from an issue
+func DeleteIssueComment(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/comments/{id} issue issueDeleteComment
+ // ---
+ // summary: Delete a comment
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of comment to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ deleteIssueComment(ctx, issues_model.CommentTypeComment)
+}
+
+// DeleteIssueCommentDeprecated delete a comment from an issue
+func DeleteIssueCommentDeprecated(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/comments/{id} issue issueDeleteCommentDeprecated
+ // ---
+ // summary: Delete a comment
+ // deprecated: true
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: this parameter is ignored
+ // type: integer
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of comment to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ deleteIssueComment(ctx, issues_model.CommentTypeComment)
+}
+
+func deleteIssueComment(ctx *context.APIContext, commentType issues_model.CommentType) {
+ comment := ctx.Comment
+
+ if !ctx.IsSigned || (ctx.Doer.ID != comment.PosterID && !ctx.Repo.CanWriteIssuesOrPulls(comment.Issue.IsPull)) {
+ ctx.Status(http.StatusForbidden)
+ return
+ } else if comment.Type != commentType {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ if err := issue_service.DeleteComment(ctx, ctx.Doer, comment); err != nil {
+		ctx.Error(http.StatusInternalServerError, "DeleteComment", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
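
A short sketch of calling the CreateIssueComment endpoint above from a client, assuming the same placeholder instance, repository and token as before (not part of the patch); the JSON body corresponds to CreateIssueCommentOption:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// "body" is the comment text expected by CreateIssueCommentOption.
	payload, err := json.Marshal(map[string]string{
		"body": "Thanks, I can reproduce this on 9.0.3.",
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/alice/demo/issues/1/comments",
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token "+os.Getenv("FORGEJO_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 201 Created with the Comment JSON
}
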
diff --git a/routers/api/v1/repo/issue_comment_attachment.go b/routers/api/v1/repo/issue_comment_attachment.go
new file mode 100644
index 0000000..c45e2eb
--- /dev/null
+++ b/routers/api/v1/repo/issue_comment_attachment.go
@@ -0,0 +1,400 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "time"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/attachment"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/context/upload"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// GetIssueCommentAttachment gets a single attachment of the comment
+func GetIssueCommentAttachment(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/comments/{id}/assets/{attachment_id} issue issueGetIssueCommentAttachment
+ // ---
+ // summary: Get a comment attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/error"
+
+ comment := ctx.Comment
+ attachment := getIssueCommentAttachmentSafeRead(ctx)
+ if attachment == nil {
+ return
+ }
+ if attachment.CommentID != comment.ID {
+ log.Debug("User requested attachment[%d] is not in comment[%d].", attachment.ID, comment.ID)
+ ctx.NotFound("attachment not in comment")
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIAttachment(ctx.Repo.Repository, attachment))
+}
+
+// ListIssueCommentAttachments lists all attachments of the comment
+func ListIssueCommentAttachments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/comments/{id}/assets issue issueListIssueCommentAttachments
+ // ---
+ // summary: List comment's attachments
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/AttachmentList"
+ // "404":
+ // "$ref": "#/responses/error"
+ comment := ctx.Comment
+
+ if err := comment.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIAttachments(ctx.Repo.Repository, comment.Attachments))
+}
+
+// CreateIssueCommentAttachment creates an attachment and saves the given file
+func CreateIssueCommentAttachment(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/comments/{id}/assets issue issueCreateIssueCommentAttachment
+ // ---
+ // summary: Create a comment attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - multipart/form-data
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: name
+ // in: query
+ // description: name of the attachment
+ // type: string
+ // required: false
+ // - name: updated_at
+ // in: query
+ // description: time of the attachment's creation. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: attachment
+ // in: formData
+ // description: attachment to upload
+ // type: file
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+	// ctx.Comment is already loaded and validated by the middleware; check write permission before accepting the upload
+
+ if !canUserWriteIssueCommentAttachment(ctx) {
+ return
+ }
+
+ comment := ctx.Comment
+
+ updatedAt := ctx.Req.FormValue("updated_at")
+ if len(updatedAt) != 0 {
+ updated, err := time.Parse(time.RFC3339, updatedAt)
+ if err != nil {
+			ctx.Error(http.StatusBadRequest, "time.Parse", err)
+ return
+ }
+ err = comment.LoadIssue(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+ err = issue_service.SetIssueUpdateDate(ctx, comment.Issue, &updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+ }
+
+ // Get uploaded file from request
+ file, header, err := ctx.Req.FormFile("attachment")
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FormFile", err)
+ return
+ }
+ defer file.Close()
+
+ filename := header.Filename
+ if query := ctx.FormString("name"); query != "" {
+ filename = query
+ }
+
+ attachment, err := attachment.UploadAttachment(ctx, file, setting.Attachment.AllowedTypes, header.Size, &repo_model.Attachment{
+ Name: filename,
+ UploaderID: ctx.Doer.ID,
+ RepoID: ctx.Repo.Repository.ID,
+ IssueID: comment.IssueID,
+ CommentID: comment.ID,
+ NoAutoTime: comment.Issue.NoAutoTime,
+ CreatedUnix: comment.Issue.UpdatedUnix,
+ })
+ if err != nil {
+ if upload.IsErrFileTypeForbidden(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UploadAttachment", err)
+ }
+ return
+ }
+
+ if err := comment.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+
+ if err = issue_service.UpdateComment(ctx, comment, comment.ContentVersion, ctx.Doer, comment.Content); err != nil {
+ ctx.ServerError("UpdateComment", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attachment))
+}
+
+// EditIssueCommentAttachment updates the given attachment
+func EditIssueCommentAttachment(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/comments/{id}/assets/{attachment_id} issue issueEditIssueCommentAttachment
+ // ---
+ // summary: Edit a comment attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditAttachmentOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ attach := getIssueCommentAttachmentSafeWrite(ctx)
+ if attach == nil {
+ return
+ }
+
+ form := web.GetForm(ctx).(*api.EditAttachmentOptions)
+ if form.Name != "" {
+ attach.Name = form.Name
+ }
+
+	if err := repo_model.UpdateAttachment(ctx, attach); err != nil {
+		ctx.Error(http.StatusInternalServerError, "UpdateAttachment", err)
+		return
+	}
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+}
+
+// DeleteIssueCommentAttachment delete a given attachment
+func DeleteIssueCommentAttachment(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/comments/{id}/assets/{attachment_id} issue issueDeleteIssueCommentAttachment
+ // ---
+ // summary: Delete a comment attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/error"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ attach := getIssueCommentAttachmentSafeWrite(ctx)
+ if attach == nil {
+ return
+ }
+
+ if err := repo_model.DeleteAttachment(ctx, attach, true); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteAttachment", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+func getIssueCommentAttachmentSafeWrite(ctx *context.APIContext) *repo_model.Attachment {
+ if !canUserWriteIssueCommentAttachment(ctx) {
+ return nil
+ }
+ return getIssueCommentAttachmentSafeRead(ctx)
+}
+
+func canUserWriteIssueCommentAttachment(ctx *context.APIContext) bool {
+ // ctx.Comment is assumed to be set in a safe way via a middleware
+ comment := ctx.Comment
+
+ canEditComment := ctx.IsSigned && (ctx.Doer.ID == comment.PosterID || ctx.IsUserRepoAdmin() || ctx.IsUserSiteAdmin()) && ctx.Repo.CanWriteIssuesOrPulls(comment.Issue.IsPull)
+ if !canEditComment {
+ ctx.Error(http.StatusForbidden, "", "user should have permission to edit comment")
+ return false
+ }
+
+ return true
+}
+
+func getIssueCommentAttachmentSafeRead(ctx *context.APIContext) *repo_model.Attachment {
+ // ctx.Comment is assumed to be set in a safe way via a middleware
+ comment := ctx.Comment
+
+ attachment, err := repo_model.GetAttachmentByID(ctx, ctx.ParamsInt64("attachment_id"))
+ if err != nil {
+ ctx.NotFoundOrServerError("GetAttachmentByID", repo_model.IsErrAttachmentNotExist, err)
+ return nil
+ }
+ if !attachmentBelongsToRepoOrComment(ctx, attachment, comment) {
+ return nil
+ }
+ return attachment
+}
+
+func attachmentBelongsToRepoOrComment(ctx *context.APIContext, attachment *repo_model.Attachment, comment *issues_model.Comment) bool {
+ if attachment.RepoID != ctx.Repo.Repository.ID {
+ log.Debug("Requested attachment[%d] does not belong to repo[%-v].", attachment.ID, ctx.Repo.Repository)
+ ctx.NotFound("no such attachment in repo")
+ return false
+ }
+ if attachment.IssueID == 0 || attachment.CommentID == 0 {
+ log.Debug("Requested attachment[%d] is not in a comment.", attachment.ID)
+ ctx.NotFound("no such attachment in comment")
+ return false
+ }
+ if comment != nil && attachment.CommentID != comment.ID {
+ log.Debug("Requested attachment[%d] does not belong to comment[%d].", attachment.ID, comment.ID)
+ ctx.NotFound("no such attachment in comment")
+ return false
+ }
+ return true
+}
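
For the read side handled by ListIssueCommentAttachments above, a sketch of fetching and decoding a comment's attachment list, assuming comment id 42 and the same placeholder instance and token; the JSON field names follow the API Attachment schema (not part of the patch):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// attachment mirrors the fields of the API Attachment response we care about here.
type attachment struct {
	ID          int64  `json:"id"`
	Name        string `json:"name"`
	DownloadURL string `json:"browser_download_url"`
}

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"https://forgejo.example.com/api/v1/repos/alice/demo/issues/comments/42/assets", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token "+os.Getenv("FORGEJO_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		panic(resp.Status)
	}

	var list []attachment
	if err := json.NewDecoder(resp.Body).Decode(&list); err != nil {
		panic(err)
	}
	for _, a := range list {
		fmt.Printf("#%d %s -> %s\n", a.ID, a.Name, a.DownloadURL)
	}
}
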
diff --git a/routers/api/v1/repo/issue_dependency.go b/routers/api/v1/repo/issue_dependency.go
new file mode 100644
index 0000000..c40e92c
--- /dev/null
+++ b/routers/api/v1/repo/issue_dependency.go
@@ -0,0 +1,613 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetIssueDependencies list an issue's dependencies
+func GetIssueDependencies(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/dependencies issue issueListIssueDependencies
+ // ---
+	// summary: List an issue's dependencies, i.e. all issues that block this issue.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ // If this issue's repository does not enable dependencies then there can be no dependencies by default
+ if !ctx.Repo.Repository.IsDependenciesEnabled(ctx) {
+ ctx.NotFound()
+ return
+ }
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound("IsErrIssueNotExist", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ // 1. We must be able to read this issue
+ if !ctx.Repo.Permission.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+
+ page := ctx.FormInt("page")
+ if page <= 1 {
+ page = 1
+ }
+ limit := ctx.FormInt("limit")
+ if limit == 0 {
+ limit = setting.API.DefaultPagingNum
+ } else if limit > setting.API.MaxResponseItems {
+ limit = setting.API.MaxResponseItems
+ }
+
+ canWrite := ctx.Repo.Permission.CanWriteIssuesOrPulls(issue.IsPull)
+
+ blockerIssues := make([]*issues_model.Issue, 0, limit)
+
+ // 2. Get the issues this issue depends on, i.e. the `<#b>`: `<issue> <- <#b>`
+ blockersInfo, err := issue.BlockedByDependencies(ctx, db.ListOptions{
+ Page: page,
+ PageSize: limit,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "BlockedByDependencies", err)
+ return
+ }
+
+ repoPerms := make(map[int64]access_model.Permission)
+ repoPerms[ctx.Repo.Repository.ID] = ctx.Repo.Permission
+ for _, blocker := range blockersInfo {
+ // Get the permissions for this repository
+		// If the repo ID already exists in the map, reuse the stored permission;
+		// otherwise look the permission up and add it to the map
+ var perm access_model.Permission
+ existPerm, ok := repoPerms[blocker.RepoID]
+ if ok {
+ perm = existPerm
+ } else {
+ var err error
+ perm, err = access_model.GetUserRepoPermission(ctx, &blocker.Repository, ctx.Doer)
+ if err != nil {
+ ctx.ServerError("GetUserRepoPermission", err)
+ return
+ }
+ repoPerms[blocker.RepoID] = perm
+ }
+
+ // check permission
+ if !perm.CanReadIssuesOrPulls(blocker.Issue.IsPull) {
+ if !canWrite {
+ hiddenBlocker := &issues_model.DependencyInfo{
+ Issue: issues_model.Issue{
+ Title: "HIDDEN",
+ },
+ }
+ blocker = hiddenBlocker
+ } else {
+ confidentialBlocker := &issues_model.DependencyInfo{
+ Issue: issues_model.Issue{
+ RepoID: blocker.Issue.RepoID,
+ Index: blocker.Index,
+ Title: blocker.Title,
+ IsClosed: blocker.IsClosed,
+ IsPull: blocker.IsPull,
+ },
+ Repository: repo_model.Repository{
+ ID: blocker.Issue.Repo.ID,
+ Name: blocker.Issue.Repo.Name,
+ OwnerName: blocker.Issue.Repo.OwnerName,
+ },
+ }
+ confidentialBlocker.Issue.Repo = &confidentialBlocker.Repository
+ blocker = confidentialBlocker
+ }
+ }
+ blockerIssues = append(blockerIssues, &blocker.Issue)
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, ctx.Doer, blockerIssues))
+}
+
+// CreateIssueDependency create a new issue dependencies
+func CreateIssueDependency(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/dependencies issue issueCreateIssueDependencies
+ // ---
+	// summary: Make the issue in the URL depend on the issue in the form.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueMeta"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Issue"
+ // "404":
+ // description: the issue does not exist
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ // We want to make <:index> depend on <Form>, i.e. <:index> is the target
+ target := getParamsIssue(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ // and <Form> represents the dependency
+ form := web.GetForm(ctx).(*api.IssueMeta)
+ dependency := getFormIssue(ctx, form)
+ if ctx.Written() {
+ return
+ }
+
+ dependencyPerm := getPermissionForRepo(ctx, target.Repo)
+ if ctx.Written() {
+ return
+ }
+
+ createIssueDependency(ctx, target, dependency, ctx.Repo.Permission, *dependencyPerm)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, target))
+}
+
+// RemoveIssueDependency remove an issue dependency
+func RemoveIssueDependency(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/dependencies issue issueRemoveIssueDependencies
+ // ---
+ // summary: Remove an issue dependency
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueMeta"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Issue"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ // We want to make <:index> depend on <Form>, i.e. <:index> is the target
+ target := getParamsIssue(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ // and <Form> represents the dependency
+ form := web.GetForm(ctx).(*api.IssueMeta)
+ dependency := getFormIssue(ctx, form)
+ if ctx.Written() {
+ return
+ }
+
+ dependencyPerm := getPermissionForRepo(ctx, target.Repo)
+ if ctx.Written() {
+ return
+ }
+
+ removeIssueDependency(ctx, target, dependency, ctx.Repo.Permission, *dependencyPerm)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, target))
+}
+
+// GetIssueBlocks list issues that are blocked by this issue
+func GetIssueBlocks(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/blocks issue issueListBlocks
+ // ---
+ // summary: List issues that are blocked by this issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+	// We need to list the issues that DEPEND on this issue, not the other way round.
+	// Therefore whether dependencies are enabled in this repository is potentially irrelevant.
+
+ issue := getParamsIssue(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ if !ctx.Repo.Permission.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.NotFound()
+ return
+ }
+
+ page := ctx.FormInt("page")
+ if page <= 1 {
+ page = 1
+ }
+ limit := ctx.FormInt("limit")
+ if limit <= 1 {
+ limit = setting.API.DefaultPagingNum
+ }
+
+ skip := (page - 1) * limit
+ max := page * limit
+
+ deps, err := issue.BlockingDependencies(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "BlockingDependencies", err)
+ return
+ }
+
+ var issues []*issues_model.Issue
+
+ repoPerms := make(map[int64]access_model.Permission)
+ repoPerms[ctx.Repo.Repository.ID] = ctx.Repo.Permission
+
+ for i, depMeta := range deps {
+ if i < skip || i >= max {
+ continue
+ }
+
+ // Get the permissions for this repository
+		// If the repo ID already exists in the map, reuse the stored permission;
+		// otherwise look the permission up and add it to the map
+ var perm access_model.Permission
+ existPerm, ok := repoPerms[depMeta.RepoID]
+ if ok {
+ perm = existPerm
+ } else {
+ var err error
+ perm, err = access_model.GetUserRepoPermission(ctx, &depMeta.Repository, ctx.Doer)
+ if err != nil {
+ ctx.ServerError("GetUserRepoPermission", err)
+ return
+ }
+ repoPerms[depMeta.RepoID] = perm
+ }
+
+ if !perm.CanReadIssuesOrPulls(depMeta.Issue.IsPull) {
+ continue
+ }
+
+ depMeta.Issue.Repo = &depMeta.Repository
+ issues = append(issues, &depMeta.Issue)
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, ctx.Doer, issues))
+}
+
+// CreateIssueBlocking block the issue given in the body by the issue in path
+func CreateIssueBlocking(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/blocks issue issueCreateIssueBlocking
+ // ---
+ // summary: Block the issue given in the body by the issue in path
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueMeta"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Issue"
+ // "404":
+ // description: the issue does not exist
+
+ dependency := getParamsIssue(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ form := web.GetForm(ctx).(*api.IssueMeta)
+ target := getFormIssue(ctx, form)
+ if ctx.Written() {
+ return
+ }
+
+ targetPerm := getPermissionForRepo(ctx, target.Repo)
+ if ctx.Written() {
+ return
+ }
+
+ createIssueDependency(ctx, target, dependency, *targetPerm, ctx.Repo.Permission)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, dependency))
+}
+
+// RemoveIssueBlocking unblock the issue given in the body by the issue in path
+func RemoveIssueBlocking(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/blocks issue issueRemoveIssueBlocking
+ // ---
+ // summary: Unblock the issue given in the body by the issue in path
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueMeta"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Issue"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ dependency := getParamsIssue(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ form := web.GetForm(ctx).(*api.IssueMeta)
+ target := getFormIssue(ctx, form)
+ if ctx.Written() {
+ return
+ }
+
+ targetPerm := getPermissionForRepo(ctx, target.Repo)
+ if ctx.Written() {
+ return
+ }
+
+ removeIssueDependency(ctx, target, dependency, *targetPerm, ctx.Repo.Permission)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIIssue(ctx, ctx.Doer, dependency))
+}
+
+func getParamsIssue(ctx *context.APIContext) *issues_model.Issue {
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound("IsErrIssueNotExist", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return nil
+ }
+ issue.Repo = ctx.Repo.Repository
+ return issue
+}
+
+func getFormIssue(ctx *context.APIContext, form *api.IssueMeta) *issues_model.Issue {
+ var repo *repo_model.Repository
+ if form.Owner != ctx.Repo.Repository.OwnerName || form.Name != ctx.Repo.Repository.Name {
+ if !setting.Service.AllowCrossRepositoryDependencies {
+ ctx.JSON(http.StatusBadRequest, "CrossRepositoryDependencies not enabled")
+ return nil
+ }
+ var err error
+ repo, err = repo_model.GetRepositoryByOwnerAndName(ctx, form.Owner, form.Name)
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ ctx.NotFound("IsErrRepoNotExist", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByOwnerAndName", err)
+ }
+ return nil
+ }
+ } else {
+ repo = ctx.Repo.Repository
+ }
+
+ issue, err := issues_model.GetIssueByIndex(ctx, repo.ID, form.Index)
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound("IsErrIssueNotExist", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return nil
+ }
+ issue.Repo = repo
+ return issue
+}
+
+func getPermissionForRepo(ctx *context.APIContext, repo *repo_model.Repository) *access_model.Permission {
+ if repo.ID == ctx.Repo.Repository.ID {
+ return &ctx.Repo.Permission
+ }
+
+ perm, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return nil
+ }
+
+ return &perm
+}
+
+func createIssueDependency(ctx *context.APIContext, target, dependency *issues_model.Issue, targetPerm, dependencyPerm access_model.Permission) {
+ if target.Repo.IsArchived || !target.Repo.IsDependenciesEnabled(ctx) {
+ // The target's repository doesn't have dependencies enabled
+ ctx.NotFound()
+ return
+ }
+
+ if !targetPerm.CanWriteIssuesOrPulls(target.IsPull) {
+ // We can't write to the target
+ ctx.NotFound()
+ return
+ }
+
+ if !dependencyPerm.CanReadIssuesOrPulls(dependency.IsPull) {
+ // We can't read the dependency
+ ctx.NotFound()
+ return
+ }
+
+ err := issues_model.CreateIssueDependency(ctx, ctx.Doer, target, dependency)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateIssueDependency", err)
+ return
+ }
+}
+
+func removeIssueDependency(ctx *context.APIContext, target, dependency *issues_model.Issue, targetPerm, dependencyPerm access_model.Permission) {
+ if target.Repo.IsArchived || !target.Repo.IsDependenciesEnabled(ctx) {
+ // The target's repository doesn't have dependencies enabled
+ ctx.NotFound()
+ return
+ }
+
+ if !targetPerm.CanWriteIssuesOrPulls(target.IsPull) {
+ // We can't write to the target
+ ctx.NotFound()
+ return
+ }
+
+ if !dependencyPerm.CanReadIssuesOrPulls(dependency.IsPull) {
+ // We can't read the dependency
+ ctx.NotFound()
+ return
+ }
+
+ err := issues_model.RemoveIssueDependency(ctx, ctx.Doer, target, dependency, issues_model.DependencyTypeBlockedBy)
+ if err != nil {
+		ctx.Error(http.StatusInternalServerError, "RemoveIssueDependency", err)
+ return
+ }
+}
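
A sketch of driving CreateIssueDependency above from a client: mark issue #3 as blocked by issue #7 in the same repository. The instance, repository, issue numbers and token are placeholders, and the JSON body follows the IssueMeta definition referenced in the swagger block (not part of the patch):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// IssueMeta body: the dependency that must be resolved before issue #3.
	payload, err := json.Marshal(map[string]any{
		"owner": "alice",
		"repo":  "demo",
		"index": 7,
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/alice/demo/issues/3/dependencies",
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token "+os.Getenv("FORGEJO_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 201 Created with the target Issue JSON
}
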
diff --git a/routers/api/v1/repo/issue_label.go b/routers/api/v1/repo/issue_label.go
new file mode 100644
index 0000000..ae05544
--- /dev/null
+++ b/routers/api/v1/repo/issue_label.go
@@ -0,0 +1,385 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// ListIssueLabels list all the labels of an issue
+func ListIssueLabels(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/labels issue issueGetLabels
+ // ---
+ // summary: Get an issue's labels
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if err := issue.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabelList(issue.Labels, ctx.Repo.Repository, ctx.Repo.Owner))
+}
+
+// AddIssueLabels add labels for an issue
+func AddIssueLabels(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/labels issue issueAddLabel
+ // ---
+ // summary: Add a label to an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueLabelsOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.IssueLabelsOption)
+ issue, labels, err := prepareForReplaceOrAdd(ctx, *form)
+ if err != nil {
+ return
+ }
+
+ if err = issue_service.AddLabels(ctx, issue, ctx.Doer, labels); err != nil {
+ ctx.Error(http.StatusInternalServerError, "AddLabels", err)
+ return
+ }
+
+ labels, err = issues_model.GetLabelsByIssueID(ctx, issue.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsByIssueID", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabelList(labels, ctx.Repo.Repository, ctx.Repo.Owner))
+}
+
+// DeleteIssueLabel delete a label for an issue
+func DeleteIssueLabel(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/labels/{id} issue issueRemoveLabel
+ // ---
+ // summary: Remove a label from an issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to remove
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/DeleteLabelsOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.DeleteLabelsOption)
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ if err := issue_service.SetIssueUpdateDate(ctx, issue, form.Updated, ctx.Doer); err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+
+ label, err := issues_model.GetLabelByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if issues_model.IsErrLabelNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetLabelByID", err)
+ }
+ return
+ }
+
+ if err := issue_service.RemoveLabel(ctx, issue, ctx.Doer, label); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteIssueLabel", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// ReplaceIssueLabels replaces the labels of an issue
+func ReplaceIssueLabels(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/issues/{index}/labels issue issueReplaceLabels
+ // ---
+ // summary: Replace an issue's labels
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/IssueLabelsOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.IssueLabelsOption)
+ issue, labels, err := prepareForReplaceOrAdd(ctx, *form)
+ if err != nil {
+ return
+ }
+
+ if err := issue_service.ReplaceLabels(ctx, issue, ctx.Doer, labels); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReplaceLabels", err)
+ return
+ }
+
+ labels, err = issues_model.GetLabelsByIssueID(ctx, issue.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsByIssueID", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabelList(labels, ctx.Repo.Repository, ctx.Repo.Owner))
+}
+
+// ClearIssueLabels deletes all the labels of an issue
+func ClearIssueLabels(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/labels issue issueClearLabels
+ // ---
+ // summary: Remove all labels from an issue
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/DeleteLabelsOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.DeleteLabelsOption)
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ if err := issue_service.SetIssueUpdateDate(ctx, issue, form.Updated, ctx.Doer); err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return
+ }
+
+ if err := issue_service.ClearLabels(ctx, issue, ctx.Doer); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ClearLabels", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
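+// prepareForReplaceOrAdd loads the issue, resolves the labels referenced in the request
+// body (given either by ID or by name) and checks that the doer may write to the issue.
+// If it fails, the API response has already been written and the caller should just return.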
+func prepareForReplaceOrAdd(ctx *context.APIContext, form api.IssueLabelsOption) (*issues_model.Issue, []*issues_model.Label, error) {
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return nil, nil, err
+ }
+
+ var (
+ labelIDs []int64
+ labelNames []string
+ )
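+ // form.Labels is decoded from JSON as []any: numeric entries (label IDs) arrive as
+ // float64 and textual entries (label names) as string, hence the reflection below.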
+ for _, label := range form.Labels {
+ rv := reflect.ValueOf(label)
+ switch rv.Kind() {
+ case reflect.Float64:
+ labelIDs = append(labelIDs, int64(rv.Float()))
+ case reflect.String:
+ labelNames = append(labelNames, rv.String())
+ }
+ }
+ if len(labelIDs) > 0 && len(labelNames) > 0 {
+ ctx.Error(http.StatusBadRequest, "InvalidLabels", "labels should be an array of strings or integers")
+ return nil, nil, fmt.Errorf("invalid labels")
+ }
+ if len(labelNames) > 0 {
+ labelIDs, err = issues_model.GetLabelIDsInRepoByNames(ctx, ctx.Repo.Repository.ID, labelNames)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelIDsInRepoByNames", err)
+ return nil, nil, err
+ }
+ }
+
+ labels, err := issues_model.GetLabelsByIDs(ctx, labelIDs, "id", "repo_id", "org_id", "name", "exclusive")
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsByIDs", err)
+ return nil, nil, err
+ }
+
+ if !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Status(http.StatusForbidden)
+ return nil, nil, fmt.Errorf("no permission to write to the issue")
+ }
+
+ err = issue_service.SetIssueUpdateDate(ctx, issue, form.Updated, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusForbidden, "SetIssueUpdateDate", err)
+ return nil, nil, err
+ }
+
+ return issue, labels, err
+}
diff --git a/routers/api/v1/repo/issue_pin.go b/routers/api/v1/repo/issue_pin.go
new file mode 100644
index 0000000..af3e063
--- /dev/null
+++ b/routers/api/v1/repo/issue_pin.go
@@ -0,0 +1,309 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// PinIssue pins an issue
+func PinIssue(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/pin issue pinIssue
+ // ---
+ // summary: Pin an Issue
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of issue to pin
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ // If we don't do this, it will crash when trying to add the pin event to the comment history
+ err = issue.LoadRepo(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadRepo", err)
+ return
+ }
+
+ err = issue.Pin(ctx, ctx.Doer)
+ if err != nil {
+ if issues_model.IsErrIssueMaxPinReached(err) {
+ ctx.Error(http.StatusBadRequest, "MaxPinReached", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "PinIssue", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UnpinIssue unpins an issue
+func UnpinIssue(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/pin issue unpinIssue
+ // ---
+ // summary: Unpin an Issue
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of issue to unpin
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ // If we don't do this, it will crash when trying to add the unpin event to the comment history
+ err = issue.LoadRepo(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadRepo", err)
+ return
+ }
+
+ err = issue.Unpin(ctx, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UnpinIssue", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// MoveIssuePin moves a pinned Issue to a new Position
+func MoveIssuePin(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/issues/{index}/pin/{position} issue moveIssuePin
+ // ---
+ // summary: Moves the Pin to the given Position
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: position
+ // in: path
+ // description: the new position
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ err = issue.MovePin(ctx, int(ctx.ParamsInt64(":position")))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "MovePin", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListPinnedIssues returns a list of all pinned Issues
+func ListPinnedIssues(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/pinned repository repoListPinnedIssues
+ // ---
+ // summary: List a repo's pinned issues
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issues, err := issues_model.GetPinnedIssues(ctx, ctx.Repo.Repository.ID, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPinnedIssues", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, ctx.Doer, issues))
+}
+
+// ListPinnedPullRequests returns a list of all pinned PRs
+func ListPinnedPullRequests(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/pinned repository repoListPinnedPullRequests
+ // ---
+ // summary: List a repo's pinned pull requests
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullRequestList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issues, err := issues_model.GetPinnedIssues(ctx, ctx.Repo.Repository.ID, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPinnedPullRequests", err)
+ return
+ }
+
+ apiPrs := make([]*api.PullRequest, len(issues))
+ if err := issues.LoadPullRequests(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPullRequests", err)
+ return
+ }
+ for i, currentIssue := range issues {
+ pr := currentIssue.PullRequest
+ if err = pr.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadBaseRepo", err)
+ return
+ }
+
+ if err = pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+
+ apiPrs[i] = convert.ToAPIPullRequest(ctx, pr, ctx.Doer)
+ }
+
+ ctx.JSON(http.StatusOK, &apiPrs)
+}
+
+// AreNewIssuePinsAllowed returns whether new issue pins are allowed
+func AreNewIssuePinsAllowed(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/new_pin_allowed repository repoNewPinAllowed
+ // ---
+ // summary: Returns if new Issue Pins are allowed
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepoNewIssuePinsAllowed"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ pinsAllowed := api.NewIssuePinsAllowed{}
+ var err error
+
+ pinsAllowed.Issues, err = issues_model.IsNewPinAllowed(ctx, ctx.Repo.Repository.ID, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsNewIssuePinAllowed", err)
+ return
+ }
+
+ pinsAllowed.PullRequests, err = issues_model.IsNewPinAllowed(ctx, ctx.Repo.Repository.ID, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsNewPullRequestPinAllowed", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, pinsAllowed)
+}
diff --git a/routers/api/v1/repo/issue_reaction.go b/routers/api/v1/repo/issue_reaction.go
new file mode 100644
index 0000000..c395255
--- /dev/null
+++ b/routers/api/v1/repo/issue_reaction.go
@@ -0,0 +1,424 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "net/http"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+)
+
+// GetIssueCommentReactions lists reactions of a comment of an issue
+func GetIssueCommentReactions(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/comments/{id}/reactions issue issueGetCommentReactions
+ // ---
+ // summary: Get a list of reactions from a comment of an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ReactionList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ comment := ctx.Comment
+
+ reactions, _, err := issues_model.FindCommentReactions(ctx, comment.IssueID, comment.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindCommentReactions", err)
+ return
+ }
+ _, err = reactions.LoadUsers(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReactionList.LoadUsers()", err)
+ return
+ }
+
+ var result []api.Reaction
+ for _, r := range reactions {
+ result = append(result, api.Reaction{
+ User: convert.ToUser(ctx, r.User, ctx.Doer),
+ Reaction: r.Type,
+ Created: r.CreatedUnix.AsTime(),
+ })
+ }
+
+ ctx.JSON(http.StatusOK, result)
+}
+
+// PostIssueCommentReaction adds a reaction to a comment of an issue
+func PostIssueCommentReaction(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/comments/{id}/reactions issue issuePostCommentReaction
+ // ---
+ // summary: Add a reaction to a comment of an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: content
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditReactionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Reaction"
+ // "201":
+ // "$ref": "#/responses/Reaction"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditReactionOption)
+
+ changeIssueCommentReaction(ctx, *form, true)
+}
+
+// DeleteIssueCommentReaction removes a reaction from a comment of an issue
+func DeleteIssueCommentReaction(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/comments/{id}/reactions issue issueDeleteCommentReaction
+ // ---
+ // summary: Remove a reaction from a comment of an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: content
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditReactionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditReactionOption)
+
+ changeIssueCommentReaction(ctx, *form, false)
+}
+
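+// changeIssueCommentReaction adds (isCreateType=true) or removes (isCreateType=false)
+// the doer's reaction on a comment and writes the API response.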
+func changeIssueCommentReaction(ctx *context.APIContext, form api.EditReactionOption, isCreateType bool) {
+ comment := ctx.Comment
+
+ if comment.Issue.IsLocked && !ctx.Repo.CanWriteIssuesOrPulls(comment.Issue.IsPull) {
+ ctx.Error(http.StatusForbidden, "ChangeIssueCommentReaction", errors.New("no permission to change reaction"))
+ return
+ }
+
+ if isCreateType {
+ // PostIssueCommentReaction part
+ reaction, err := issue_service.CreateCommentReaction(ctx, ctx.Doer, comment.Issue, comment, form.Reaction)
+ if err != nil {
+ if issues_model.IsErrForbiddenIssueReaction(err) || errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, err.Error(), err)
+ } else if issues_model.IsErrReactionAlreadyExist(err) {
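+ // the reaction already exists and is returned alongside the error,
+ // so report it with 200 instead of 201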
+ ctx.JSON(http.StatusOK, api.Reaction{
+ User: convert.ToUser(ctx, ctx.Doer, ctx.Doer),
+ Reaction: reaction.Type,
+ Created: reaction.CreatedUnix.AsTime(),
+ })
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateCommentReaction", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, api.Reaction{
+ User: convert.ToUser(ctx, ctx.Doer, ctx.Doer),
+ Reaction: reaction.Type,
+ Created: reaction.CreatedUnix.AsTime(),
+ })
+ } else {
+ // DeleteIssueCommentReaction part
+ err := issues_model.DeleteCommentReaction(ctx, ctx.Doer.ID, comment.Issue.ID, comment.ID, form.Reaction)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteCommentReaction", err)
+ return
+ }
+ // ToDo respond 204
+ ctx.Status(http.StatusOK)
+ }
+}
+
+// GetIssueReactions lists reactions of an issue
+func GetIssueReactions(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/reactions issue issueGetIssueReactions
+ // ---
+ // summary: Get a list of reactions of an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ReactionList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueWithAttrsByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanReadIssuesOrPulls(issue.IsPull) {
+ ctx.Error(http.StatusForbidden, "GetIssueReactions", errors.New("no permission to get reactions"))
+ return
+ }
+
+ reactions, count, err := issues_model.FindIssueReactions(ctx, issue.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindIssueReactions", err)
+ return
+ }
+ _, err = reactions.LoadUsers(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReactionList.LoadUsers()", err)
+ return
+ }
+
+ var result []api.Reaction
+ for _, r := range reactions {
+ result = append(result, api.Reaction{
+ User: convert.ToUser(ctx, r.User, ctx.Doer),
+ Reaction: r.Type,
+ Created: r.CreatedUnix.AsTime(),
+ })
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, result)
+}
+
+// PostIssueReaction adds a reaction to an issue
+func PostIssueReaction(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/reactions issue issuePostIssueReaction
+ // ---
+ // summary: Add a reaction to an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: content
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditReactionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Reaction"
+ // "201":
+ // "$ref": "#/responses/Reaction"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.EditReactionOption)
+ changeIssueReaction(ctx, *form, true)
+}
+
+// DeleteIssueReaction removes a reaction from an issue
+func DeleteIssueReaction(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/reactions issue issueDeleteIssueReaction
+ // ---
+ // summary: Remove a reaction from an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: content
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditReactionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.EditReactionOption)
+ changeIssueReaction(ctx, *form, false)
+}
+
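+// changeIssueReaction adds (isCreateType=true) or removes (isCreateType=false)
+// the doer's reaction on an issue and writes the API response.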
+func changeIssueReaction(ctx *context.APIContext, form api.EditReactionOption, isCreateType bool) {
+ issue, err := issues_model.GetIssueWithAttrsByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if issue.IsLocked && !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Error(http.StatusForbidden, "ChangeIssueReaction", errors.New("no permission to change reaction"))
+ return
+ }
+
+ if isCreateType {
+ // PostIssueReaction part
+ reaction, err := issue_service.CreateIssueReaction(ctx, ctx.Doer, issue, form.Reaction)
+ if err != nil {
+ if issues_model.IsErrForbiddenIssueReaction(err) || errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, err.Error(), err)
+ } else if issues_model.IsErrReactionAlreadyExist(err) {
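+ // the reaction already exists and is returned alongside the error,
+ // so report it with 200 instead of 201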
+ ctx.JSON(http.StatusOK, api.Reaction{
+ User: convert.ToUser(ctx, ctx.Doer, ctx.Doer),
+ Reaction: reaction.Type,
+ Created: reaction.CreatedUnix.AsTime(),
+ })
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateCommentReaction", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, api.Reaction{
+ User: convert.ToUser(ctx, ctx.Doer, ctx.Doer),
+ Reaction: reaction.Type,
+ Created: reaction.CreatedUnix.AsTime(),
+ })
+ } else {
+ // DeleteIssueReaction part
+ err = issues_model.DeleteIssueReaction(ctx, ctx.Doer.ID, issue.ID, form.Reaction)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteIssueReaction", err)
+ return
+ }
+ // ToDo respond 204
+ ctx.Status(http.StatusOK)
+ }
+}
diff --git a/routers/api/v1/repo/issue_stopwatch.go b/routers/api/v1/repo/issue_stopwatch.go
new file mode 100644
index 0000000..dd61967
--- /dev/null
+++ b/routers/api/v1/repo/issue_stopwatch.go
@@ -0,0 +1,245 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "net/http"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// StartIssueStopwatch creates a stopwatch for the given issue.
+func StartIssueStopwatch(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/stopwatch/start issue issueStartStopWatch
+ // ---
+ // summary: Start stopwatch on an issue.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to create the stopwatch on
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // description: Not repo writer, user does not have rights to toggle stopwatch
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // description: Cannot start a stopwatch again if it already exists
+
+ issue, err := prepareIssueStopwatch(ctx, false)
+ if err != nil {
+ return
+ }
+
+ if err := issues_model.CreateIssueStopwatch(ctx, ctx.Doer, issue); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateOrStopIssueStopwatch", err)
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// StopIssueStopwatch stops a stopwatch for the given issue.
+func StopIssueStopwatch(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/stopwatch/stop issue issueStopStopWatch
+ // ---
+ // summary: Stop an issue's existing stopwatch.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to stop the stopwatch on
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "201":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // description: Not repo writer, user does not have rights to toggle stopwatch
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // description: Cannot stop a non existent stopwatch
+
+ issue, err := prepareIssueStopwatch(ctx, true)
+ if err != nil {
+ return
+ }
+
+ if err := issues_model.FinishIssueStopwatch(ctx, ctx.Doer, issue); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateOrStopIssueStopwatch", err)
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// DeleteIssueStopwatch deletes a specific stopwatch
+func DeleteIssueStopwatch(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/stopwatch/delete issue issueDeleteStopWatch
+ // ---
+ // summary: Delete an issue's existing stopwatch.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to delete the stopwatch on
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // description: Not repo writer, user does not have rights to toggle stopwatch
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // description: Cannot cancel a non existent stopwatch
+
+ issue, err := prepareIssueStopwatch(ctx, true)
+ if err != nil {
+ return
+ }
+
+ if err := issues_model.CancelStopwatch(ctx, ctx.Doer, issue); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CancelStopwatch", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
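+// prepareIssueStopwatch loads the issue, checks time-tracking permissions and verifies
+// that the doer's stopwatch does (shouldExist=true) or does not (shouldExist=false) exist yet.
+// If it fails, the API response has already been written.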
+func prepareIssueStopwatch(ctx *context.APIContext, shouldExist bool) (*issues_model.Issue, error) {
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+
+ return nil, err
+ }
+
+ if !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) {
+ ctx.Status(http.StatusForbidden)
+ return nil, errors.New("Unable to write to PRs")
+ }
+
+ if !ctx.Repo.CanUseTimetracker(ctx, issue, ctx.Doer) {
+ ctx.Status(http.StatusForbidden)
+ return nil, errors.New("Cannot use time tracker")
+ }
+
+ if issues_model.StopwatchExists(ctx, ctx.Doer.ID, issue.ID) != shouldExist {
+ if shouldExist {
+ ctx.Error(http.StatusConflict, "StopwatchExists", "cannot stop/cancel a non existent stopwatch")
+ err = errors.New("cannot stop/cancel a non existent stopwatch")
+ } else {
+ ctx.Error(http.StatusConflict, "StopwatchExists", "cannot start a stopwatch again if it already exists")
+ err = errors.New("cannot start a stopwatch again if it already exists")
+ }
+ return nil, err
+ }
+
+ return issue, nil
+}
+
+// GetStopwatches gets all stopwatches of the current user
+func GetStopwatches(ctx *context.APIContext) {
+ // swagger:operation GET /user/stopwatches user userGetStopWatches
+ // ---
+ // summary: Get list of all existing stopwatches
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/StopWatchList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ sws, err := issues_model.GetUserStopwatches(ctx, ctx.Doer.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserStopwatches", err)
+ return
+ }
+
+ count, err := issues_model.CountUserStopwatches(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiSWs, err := convert.ToStopWatches(ctx, sws)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "APIFormat", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiSWs)
+}
diff --git a/routers/api/v1/repo/issue_subscription.go b/routers/api/v1/repo/issue_subscription.go
new file mode 100644
index 0000000..6b29218
--- /dev/null
+++ b/routers/api/v1/repo/issue_subscription.go
@@ -0,0 +1,294 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// AddIssueSubscription subscribes a user to an issue
+func AddIssueSubscription(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/issues/{index}/subscriptions/{user} issue issueAddSubscription
+ // ---
+ // summary: Subscribe user to issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: user
+ // in: path
+ // description: user to subscribe
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // description: Already subscribed
+ // "201":
+ // description: Successfully Subscribed
+ // "304":
+ // description: User can only subscribe themselves if they are not an admin
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ setIssueSubscription(ctx, true)
+}
+
+// DelIssueSubscription unsubscribes a user from an issue
+func DelIssueSubscription(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/subscriptions/{user} issue issueDeleteSubscription
+ // ---
+ // summary: Unsubscribe user from issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: user
+ // in: path
+ // description: user to unsubscribe
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // description: Already unsubscribed
+ // "201":
+ // description: Successfully Unsubscribed
+ // "304":
+ // description: User can only unsubscribe themselves if they are not an admin
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ setIssueSubscription(ctx, false)
+}
+
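+// setIssueSubscription subscribes (watch=true) or unsubscribes (watch=false) the given user
+// to/from the issue, answering 200 if the state is already as requested and 201 if it was changed.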
+func setIssueSubscription(ctx *context.APIContext, watch bool) {
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+
+ return
+ }
+
+ user, err := user_model.GetUserByName(ctx, ctx.Params(":user"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+
+ return
+ }
+
+ // only admin and user for itself can change subscription
+ if user.ID != ctx.Doer.ID && !ctx.Doer.IsAdmin {
+ ctx.Error(http.StatusForbidden, "User", fmt.Errorf("%s is not permitted to change subscriptions for %s", ctx.Doer.Name, user.Name))
+ return
+ }
+
+ current, err := issues_model.CheckIssueWatch(ctx, user, issue)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CheckIssueWatch", err)
+ return
+ }
+
+ // If watch state won't change
+ if current == watch {
+ ctx.Status(http.StatusOK)
+ return
+ }
+
+ // Update watch state
+ if err := issues_model.CreateOrUpdateIssueWatch(ctx, user.ID, issue.ID, watch); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateOrUpdateIssueWatch", err)
+ return
+ }
+
+ ctx.Status(http.StatusCreated)
+}
+
+// CheckIssueSubscription checks if a user is subscribed to an issue
+func CheckIssueSubscription(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/subscriptions/check issue issueCheckSubscription
+ // ---
+ // summary: Check if user is subscribed to an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WatchInfo"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+
+ return
+ }
+
+ watching, err := issues_model.CheckIssueWatch(ctx, ctx.Doer, issue)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, api.WatchInfo{
+ Subscribed: watching,
+ Ignored: !watching,
+ Reason: nil,
+ CreatedAt: issue.CreatedUnix.AsTime(),
+ URL: issue.APIURL(ctx) + "/subscriptions",
+ RepositoryURL: ctx.Repo.Repository.APIURL(),
+ })
+}
+
+// GetIssueSubscribers returns the subscribers of an issue
+func GetIssueSubscribers(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/subscriptions issue issueSubscriptions
+ // ---
+ // summary: Get users who subscribed on an issue.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+
+ return
+ }
+
+ iwl, err := issues_model.GetIssueWatchers(ctx, issue.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetIssueWatchers", err)
+ return
+ }
+
+ userIDs := make([]int64, 0, len(iwl))
+ for _, iw := range iwl {
+ userIDs = append(userIDs, iw.UserID)
+ }
+
+ users, err := user_model.GetUsersByIDs(ctx, userIDs)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUsersByIDs", err)
+ return
+ }
+ apiUsers := make([]*api.User, 0, len(users))
+ for _, v := range users {
+ apiUsers = append(apiUsers, convert.ToUser(ctx, v, ctx.Doer))
+ }
+
+ count, err := issues_model.CountIssueWatchers(ctx, issue.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CountIssueWatchers", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiUsers)
+}
diff --git a/routers/api/v1/repo/issue_tracked_time.go b/routers/api/v1/repo/issue_tracked_time.go
new file mode 100644
index 0000000..3d8abfa
--- /dev/null
+++ b/routers/api/v1/repo/issue_tracked_time.go
@@ -0,0 +1,637 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListTrackedTimes lists all the tracked times of an issue
+func ListTrackedTimes(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issues/{index}/times issue issueTrackedTimes
+ // ---
+ // summary: List an issue's tracked times
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: user
+ // in: query
+ // description: optional filter by user (available for issue managers)
+ // type: string
+ // - name: since
+ // in: query
+ // description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TrackedTimeList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.NotFound("Timetracker is disabled")
+ return
+ }
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ opts := &issues_model.FindTrackedTimesOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepositoryID: ctx.Repo.Repository.ID,
+ IssueID: issue.ID,
+ }
+
+ qUser := ctx.FormTrim("user")
+ if qUser != "" {
+ user, err := user_model.GetUserByName(ctx, qUser)
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusNotFound, "User does not exist", err)
+ return
+ } else if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ return
+ }
+ opts.UserID = user.ID
+ }
+
+ if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = context.GetQueryBeforeSince(ctx.Base); err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
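+ // Users who are neither instance admins nor repository issue writers may only query their own tracked times.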
+ cantSetUser := !ctx.Doer.IsAdmin &&
+ opts.UserID != ctx.Doer.ID &&
+ !ctx.IsUserRepoWriter([]unit.Type{unit.TypeIssues})
+
+ if cantSetUser {
+ if opts.UserID == 0 {
+ opts.UserID = ctx.Doer.ID
+ } else {
+ ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
+ return
+ }
+ }
+
+ count, err := issues_model.CountTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ trackedTimes, err := issues_model.GetTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
+ return
+ }
+ if err = trackedTimes.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(ctx, ctx.Doer, trackedTimes))
+}
+
+// AddTime adds tracked time manually to the given issue
+func AddTime(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/issues/{index}/times issue issueAddTime
+ // ---
+ // summary: Add tracked time to an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/AddTimeOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TrackedTime"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.AddTimeOption)
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanUseTimetracker(ctx, issue, ctx.Doer) {
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
+ return
+ }
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ user := ctx.Doer
+ if form.User != "" {
+ if (ctx.IsUserRepoAdmin() && ctx.Doer.Name != form.User) || ctx.Doer.IsAdmin {
+ // allow only RepoAdmin, Admin and User to add time
+ user, err = user_model.GetUserByName(ctx, form.User)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ return
+ }
+ }
+ }
+
+ created := time.Time{}
+ if !form.Created.IsZero() {
+ created = form.Created
+ }
+
+ trackedTime, err := issues_model.AddTime(ctx, user, issue, form.Time, created)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "AddTime", err)
+ return
+ }
+ if err = trackedTime.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToTrackedTime(ctx, user, trackedTime))
+}
+
+// ResetIssueTime resets manually tracked time for the given issue
+func ResetIssueTime(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/times issue issueResetTime
+ // ---
+ // summary: Reset a tracked time of an issue
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue to reset tracked time on
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanUseTimetracker(ctx, issue, ctx.Doer) {
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.JSON(http.StatusBadRequest, struct{ Message string }{Message: "time tracking disabled"})
+ return
+ }
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ err = issues_model.DeleteIssueUserTimes(ctx, issue, ctx.Doer)
+ if err != nil {
+ if db.IsErrNotExist(err) {
+ ctx.Error(http.StatusNotFound, "DeleteIssueUserTimes", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteIssueUserTimes", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteTime deletes a specific tracked time by id
+func DeleteTime(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/times/{id} issue issueDeleteTime
+ // ---
+ // summary: Delete specific tracked time
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the issue
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of time to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ issue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrIssueNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
+ }
+ return
+ }
+
+ if !ctx.Repo.CanUseTimetracker(ctx, issue, ctx.Doer) {
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.JSON(http.StatusBadRequest, struct{ Message string }{Message: "time tracking disabled"})
+ return
+ }
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ time, err := issues_model.GetTrackedTimeByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if db.IsErrNotExist(err) {
+ ctx.NotFound(err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTrackedTimeByID", err)
+ return
+ }
+ if time.Deleted {
+ ctx.NotFound(fmt.Errorf("tracked time [%d] already deleted", time.ID))
+ return
+ }
+
+ if !ctx.Doer.IsAdmin && time.UserID != ctx.Doer.ID {
+ // Only Admin and User itself can delete their time
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ err = issues_model.DeleteTime(ctx, time)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteTime", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListTrackedTimesByUser lists all tracked times of the user
+func ListTrackedTimesByUser(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/times/{user} repository userTrackedTimes
+ // ---
+ // summary: List a user's tracked times in a repo
+ // deprecated: true
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: user
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TrackedTimeList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
+ return
+ }
+ user, err := user_model.GetUserByName(ctx, ctx.Params(":timetrackingusername"))
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return
+ }
+ if user == nil {
+ ctx.NotFound()
+ return
+ }
+
+ if !ctx.IsUserRepoAdmin() && !ctx.Doer.IsAdmin && ctx.Doer.ID != user.ID {
+ ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
+ return
+ }
+
+ opts := &issues_model.FindTrackedTimesOptions{
+ UserID: user.ID,
+ RepositoryID: ctx.Repo.Repository.ID,
+ }
+
+ trackedTimes, err := issues_model.GetTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
+ return
+ }
+ if err = trackedTimes.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(ctx, ctx.Doer, trackedTimes))
+}
+
+// ListTrackedTimesByRepository lists all tracked times of the repository
+func ListTrackedTimesByRepository(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/times repository repoTrackedTimes
+ // ---
+ // summary: List a repo's tracked times
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: user
+ // in: query
+ // description: optional filter by user (available for issue managers)
+ // type: string
+ // - name: since
+ // in: query
+ // description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TrackedTimeList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {
+ ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
+ return
+ }
+
+ opts := &issues_model.FindTrackedTimesOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepositoryID: ctx.Repo.Repository.ID,
+ }
+
+ // Filters
+ qUser := ctx.FormTrim("user")
+ if qUser != "" {
+ user, err := user_model.GetUserByName(ctx, qUser)
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusNotFound, "User does not exist", err)
+ return
+ } else if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ return
+ }
+ opts.UserID = user.ID
+ }
+
+ var err error
+ if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = context.GetQueryBeforeSince(ctx.Base); err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
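+ // Users who are neither instance admins nor repository issue writers may only query their own tracked times.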
+ cantSetUser := !ctx.Doer.IsAdmin &&
+ opts.UserID != ctx.Doer.ID &&
+ !ctx.IsUserRepoWriter([]unit.Type{unit.TypeIssues})
+
+ if cantSetUser {
+ if opts.UserID == 0 {
+ opts.UserID = ctx.Doer.ID
+ } else {
+ ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
+ return
+ }
+ }
+
+ count, err := issues_model.CountTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ trackedTimes, err := issues_model.GetTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
+ return
+ }
+ if err = trackedTimes.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(ctx, ctx.Doer, trackedTimes))
+}
+
+// ListMyTrackedTimes lists all tracked times of the current user
+func ListMyTrackedTimes(ctx *context.APIContext) {
+ // swagger:operation GET /user/times user userCurrentTrackedTimes
+ // ---
+ // summary: List the current user's tracked times
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: since
+ // in: query
+ // description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // - name: before
+ // in: query
+ // description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
+ // type: string
+ // format: date-time
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TrackedTimeList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ opts := &issues_model.FindTrackedTimesOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ UserID: ctx.Doer.ID,
+ }
+
+ var err error
+ if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = context.GetQueryBeforeSince(ctx.Base); err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
+ return
+ }
+
+ count, err := issues_model.CountTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ trackedTimes, err := issues_model.GetTrackedTimes(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTrackedTimesByUser", err)
+ return
+ }
+
+ if err = trackedTimes.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(ctx, ctx.Doer, trackedTimes))
+}
diff --git a/routers/api/v1/repo/key.go b/routers/api/v1/repo/key.go
new file mode 100644
index 0000000..88444a2
--- /dev/null
+++ b/routers/api/v1/repo/key.go
@@ -0,0 +1,292 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ stdCtx "context"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ asymkey_service "code.gitea.io/gitea/services/asymkey"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// appendPrivateInformation appends the read-only flag and repository information to an api.DeployKey
+func appendPrivateInformation(ctx stdCtx.Context, apiKey *api.DeployKey, key *asymkey_model.DeployKey, repository *repo_model.Repository) (*api.DeployKey, error) {
+ apiKey.ReadOnly = key.Mode == perm.AccessModeRead
+ if repository.ID == key.RepoID {
+ apiKey.Repository = convert.ToRepo(ctx, repository, access_model.Permission{AccessMode: key.Mode})
+ } else {
+ repo, err := repo_model.GetRepositoryByID(ctx, key.RepoID)
+ if err != nil {
+ return apiKey, err
+ }
+ apiKey.Repository = convert.ToRepo(ctx, repo, access_model.Permission{AccessMode: key.Mode})
+ }
+ return apiKey, nil
+}
+
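+// composeDeployKeysAPILink builds the base URL for deploy-key API resources, e.g.
+// https://example.com/api/v1/repos/{owner}/{repo}/keys/ (the host is illustrative).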
+func composeDeployKeysAPILink(owner, name string) string {
+ return setting.AppURL + "api/v1/repos/" + url.PathEscape(owner) + "/" + url.PathEscape(name) + "/keys/"
+}
+
+// ListDeployKeys list all the deploy keys of a repository
+func ListDeployKeys(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/keys repository repoListKeys
+ // ---
+ // summary: List a repository's keys
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: key_id
+ // in: query
+ // description: the key_id to search for
+ // type: integer
+ // - name: fingerprint
+ // in: query
+ // description: fingerprint of the key
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/DeployKeyList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := asymkey_model.ListDeployKeysOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ KeyID: ctx.FormInt64("key_id"),
+ Fingerprint: ctx.FormString("fingerprint"),
+ }
+
+ keys, count, err := db.FindAndCount[asymkey_model.DeployKey](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiLink := composeDeployKeysAPILink(ctx.Repo.Owner.Name, ctx.Repo.Repository.Name)
+ apiKeys := make([]*api.DeployKey, len(keys))
+ for i := range keys {
+ if err := keys[i].GetContent(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetContent", err)
+ return
+ }
+ apiKeys[i] = convert.ToDeployKey(apiLink, keys[i])
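+		// Only instance admins and the repository owner receive the extended
+		// (repository and access-mode) information for each key.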
+ if ctx.Doer.IsAdmin || ((ctx.Repo.Repository.ID == keys[i].RepoID) && (ctx.Doer.ID == ctx.Repo.Owner.ID)) {
+ apiKeys[i], _ = appendPrivateInformation(ctx, apiKeys[i], keys[i], ctx.Repo.Repository)
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &apiKeys)
+}
+
+// GetDeployKey get a deploy key by id
+func GetDeployKey(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/keys/{id} repository repoGetKey
+ // ---
+ // summary: Get a repository's key by id
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the key to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/DeployKey"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ key, err := asymkey_model.GetDeployKeyByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if asymkey_model.IsErrDeployKeyNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetDeployKeyByID", err)
+ }
+ return
+ }
+
+	// ensure the key belongs to this repository; the check keeps the API behaviour consistent
+ if key.RepoID != ctx.Repo.Repository.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if err = key.GetContent(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetContent", err)
+ return
+ }
+
+ apiLink := composeDeployKeysAPILink(ctx.Repo.Owner.Name, ctx.Repo.Repository.Name)
+ apiKey := convert.ToDeployKey(apiLink, key)
+ if ctx.Doer.IsAdmin || ((ctx.Repo.Repository.ID == key.RepoID) && (ctx.Doer.ID == ctx.Repo.Owner.ID)) {
+ apiKey, _ = appendPrivateInformation(ctx, apiKey, key, ctx.Repo.Repository)
+ }
+ ctx.JSON(http.StatusOK, apiKey)
+}
+
+// HandleCheckKeyStringError handles errors returned while checking a public key string
+func HandleCheckKeyStringError(ctx *context.APIContext, err error) {
+ if db.IsErrSSHDisabled(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", "SSH is disabled")
+ } else if asymkey_model.IsErrKeyUnableVerify(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Unable to verify key content")
+ } else {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("Invalid key content: %w", err))
+ }
+}
+
+// HandleAddKeyError handles errors returned while adding a deploy key
+func HandleAddKeyError(ctx *context.APIContext, err error) {
+ switch {
+ case asymkey_model.IsErrDeployKeyAlreadyExist(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", "This key has already been added to this repository")
+ case asymkey_model.IsErrKeyAlreadyExist(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", "Key content has been used as non-deploy key")
+ case asymkey_model.IsErrKeyNameAlreadyUsed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", "Key title has been used")
+ case asymkey_model.IsErrDeployKeyNameAlreadyUsed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", "A key with the same name already exists")
+ default:
+ ctx.Error(http.StatusInternalServerError, "AddKey", err)
+ }
+}
+
+// CreateDeployKey create deploy key for a repository
+func CreateDeployKey(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/keys repository repoCreateKey
+ // ---
+ // summary: Add a key to a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateKeyOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/DeployKey"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateKeyOption)
+ content, err := asymkey_model.CheckPublicKeyString(form.Key)
+ if err != nil {
+ HandleCheckKeyStringError(ctx, err)
+ return
+ }
+
+ key, err := asymkey_model.AddDeployKey(ctx, ctx.Repo.Repository.ID, form.Title, content, form.ReadOnly)
+ if err != nil {
+ HandleAddKeyError(ctx, err)
+ return
+ }
+
+ key.Content = content
+ apiLink := composeDeployKeysAPILink(ctx.Repo.Owner.Name, ctx.Repo.Repository.Name)
+ ctx.JSON(http.StatusCreated, convert.ToDeployKey(apiLink, key))
+}
+
+// DeleteDeploykey delete deploy key for a repository
+func DeleteDeploykey(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/keys/{id} repository repoDeleteKey
+ // ---
+ // summary: Delete a key from a repository
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the key to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := asymkey_service.DeleteDeployKey(ctx, ctx.Doer, ctx.ParamsInt64(":id")); err != nil {
+ if asymkey_model.IsErrKeyAccessDenied(err) {
+ ctx.Error(http.StatusForbidden, "", "You do not have access to this key")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteDeployKey", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/label.go b/routers/api/v1/repo/label.go
new file mode 100644
index 0000000..b6eb51f
--- /dev/null
+++ b/routers/api/v1/repo/label.go
@@ -0,0 +1,285 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "strconv"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/modules/label"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListLabels list all the labels of a repository
+func ListLabels(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/labels issue issueListLabels
+ // ---
+ // summary: Get all of a repository's labels
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/LabelList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ labels, err := issues_model.GetLabelsByRepoID(ctx, ctx.Repo.Repository.ID, ctx.FormString("sort"), utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsByRepoID", err)
+ return
+ }
+
+ count, err := issues_model.CountLabelsByRepoID(ctx, ctx.Repo.Repository.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, convert.ToLabelList(labels, ctx.Repo.Repository, nil))
+}
+
+// GetLabel get label by repository and label id
+func GetLabel(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/labels/{id} issue issueGetLabel
+ // ---
+ // summary: Get a single label
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ var (
+ l *issues_model.Label
+ err error
+ )
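+	// The ":id" parameter may be either a numeric label ID or a label name.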
+ strID := ctx.Params(":id")
+ if intID, err2 := strconv.ParseInt(strID, 10, 64); err2 != nil {
+ l, err = issues_model.GetLabelInRepoByName(ctx, ctx.Repo.Repository.ID, strID)
+ } else {
+ l, err = issues_model.GetLabelInRepoByID(ctx, ctx.Repo.Repository.ID, intID)
+ }
+ if err != nil {
+ if issues_model.IsErrRepoLabelNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetLabelByRepoID", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabel(l, ctx.Repo.Repository, nil))
+}
+
+// CreateLabel create a label for a repository
+func CreateLabel(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/labels issue issueCreateLabel
+ // ---
+ // summary: Create a label
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateLabelOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateLabelOption)
+
+ color, err := label.NormalizeColor(form.Color)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "StringToColor", err)
+ return
+ }
+ form.Color = color
+ l := &issues_model.Label{
+ Name: form.Name,
+ Exclusive: form.Exclusive,
+ Color: form.Color,
+ RepoID: ctx.Repo.Repository.ID,
+ Description: form.Description,
+ }
+ l.SetArchived(form.IsArchived)
+ if err := issues_model.NewLabel(ctx, l); err != nil {
+ ctx.Error(http.StatusInternalServerError, "NewLabel", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToLabel(l, ctx.Repo.Repository, nil))
+}
+
+// EditLabel modify a label for a repository
+func EditLabel(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/labels/{id} issue issueEditLabel
+ // ---
+ // summary: Update a label
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditLabelOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Label"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.EditLabelOption)
+ l, err := issues_model.GetLabelInRepoByID(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if issues_model.IsErrRepoLabelNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetLabelByRepoID", err)
+ }
+ return
+ }
+
+ if form.Name != nil {
+ l.Name = *form.Name
+ }
+ if form.Exclusive != nil {
+ l.Exclusive = *form.Exclusive
+ }
+ if form.Color != nil {
+ color, err := label.NormalizeColor(*form.Color)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "StringToColor", err)
+ return
+ }
+ l.Color = color
+ }
+ if form.Description != nil {
+ l.Description = *form.Description
+ }
+ l.SetArchived(form.IsArchived != nil && *form.IsArchived)
+ if err := issues_model.UpdateLabel(ctx, l); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateLabel", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToLabel(l, ctx.Repo.Repository, nil))
+}
+
+// DeleteLabel delete a label for a repository
+func DeleteLabel(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/labels/{id} issue issueDeleteLabel
+ // ---
+ // summary: Delete a label
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the label to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := issues_model.DeleteLabel(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":id")); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteLabel", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/language.go b/routers/api/v1/repo/language.go
new file mode 100644
index 0000000..f1d5bbe
--- /dev/null
+++ b/routers/api/v1/repo/language.go
@@ -0,0 +1,81 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "bytes"
+ "net/http"
+ "strconv"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/services/context"
+)
+
+type languageResponse []*repo_model.LanguageStat
+
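+// MarshalJSON renders the statistics as a single flat JSON object mapping each
+// language name to its size in bytes, for example (sizes are illustrative):
+//
+//	{"Go":1240000,"JavaScript":52000}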
+func (l languageResponse) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ if _, err := buf.WriteString("{"); err != nil {
+ return nil, err
+ }
+ for i, lang := range l {
+ if i > 0 {
+ if _, err := buf.WriteString(","); err != nil {
+ return nil, err
+ }
+ }
+ if _, err := buf.WriteString(strconv.Quote(lang.Language)); err != nil {
+ return nil, err
+ }
+ if _, err := buf.WriteString(":"); err != nil {
+ return nil, err
+ }
+ if _, err := buf.WriteString(strconv.FormatInt(lang.Size, 10)); err != nil {
+ return nil, err
+ }
+ }
+ if _, err := buf.WriteString("}"); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// GetLanguages returns languages and number of bytes of code written
+func GetLanguages(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/languages repository repoGetLanguages
+ // ---
+ // summary: Get languages and number of bytes of code written
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "200":
+ // "$ref": "#/responses/LanguageStatistics"
+
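+	// Example request (host, owner and repo are illustrative):
+	//   curl -s https://example.com/api/v1/repos/owner/repo/languages
+	// The response body is the flat language->bytes object produced by languageResponse above.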
+ langs, err := repo_model.GetLanguageStats(ctx, ctx.Repo.Repository)
+ if err != nil {
+ log.Error("GetLanguageStats failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+
+ resp := make(languageResponse, len(langs))
+ copy(resp, langs)
+
+ ctx.JSON(http.StatusOK, resp)
+}
diff --git a/routers/api/v1/repo/main_test.go b/routers/api/v1/repo/main_test.go
new file mode 100644
index 0000000..451f34d
--- /dev/null
+++ b/routers/api/v1/repo/main_test.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m, &unittest.TestOptions{
+ SetUp: func() error {
+ setting.LoadQueueSettings()
+ return webhook_service.Init()
+ },
+ })
+}
diff --git a/routers/api/v1/repo/migrate.go b/routers/api/v1/repo/migrate.go
new file mode 100644
index 0000000..0991723
--- /dev/null
+++ b/routers/api/v1/repo/migrate.go
@@ -0,0 +1,281 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ quota_model "code.gitea.io/gitea/models/quota"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/lfs"
+ "code.gitea.io/gitea/modules/log"
+ base "code.gitea.io/gitea/modules/migration"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/forms"
+ "code.gitea.io/gitea/services/migrations"
+ notify_service "code.gitea.io/gitea/services/notify"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// Migrate migrates a remote git repository to this Gitea instance
+func Migrate(ctx *context.APIContext) {
+ // swagger:operation POST /repos/migrate repository repoMigrate
+ // ---
+ // summary: Migrate a remote git repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/MigrateRepoOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "409":
+ // description: The repository with the same name already exists.
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.MigrateRepoOptions)
+
+ // get repoOwner
+ var (
+ repoOwner *user_model.User
+ err error
+ )
+ if len(form.RepoOwner) != 0 {
+ repoOwner, err = user_model.GetUserByName(ctx, form.RepoOwner)
+ } else if form.RepoOwnerID != 0 {
+ repoOwner, err = user_model.GetUserByID(ctx, form.RepoOwnerID)
+ } else {
+ repoOwner = ctx.Doer
+ }
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUser", err)
+ }
+ return
+ }
+
+ if ctx.HasAPIError() {
+ ctx.Error(http.StatusUnprocessableEntity, "", ctx.GetErrMsg())
+ return
+ }
+
+ if !ctx.CheckQuota(quota_model.LimitSubjectSizeReposAll, repoOwner.ID, repoOwner.Name) {
+ return
+ }
+
+ if !ctx.Doer.IsAdmin {
+ if !repoOwner.IsOrganization() && ctx.Doer.ID != repoOwner.ID {
+ ctx.Error(http.StatusForbidden, "", "Given user is not an organization.")
+ return
+ }
+
+ if repoOwner.IsOrganization() {
+ // Check ownership of organization.
+ isOwner, err := organization.OrgFromUser(repoOwner).IsOwnedBy(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsOwnedBy", err)
+ return
+ } else if !isOwner {
+ ctx.Error(http.StatusForbidden, "", "Given user is not owner of organization.")
+ return
+ }
+ }
+ }
+
+ remoteAddr, err := forms.ParseRemoteAddr(form.CloneAddr, form.AuthUsername, form.AuthPassword)
+ if err == nil {
+ err = migrations.IsMigrateURLAllowed(remoteAddr, ctx.Doer)
+ }
+ if err != nil {
+ handleRemoteAddrError(ctx, err)
+ return
+ }
+
+ gitServiceType := convert.ToGitServiceType(form.Service)
+
+ if form.Mirror && setting.Mirror.DisableNewPull {
+ ctx.Error(http.StatusForbidden, "MirrorsGlobalDisabled", fmt.Errorf("the site administrator has disabled the creation of new pull mirrors"))
+ return
+ }
+
+ if setting.Repository.DisableMigrations {
+ ctx.Error(http.StatusForbidden, "MigrationsGlobalDisabled", fmt.Errorf("the site administrator has disabled migrations"))
+ return
+ }
+
+ form.LFS = form.LFS && setting.LFS.StartServer
+
+ if form.LFS && len(form.LFSEndpoint) > 0 {
+ ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+ if ep == nil {
+ ctx.Error(http.StatusInternalServerError, "", ctx.Tr("repo.migrate.invalid_lfs_endpoint"))
+ return
+ }
+ err = migrations.IsMigrateURLAllowed(ep.String(), ctx.Doer)
+ if err != nil {
+ handleRemoteAddrError(ctx, err)
+ return
+ }
+ }
+
+ opts := migrations.MigrateOptions{
+ CloneAddr: remoteAddr,
+ RepoName: form.RepoName,
+ Description: form.Description,
+ Private: form.Private || setting.Repository.ForcePrivate,
+ Mirror: form.Mirror,
+ LFS: form.LFS,
+ LFSEndpoint: form.LFSEndpoint,
+ AuthUsername: form.AuthUsername,
+ AuthPassword: form.AuthPassword,
+ AuthToken: form.AuthToken,
+ Wiki: form.Wiki,
+ Issues: form.Issues,
+ Milestones: form.Milestones,
+ Labels: form.Labels,
+ Comments: form.Issues || form.PullRequests,
+ PullRequests: form.PullRequests,
+ Releases: form.Releases,
+ GitServiceType: gitServiceType,
+ MirrorInterval: form.MirrorInterval,
+ }
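+	// Pull mirrors only replicate git data, so issue, milestone, label, comment,
+	// pull request and release migration is disabled for them.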
+ if opts.Mirror {
+ opts.Issues = false
+ opts.Milestones = false
+ opts.Labels = false
+ opts.Comments = false
+ opts.PullRequests = false
+ opts.Releases = false
+ }
+
+ repo, err := repo_service.CreateRepositoryDirectly(ctx, ctx.Doer, repoOwner, repo_service.CreateRepoOptions{
+ Name: opts.RepoName,
+ Description: opts.Description,
+ OriginalURL: form.CloneAddr,
+ GitServiceType: gitServiceType,
+ IsPrivate: opts.Private || setting.Repository.ForcePrivate,
+ IsMirror: opts.Mirror,
+ Status: repo_model.RepositoryBeingMigrated,
+ })
+ if err != nil {
+ handleMigrateError(ctx, repoOwner, err)
+ return
+ }
+
+ opts.MigrateToRepoID = repo.ID
+
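+	// If the migration below panics or fails, delete the half-created repository
+	// record again; on success, send the migrate notification instead.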
+ defer func() {
+ if e := recover(); e != nil {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Handler crashed with error: %v", log.Stack(2))
+
+ err = errors.New(buf.String())
+ }
+
+ if err == nil {
+ notify_service.MigrateRepository(ctx, ctx.Doer, repoOwner, repo)
+ return
+ }
+
+ if repo != nil {
+ if errDelete := repo_service.DeleteRepositoryDirectly(ctx, ctx.Doer, repo.ID); errDelete != nil {
+ log.Error("DeleteRepository: %v", errDelete)
+ }
+ }
+ }()
+
+ if repo, err = migrations.MigrateRepository(graceful.GetManager().HammerContext(), ctx.Doer, repoOwner.Name, opts, nil); err != nil {
+ handleMigrateError(ctx, repoOwner, err)
+ return
+ }
+
+ log.Trace("Repository migrated: %s/%s", repoOwner.Name, form.RepoName)
+ ctx.JSON(http.StatusCreated, convert.ToRepo(ctx, repo, access_model.Permission{AccessMode: perm.AccessModeAdmin}))
+}
+
+func handleMigrateError(ctx *context.APIContext, repoOwner *user_model.User, err error) {
+ switch {
+ case repo_model.IsErrRepoAlreadyExist(err):
+ ctx.Error(http.StatusConflict, "", "The repository with the same name already exists.")
+ case repo_model.IsErrRepoFilesAlreadyExist(err):
+ ctx.Error(http.StatusConflict, "", "Files already exist for this repository. Adopt them or delete them.")
+ case migrations.IsRateLimitError(err):
+		ctx.Error(http.StatusUnprocessableEntity, "", "The remote service rate limited the request.")
+ case migrations.IsTwoFactorAuthError(err):
+		ctx.Error(http.StatusUnprocessableEntity, "", "The remote service requires two-factor authentication.")
+ case repo_model.IsErrReachLimitOfRepo(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("You have already reached your limit of %d repositories.", repoOwner.MaxCreationLimit()))
+ case db.IsErrNameReserved(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("The username '%s' is reserved.", err.(db.ErrNameReserved).Name))
+ case db.IsErrNameCharsNotAllowed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("The username '%s' contains invalid characters.", err.(db.ErrNameCharsNotAllowed).Name))
+ case db.IsErrNamePatternNotAllowed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("The pattern '%s' is not allowed in a username.", err.(db.ErrNamePatternNotAllowed).Pattern))
+ case models.IsErrInvalidCloneAddr(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ case base.IsErrNotSupported(err):
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ default:
+ err = util.SanitizeErrorCredentialURLs(err)
+ if strings.Contains(err.Error(), "Authentication failed") ||
+ strings.Contains(err.Error(), "Bad credentials") ||
+ strings.Contains(err.Error(), "could not read Username") {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Authentication failed: %v.", err))
+ } else if strings.Contains(err.Error(), "fatal:") {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Migration failed: %v.", err))
+ } else {
+ ctx.Error(http.StatusInternalServerError, "MigrateRepository", err)
+ }
+ }
+}
+
+func handleRemoteAddrError(ctx *context.APIContext, err error) {
+ if models.IsErrInvalidCloneAddr(err) {
+ addrErr := err.(*models.ErrInvalidCloneAddr)
+ switch {
+ case addrErr.IsURLError:
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ case addrErr.IsPermissionDenied:
+ if addrErr.LocalPath {
+ ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import local repositories.")
+ } else {
+ ctx.Error(http.StatusUnprocessableEntity, "", "You can not import from disallowed hosts.")
+ }
+ case addrErr.IsInvalidPath:
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid local path, it does not exist or not a directory.")
+ default:
+ ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", err)
+ }
+}
diff --git a/routers/api/v1/repo/milestone.go b/routers/api/v1/repo/milestone.go
new file mode 100644
index 0000000..b953401
--- /dev/null
+++ b/routers/api/v1/repo/milestone.go
@@ -0,0 +1,309 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListMilestones list milestones for a repository
+func ListMilestones(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/milestones issue issueGetMilestonesList
+ // ---
+	// summary: Get all of a repository's open milestones
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: state
+ // in: query
+	//   description: Milestone state. Recognized values are open, closed and all. Defaults to "open"
+ // type: string
+ // - name: name
+ // in: query
+ // description: filter by milestone name
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/MilestoneList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ state := api.StateType(ctx.FormString("state"))
+ var isClosed optional.Option[bool]
+ switch state {
+ case api.StateClosed, api.StateOpen:
+ isClosed = optional.Some(state == api.StateClosed)
+ }
+
+ milestones, total, err := db.FindAndCount[issues_model.Milestone](ctx, issues_model.FindMilestoneOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ IsClosed: isClosed,
+ Name: ctx.FormString("name"),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "db.FindAndCount[issues_model.Milestone]", err)
+ return
+ }
+
+ apiMilestones := make([]*api.Milestone, len(milestones))
+ for i := range milestones {
+ apiMilestones[i] = convert.ToAPIMilestone(milestones[i])
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, &apiMilestones)
+}
+
+// GetMilestone get a milestone for a repository by ID and if not available by name
+func GetMilestone(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/milestones/{id} issue issueGetMilestone
+ // ---
+ // summary: Get a milestone
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: the milestone to get, identified by ID and if not available by name
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Milestone"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ milestone := getMilestoneByIDOrName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAPIMilestone(milestone))
+}
+
+// CreateMilestone create a milestone for a repository
+func CreateMilestone(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/milestones issue issueCreateMilestone
+ // ---
+ // summary: Create a milestone
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateMilestoneOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Milestone"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.CreateMilestoneOption)
+
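+	// Milestones created without an explicit deadline get the far-future
+	// sentinel date 9999-12-31.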
+ if form.Deadline == nil {
+ defaultDeadline, _ := time.ParseInLocation("2006-01-02", "9999-12-31", time.Local)
+ form.Deadline = &defaultDeadline
+ }
+
+ milestone := &issues_model.Milestone{
+ RepoID: ctx.Repo.Repository.ID,
+ Name: form.Title,
+ Content: form.Description,
+ DeadlineUnix: timeutil.TimeStamp(form.Deadline.Unix()),
+ }
+
+ if form.State == "closed" {
+ milestone.IsClosed = true
+ milestone.ClosedDateUnix = timeutil.TimeStampNow()
+ }
+
+ if err := issues_model.NewMilestone(ctx, milestone); err != nil {
+ ctx.Error(http.StatusInternalServerError, "NewMilestone", err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToAPIMilestone(milestone))
+}
+
+// EditMilestone modify a milestone for a repository by ID and if not available by name
+func EditMilestone(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/milestones/{id} issue issueEditMilestone
+ // ---
+ // summary: Update a milestone
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: the milestone to edit, identified by ID and if not available by name
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditMilestoneOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Milestone"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ form := web.GetForm(ctx).(*api.EditMilestoneOption)
+ milestone := getMilestoneByIDOrName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ if len(form.Title) > 0 {
+ milestone.Name = form.Title
+ }
+ if form.Description != nil {
+ milestone.Content = *form.Description
+ }
+ if form.Deadline != nil && !form.Deadline.IsZero() {
+ milestone.DeadlineUnix = timeutil.TimeStamp(form.Deadline.Unix())
+ }
+
+ oldIsClosed := milestone.IsClosed
+ if form.State != nil {
+ milestone.IsClosed = *form.State == string(api.StateClosed)
+ }
+
+ if err := issues_model.UpdateMilestone(ctx, milestone, oldIsClosed); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateMilestone", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIMilestone(milestone))
+}
+
+// DeleteMilestone delete a milestone for a repository by ID and if not available by name
+func DeleteMilestone(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/milestones/{id} issue issueDeleteMilestone
+ // ---
+ // summary: Delete a milestone
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: the milestone to delete, identified by ID and if not available by name
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ m := getMilestoneByIDOrName(ctx)
+ if ctx.Written() {
+ return
+ }
+
+ if err := issues_model.DeleteMilestoneByRepoID(ctx, ctx.Repo.Repository.ID, m.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteMilestoneByRepoID", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// getMilestoneByIDOrName get milestone by ID and if not available by name
+func getMilestoneByIDOrName(ctx *context.APIContext) *issues_model.Milestone {
+ mile := ctx.Params(":id")
+ mileID, _ := strconv.ParseInt(mile, 0, 64)
+
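+	// Try the numeric ID first; fall back to a lookup by name when the parameter
+	// is not an integer or no milestone with that ID exists in the repository.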
+ if mileID != 0 {
+ milestone, err := issues_model.GetMilestoneByRepoID(ctx, ctx.Repo.Repository.ID, mileID)
+ if err == nil {
+ return milestone
+ } else if !issues_model.IsErrMilestoneNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneByRepoID", err)
+ return nil
+ }
+ }
+
+ milestone, err := issues_model.GetMilestoneByRepoIDANDName(ctx, ctx.Repo.Repository.ID, mile)
+ if err != nil {
+ if issues_model.IsErrMilestoneNotExist(err) {
+ ctx.NotFound()
+ return nil
+ }
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneByRepoID", err)
+ return nil
+ }
+
+ return milestone
+}
diff --git a/routers/api/v1/repo/mirror.go b/routers/api/v1/repo/mirror.go
new file mode 100644
index 0000000..ae727fd
--- /dev/null
+++ b/routers/api/v1/repo/mirror.go
@@ -0,0 +1,449 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/forms"
+ "code.gitea.io/gitea/services/migrations"
+ mirror_service "code.gitea.io/gitea/services/mirror"
+)
+
+// MirrorSync adds a mirrored repository to the sync queue
+func MirrorSync(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/mirror-sync repository repoMirrorSync
+ // ---
+ // summary: Sync a mirrored repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to sync
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to sync
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ repo := ctx.Repo.Repository
+
+	if !ctx.Repo.CanWrite(unit.TypeCode) {
+		ctx.Error(http.StatusForbidden, "MirrorSync", "Must have write access")
+		return
+	}
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "MirrorSync", "Mirror feature is disabled")
+ return
+ }
+
+ if _, err := repo_model.GetMirrorByRepoID(ctx, repo.ID); err != nil {
+ if errors.Is(err, repo_model.ErrMirrorNotExist) {
+ ctx.Error(http.StatusBadRequest, "MirrorSync", "Repository is not a mirror")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "MirrorSync", err)
+ return
+ }
+
+ mirror_service.AddPullMirrorToQueue(repo.ID)
+
+ ctx.Status(http.StatusOK)
+}
+
+// PushMirrorSync adds all push mirrored repositories to the sync queue
+func PushMirrorSync(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/push_mirrors-sync repository repoPushMirrorSync
+ // ---
+	// summary: Sync all push mirrors of the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to sync
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to sync
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "PushMirrorSync", "Mirror feature is disabled")
+ return
+ }
+	// Get all push mirrors of this repository
+ pushMirrors, _, err := repo_model.GetPushMirrorsByRepoID(ctx, ctx.Repo.Repository.ID, db.ListOptions{})
+ if err != nil {
+ ctx.Error(http.StatusNotFound, "PushMirrorSync", err)
+ return
+ }
+ for _, mirror := range pushMirrors {
+ ok := mirror_service.SyncPushMirror(ctx, mirror.ID)
+ if !ok {
+ ctx.Error(http.StatusInternalServerError, "PushMirrorSync", "error occurred when syncing push mirror "+mirror.RemoteName)
+ return
+ }
+ }
+
+ ctx.Status(http.StatusOK)
+}
+
+// ListPushMirrors get list of push mirrors of a repository
+func ListPushMirrors(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/push_mirrors repository repoListPushMirrors
+ // ---
+ // summary: Get all push mirrors of the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PushMirrorList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "GetPushMirrorsByRepoID", "Mirror feature is disabled")
+ return
+ }
+
+ repo := ctx.Repo.Repository
+ // Get all push mirrors for the specified repository.
+ pushMirrors, count, err := repo_model.GetPushMirrorsByRepoID(ctx, repo.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusNotFound, "GetPushMirrorsByRepoID", err)
+ return
+ }
+
+ responsePushMirrors := make([]*api.PushMirror, 0, len(pushMirrors))
+ for _, mirror := range pushMirrors {
+ m, err := convert.ToPushMirror(ctx, mirror)
+ if err == nil {
+ responsePushMirrors = append(responsePushMirrors, m)
+ }
+ }
+ ctx.SetLinkHeader(len(responsePushMirrors), utils.GetListOptions(ctx).PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, responsePushMirrors)
+}
+
+// GetPushMirrorByName get push mirror of a repository by name
+func GetPushMirrorByName(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/push_mirrors/{name} repository repoGetPushMirrorByRemoteName
+ // ---
+ // summary: Get push mirror of the repository by remoteName
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: remote name of push mirror
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PushMirror"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "GetPushMirrorByRemoteName", "Mirror feature is disabled")
+ return
+ }
+
+ mirrorName := ctx.Params(":name")
+ // Get push mirror of a specific repo by remoteName
+ pushMirror, exist, err := db.Get[repo_model.PushMirror](ctx, repo_model.PushMirrorOptions{
+ RepoID: ctx.Repo.Repository.ID,
+ RemoteName: mirrorName,
+ }.ToConds())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetPushMirrors", err)
+ return
+ } else if !exist {
+ ctx.Error(http.StatusNotFound, "GetPushMirrors", nil)
+ return
+ }
+
+ m, err := convert.ToPushMirror(ctx, pushMirror)
+ if err != nil {
+ ctx.ServerError("GetPushMirrorByRemoteName", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, m)
+}
+
+// AddPushMirror adds a push mirror to a repository
+func AddPushMirror(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/push_mirrors repository repoAddPushMirror
+ // ---
+	// summary: Add a push mirror to the repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreatePushMirrorOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PushMirror"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "AddPushMirror", "Mirror feature is disabled")
+ return
+ }
+
+ pushMirror := web.GetForm(ctx).(*api.CreatePushMirrorOption)
+ CreatePushMirror(ctx, pushMirror)
+}
+
+// DeletePushMirrorByRemoteName deletes a push mirror from a repository by remoteName
+func DeletePushMirrorByRemoteName(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/push_mirrors/{name} repository repoDeletePushMirror
+ // ---
+	// summary: Delete a push mirror from a repository by remoteName
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: name
+ // in: path
+ // description: remote name of the pushMirror
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "400":
+ // "$ref": "#/responses/error"
+
+ if !setting.Mirror.Enabled {
+ ctx.Error(http.StatusBadRequest, "DeletePushMirrorByName", "Mirror feature is disabled")
+ return
+ }
+
+ remoteName := ctx.Params(":name")
+ // Delete push mirror on repo by name.
+ err := repo_model.DeletePushMirrors(ctx, repo_model.PushMirrorOptions{RepoID: ctx.Repo.Repository.ID, RemoteName: remoteName})
+ if err != nil {
+ ctx.Error(http.StatusNotFound, "DeletePushMirrors", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+func CreatePushMirror(ctx *context.APIContext, mirrorOption *api.CreatePushMirrorOption) {
+ repo := ctx.Repo.Repository
+
+ interval, err := time.ParseDuration(mirrorOption.Interval)
+ if err != nil || (interval != 0 && interval < setting.Mirror.MinInterval) {
+		ctx.Error(http.StatusBadRequest, "CreatePushMirror", fmt.Errorf("invalid mirror interval: %q", mirrorOption.Interval))
+ return
+ }
+
+ if mirrorOption.UseSSH && !git.HasSSHExecutable {
+ ctx.Error(http.StatusBadRequest, "CreatePushMirror", "SSH authentication not available.")
+ return
+ }
+
+ if mirrorOption.UseSSH && (mirrorOption.RemoteUsername != "" || mirrorOption.RemotePassword != "") {
+		ctx.Error(http.StatusBadRequest, "CreatePushMirror", "'use_ssh' is mutually exclusive with 'remote_username' and 'remote_password'")
+ return
+ }
+
+ address, err := forms.ParseRemoteAddr(mirrorOption.RemoteAddress, mirrorOption.RemoteUsername, mirrorOption.RemotePassword)
+ if err == nil {
+ err = migrations.IsMigrateURLAllowed(address, ctx.ContextUser)
+ }
+ if err != nil {
+ HandleRemoteAddressError(ctx, err)
+ return
+ }
+
+ remoteSuffix, err := util.CryptoRandomString(10)
+ if err != nil {
+ ctx.ServerError("CryptoRandomString", err)
+ return
+ }
+
+ remoteAddress, err := util.SanitizeURL(address)
+ if err != nil {
+ ctx.ServerError("SanitizeURL", err)
+ return
+ }
+
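+	// Each push mirror gets a unique remote name ("remote_mirror_<suffix>") so that
+	// several push mirrors can coexist on the same repository.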
+ pushMirror := &repo_model.PushMirror{
+ RepoID: repo.ID,
+ Repo: repo,
+ RemoteName: fmt.Sprintf("remote_mirror_%s", remoteSuffix),
+ Interval: interval,
+ SyncOnCommit: mirrorOption.SyncOnCommit,
+ RemoteAddress: remoteAddress,
+ }
+
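+	// For SSH mirrors a fresh keypair is generated: the public half is stored on the
+	// mirror record, the private half is persisted via SetPrivatekey after insert.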
+ var plainPrivateKey []byte
+ if mirrorOption.UseSSH {
+ publicKey, privateKey, err := util.GenerateSSHKeypair()
+ if err != nil {
+ ctx.ServerError("GenerateSSHKeypair", err)
+ return
+ }
+ plainPrivateKey = privateKey
+ pushMirror.PublicKey = string(publicKey)
+ }
+
+ if err = db.Insert(ctx, pushMirror); err != nil {
+ ctx.ServerError("InsertPushMirror", err)
+ return
+ }
+
+ if mirrorOption.UseSSH {
+ if err = pushMirror.SetPrivatekey(ctx, plainPrivateKey); err != nil {
+ ctx.ServerError("SetPrivatekey", err)
+ return
+ }
+ }
+
+	// if registering the push mirror remote fails, remove the mirror from the database again
+ if err = mirror_service.AddPushMirrorRemote(ctx, pushMirror, address); err != nil {
+ if err := repo_model.DeletePushMirrors(ctx, repo_model.PushMirrorOptions{ID: pushMirror.ID, RepoID: pushMirror.RepoID}); err != nil {
+ ctx.ServerError("DeletePushMirrors", err)
+ return
+ }
+ ctx.ServerError("AddPushMirrorRemote", err)
+ return
+ }
+ m, err := convert.ToPushMirror(ctx, pushMirror)
+ if err != nil {
+ ctx.ServerError("ToPushMirror", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, m)
+}
+
+func HandleRemoteAddressError(ctx *context.APIContext, err error) {
+ if models.IsErrInvalidCloneAddr(err) {
+ addrErr := err.(*models.ErrInvalidCloneAddr)
+ switch {
+ case addrErr.IsProtocolInvalid:
+ ctx.Error(http.StatusBadRequest, "CreatePushMirror", "Invalid mirror protocol")
+ case addrErr.IsURLError:
+			ctx.Error(http.StatusBadRequest, "CreatePushMirror", "Invalid URL")
+ case addrErr.IsPermissionDenied:
+ ctx.Error(http.StatusUnauthorized, "CreatePushMirror", "Permission denied")
+ default:
+ ctx.Error(http.StatusBadRequest, "CreatePushMirror", "Unknown error")
+ }
+ return
+	}
+	ctx.Error(http.StatusInternalServerError, "CreatePushMirror", err)
+}
diff --git a/routers/api/v1/repo/notes.go b/routers/api/v1/repo/notes.go
new file mode 100644
index 0000000..a4a1d4e
--- /dev/null
+++ b/routers/api/v1/repo/notes.go
@@ -0,0 +1,104 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetNote gets a note corresponding to a single commit from a repository
+func GetNote(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/notes/{sha} repository repoGetNote
+ // ---
+ // summary: Get a note corresponding to a single commit from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: a git ref or commit sha
+ // type: string
+ // required: true
+ // - name: verification
+ // in: query
+	//   description: include verification for the commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: files
+ // in: query
+	//   description: include a list of affected files for the commit (disable for speedup, default 'true')
+ // type: boolean
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Note"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := ctx.Params(":sha")
+ if !git.IsValidRefPattern(sha) {
+ ctx.Error(http.StatusUnprocessableEntity, "no valid ref or sha", fmt.Sprintf("no valid ref or sha: %s", sha))
+ return
+ }
+ getNote(ctx, sha)
+}
+
+func getNote(ctx *context.APIContext, identifier string) {
+ if ctx.Repo.GitRepo == nil {
+ ctx.InternalServerError(fmt.Errorf("no open git repo"))
+ return
+ }
+
+ commitID, err := ctx.Repo.GitRepo.ConvertToGitID(identifier)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+			ctx.Error(http.StatusInternalServerError, "ConvertToGitID", err)
+ }
+ return
+ }
+
+ var note git.Note
+ if err := git.GetNote(ctx, ctx.Repo.GitRepo, commitID.String(), &note); err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(identifier)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetNote", err)
+ return
+ }
+
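+	// Both query flags default to true when the corresponding parameter is absent.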
+ verification := ctx.FormString("verification") == "" || ctx.FormBool("verification")
+ files := ctx.FormString("files") == "" || ctx.FormBool("files")
+
+ cmt, err := convert.ToCommit(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, note.Commit, nil,
+ convert.ToCommitOptions{
+ Stat: true,
+ Verification: verification,
+ Files: files,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ToCommit", err)
+ return
+ }
+ apiNote := api.Note{Message: string(note.Message), Commit: cmt}
+ ctx.JSON(http.StatusOK, apiNote)
+}
diff --git a/routers/api/v1/repo/patch.go b/routers/api/v1/repo/patch.go
new file mode 100644
index 0000000..27c5c17
--- /dev/null
+++ b/routers/api/v1/repo/patch.go
@@ -0,0 +1,114 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ git_model "code.gitea.io/gitea/models/git"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/repository/files"
+)
+
+// ApplyDiffPatch handles API call for applying a patch
+func ApplyDiffPatch(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/diffpatch repository repoApplyDiffPatch
+ // ---
+ // summary: Apply diff patch to repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/UpdateFileOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/FileResponse"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ apiOpts := web.GetForm(ctx).(*api.ApplyDiffPatchFileOptions)
+
+ opts := &files.ApplyDiffPatchOptions{
+ Content: apiOpts.Content,
+ SHA: apiOpts.SHA,
+ Message: apiOpts.Message,
+ OldBranch: apiOpts.BranchName,
+ NewBranch: apiOpts.NewBranchName,
+ Committer: &files.IdentityOptions{
+ Name: apiOpts.Committer.Name,
+ Email: apiOpts.Committer.Email,
+ },
+ Author: &files.IdentityOptions{
+ Name: apiOpts.Author.Name,
+ Email: apiOpts.Author.Email,
+ },
+ Dates: &files.CommitDateOptions{
+ Author: apiOpts.Dates.Author,
+ Committer: apiOpts.Dates.Committer,
+ },
+ Signoff: apiOpts.Signoff,
+ }
+ if opts.Dates.Author.IsZero() {
+ opts.Dates.Author = time.Now()
+ }
+ if opts.Dates.Committer.IsZero() {
+ opts.Dates.Committer = time.Now()
+ }
+
+ if opts.Message == "" {
+ opts.Message = "apply-patch"
+ }
+
+ if !canWriteFiles(ctx, apiOpts.BranchName) {
+ ctx.Error(http.StatusInternalServerError, "ApplyPatch", repo_model.ErrUserDoesNotHaveAccessToRepo{
+ UserID: ctx.Doer.ID,
+ RepoName: ctx.Repo.Repository.LowerName,
+ })
+ return
+ }
+
+ fileResponse, err := files.ApplyDiffPatch(ctx, ctx.Repo.Repository, ctx.Doer, opts)
+ if err != nil {
+ if models.IsErrUserCannotCommit(err) || models.IsErrFilePathProtected(err) {
+ ctx.Error(http.StatusForbidden, "Access", err)
+ return
+ }
+ if git_model.IsErrBranchAlreadyExists(err) || models.IsErrFilenameInvalid(err) || models.IsErrSHADoesNotMatch(err) ||
+ models.IsErrFilePathInvalid(err) || models.IsErrRepoFileAlreadyExists(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid", err)
+ return
+ }
+ if git_model.IsErrBranchNotExist(err) || git.IsErrBranchNotExist(err) {
+ ctx.Error(http.StatusNotFound, "BranchDoesNotExist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ApplyPatch", err)
+ } else {
+ ctx.JSON(http.StatusCreated, fileResponse)
+ }
+}
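+
+// Example (illustrative sketch only): building a request body for this endpoint
+// on the client side by reusing the structs package imported above as "api".
+// The patch text, branch names, repository path and token are placeholders;
+// the snippet assumes "bytes", "encoding/json" and "net/http" imports.
+//
+//	func applyPatch(apiURL, token, patchText string) (*http.Response, error) {
+//		var opts api.ApplyDiffPatchFileOptions
+//		opts.Content = patchText
+//		opts.Message = "Apply patch via API"
+//		opts.BranchName = "main"
+//		opts.NewBranchName = "apply-patch"
+//		body, err := json.Marshal(&opts)
+//		if err != nil {
+//			return nil, err
+//		}
+//		req, err := http.NewRequest(http.MethodPost, apiURL+"/repos/owner/repo/diffpatch", bytes.NewReader(body))
+//		if err != nil {
+//			return nil, err
+//		}
+//		req.Header.Set("Authorization", "token "+token)
+//		req.Header.Set("Content-Type", "application/json")
+//		return http.DefaultClient.Do(req)
+//	}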
diff --git a/routers/api/v1/repo/pull.go b/routers/api/v1/repo/pull.go
new file mode 100644
index 0000000..fcca180
--- /dev/null
+++ b/routers/api/v1/repo/pull.go
@@ -0,0 +1,1648 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ activities_model "code.gitea.io/gitea/models/activities"
+ git_model "code.gitea.io/gitea/models/git"
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ pull_model "code.gitea.io/gitea/models/pull"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ asymkey_service "code.gitea.io/gitea/services/asymkey"
+ "code.gitea.io/gitea/services/automerge"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/forms"
+ "code.gitea.io/gitea/services/gitdiff"
+ issue_service "code.gitea.io/gitea/services/issue"
+ notify_service "code.gitea.io/gitea/services/notify"
+ pull_service "code.gitea.io/gitea/services/pull"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListPullRequests returns a list of all PRs
+func ListPullRequests(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls repository repoListPullRequests
+ // ---
+ // summary: List a repo's pull requests
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: state
+ // in: query
+ // description: "State of pull request: open or closed (optional)"
+ // type: string
+ // enum: [closed, open, all]
+ // - name: sort
+ // in: query
+ // description: "Type of sort"
+ // type: string
+ // enum: [oldest, recentupdate, leastupdate, mostcomment, leastcomment, priority]
+ // - name: milestone
+ // in: query
+ // description: "ID of the milestone"
+ // type: integer
+ // format: int64
+ // - name: labels
+ // in: query
+ // description: "Label IDs"
+ // type: array
+ // collectionFormat: multi
+ // items:
+ // type: integer
+ // format: int64
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullRequestList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ labelIDs, err := base.StringsToInt64s(ctx.FormStrings("labels"))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "PullRequests", err)
+ return
+ }
+ listOptions := utils.GetListOptions(ctx)
+ prs, maxResults, err := issues_model.PullRequests(ctx, ctx.Repo.Repository.ID, &issues_model.PullRequestsOptions{
+ ListOptions: listOptions,
+ State: ctx.FormTrim("state"),
+ SortType: ctx.FormTrim("sort"),
+ Labels: labelIDs,
+ MilestoneID: ctx.FormInt64("milestone"),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "PullRequests", err)
+ return
+ }
+
+ apiPrs := make([]*api.PullRequest, len(prs))
+ // NOTE: load repository first, so that issue.Repo will be filled with pr.BaseRepo
+ if err := prs.LoadRepositories(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadRepositories", err)
+ return
+ }
+ issueList, err := prs.LoadIssues(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssues", err)
+ return
+ }
+
+ if err := issueList.LoadLabels(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadLabels", err)
+ return
+ }
+ if err := issueList.LoadPosters(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadPoster", err)
+ return
+ }
+ if err := issueList.LoadAttachments(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttachments", err)
+ return
+ }
+ if err := issueList.LoadMilestones(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadMilestones", err)
+ return
+ }
+ if err := issueList.LoadAssignees(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAssignees", err)
+ return
+ }
+
+ for i := range prs {
+ apiPrs[i] = convert.ToAPIPullRequest(ctx, prs[i], ctx.Doer)
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+ ctx.JSON(http.StatusOK, &apiPrs)
+}
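+
+// Example (illustrative sketch only): querying this endpoint with a state
+// filter and two label IDs; because "labels" uses collectionFormat multi, each
+// ID is added as its own query parameter. The URL and IDs are placeholders;
+// the snippet assumes "net/http" and "net/url" imports.
+//
+//	func listOpenPulls(apiURL string) (*http.Response, error) {
+//		q := url.Values{}
+//		q.Set("state", "open")
+//		q.Set("sort", "recentupdate")
+//		q.Add("labels", "1")
+//		q.Add("labels", "4")
+//		q.Set("limit", "20")
+//		return http.Get(apiURL + "/repos/owner/repo/pulls?" + q.Encode())
+//	}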
+
+// GetPullRequest returns a single PR based on index
+func GetPullRequest(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index} repository repoGetPullRequest
+ // ---
+ // summary: Get a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullRequest"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadBaseRepo", err)
+ return
+ }
+ if err = pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIPullRequest(ctx, pr, ctx.Doer))
+}
+
+// GetPullRequestByBaseHead returns a single PR based on its base and head branches
+func GetPullRequestByBaseHead(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{base}/{head} repository repoGetPullRequestByBaseHead
+ // ---
+ // summary: Get a pull request by base and head
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: base
+ // in: path
+ // description: base of the pull request to get
+ // type: string
+ // required: true
+ // - name: head
+ // in: path
+ // description: head of the pull request to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullRequest"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ var headRepoID int64
+ var headBranch string
+ head := ctx.Params("*")
+ if strings.Contains(head, ":") {
+ split := strings.SplitN(head, ":", 2)
+ headBranch = split[1]
+ var owner, name string
+ if strings.Contains(split[0], "/") {
+ split = strings.Split(split[0], "/")
+ owner = split[0]
+ name = split[1]
+ } else {
+ owner = split[0]
+ name = ctx.Repo.Repository.Name
+ }
+ repo, err := repo_model.GetRepositoryByOwnerAndName(ctx, owner, name)
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByOwnerName", err)
+ }
+ return
+ }
+ headRepoID = repo.ID
+ } else {
+ headRepoID = ctx.Repo.Repository.ID
+ headBranch = head
+ }
+
+ pr, err := issues_model.GetPullRequestByBaseHeadInfo(ctx, ctx.Repo.Repository.ID, headRepoID, ctx.Params(":base"), headBranch)
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByBaseHeadInfo", err)
+ }
+ return
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadBaseRepo", err)
+ return
+ }
+ if err = pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIPullRequest(ctx, pr, ctx.Doer))
+}
+
+// DownloadPullDiffOrPatch render a pull's raw diff or patch
+func DownloadPullDiffOrPatch(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}.{diffType} repository repoDownloadPullDiffOrPatch
+ // ---
+ // summary: Get a pull request diff or patch
+ // produces:
+ // - text/plain
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to get
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: diffType
+ // in: path
+ // description: whether the output is diff or patch
+ // type: string
+ // enum: [diff, patch]
+ // required: true
+ // - name: binary
+ // in: query
+ // description: whether to include binary file changes. if true, the diff is applicable with `git apply`
+ // type: boolean
+ // responses:
+ // "200":
+ // "$ref": "#/responses/string"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.InternalServerError(err)
+ }
+ return
+ }
+	patch := ctx.Params(":diffType") != "diff"
+
+ binary := ctx.FormBool("binary")
+
+ if err := pull_service.DownloadDiffOrPatch(ctx, pr, ctx, patch, binary); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+}
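+
+// Example (illustrative sketch only): fetching the patch form of a pull request
+// with binary changes included so the output can be fed to `git apply`. The
+// URL and index are placeholders; assumes "fmt" and "net/http" imports.
+//
+//	func downloadPatch(apiURL string, index int64) (*http.Response, error) {
+//		return http.Get(fmt.Sprintf("%s/repos/owner/repo/pulls/%d.patch?binary=true", apiURL, index))
+//	}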
+
+// CreatePullRequest creates a pull request from the given head into the base branch
+func CreatePullRequest(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls repository repoCreatePullRequest
+ // ---
+ // summary: Create a pull request
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreatePullRequestOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/PullRequest"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := *web.GetForm(ctx).(*api.CreatePullRequestOption)
+ if form.Head == form.Base {
+ ctx.Error(http.StatusUnprocessableEntity, "BaseHeadSame",
+ "Invalid PullRequest: There are no changes between the head and the base")
+ return
+ }
+
+ var (
+ repo = ctx.Repo.Repository
+ labelIDs []int64
+ milestoneID int64
+ )
+
+ // Get repo/branch information
+ headRepo, headGitRepo, compareInfo, baseBranch, headBranch := parseCompareInfo(ctx, form)
+ if ctx.Written() {
+ return
+ }
+ defer headGitRepo.Close()
+
+ // Check if another PR exists with the same targets
+ existingPr, err := issues_model.GetUnmergedPullRequest(ctx, headRepo.ID, ctx.Repo.Repository.ID, headBranch, baseBranch, issues_model.PullRequestFlowGithub)
+ if err != nil {
+ if !issues_model.IsErrPullRequestNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetUnmergedPullRequest", err)
+ return
+ }
+ } else {
+ err = issues_model.ErrPullRequestAlreadyExists{
+ ID: existingPr.ID,
+ IssueID: existingPr.Index,
+ HeadRepoID: existingPr.HeadRepoID,
+ BaseRepoID: existingPr.BaseRepoID,
+ HeadBranch: existingPr.HeadBranch,
+ BaseBranch: existingPr.BaseBranch,
+ }
+ ctx.Error(http.StatusConflict, "GetUnmergedPullRequest", err)
+ return
+ }
+
+ if len(form.Labels) > 0 {
+ labels, err := issues_model.GetLabelsInRepoByIDs(ctx, ctx.Repo.Repository.ID, form.Labels)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsInRepoByIDs", err)
+ return
+ }
+
+ labelIDs = make([]int64, 0, len(labels))
+ for _, label := range labels {
+ labelIDs = append(labelIDs, label.ID)
+ }
+
+ if ctx.Repo.Owner.IsOrganization() {
+ orgLabels, err := issues_model.GetLabelsInOrgByIDs(ctx, ctx.Repo.Owner.ID, form.Labels)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsInOrgByIDs", err)
+ return
+ }
+
+ orgLabelIDs := make([]int64, 0, len(orgLabels))
+ for _, orgLabel := range orgLabels {
+ orgLabelIDs = append(orgLabelIDs, orgLabel.ID)
+ }
+ labelIDs = append(labelIDs, orgLabelIDs...)
+ }
+ }
+
+ if form.Milestone > 0 {
+ milestone, err := issues_model.GetMilestoneByRepoID(ctx, ctx.Repo.Repository.ID, form.Milestone)
+ if err != nil {
+ if issues_model.IsErrMilestoneNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetMilestoneByRepoID", err)
+ }
+ return
+ }
+
+ milestoneID = milestone.ID
+ }
+
+ var deadlineUnix timeutil.TimeStamp
+ if form.Deadline != nil {
+ deadlineUnix = timeutil.TimeStamp(form.Deadline.Unix())
+ }
+
+ prIssue := &issues_model.Issue{
+ RepoID: repo.ID,
+ Title: form.Title,
+ PosterID: ctx.Doer.ID,
+ Poster: ctx.Doer,
+ MilestoneID: milestoneID,
+ IsPull: true,
+ Content: form.Body,
+ DeadlineUnix: deadlineUnix,
+ }
+ pr := &issues_model.PullRequest{
+ HeadRepoID: headRepo.ID,
+ BaseRepoID: repo.ID,
+ HeadBranch: headBranch,
+ BaseBranch: baseBranch,
+ HeadRepo: headRepo,
+ BaseRepo: repo,
+ MergeBase: compareInfo.MergeBase,
+ Type: issues_model.PullRequestGitea,
+ }
+
+ // Get all assignee IDs
+ assigneeIDs, err := issues_model.MakeIDsFromAPIAssigneesToAdd(ctx, form.Assignee, form.Assignees)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Assignee does not exist: [name: %s]", err))
+ } else {
+ ctx.Error(http.StatusInternalServerError, "AddAssigneeByName", err)
+ }
+ return
+ }
+	// Check that the passed assignees are assignable
+ for _, aID := range assigneeIDs {
+ assignee, err := user_model.GetUserByID(ctx, aID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserByID", err)
+ return
+ }
+
+ valid, err := access_model.CanBeAssigned(ctx, assignee, repo, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "canBeAssigned", err)
+ return
+ }
+ if !valid {
+ ctx.Error(http.StatusUnprocessableEntity, "canBeAssigned", repo_model.ErrUserDoesNotHaveAccessToRepo{UserID: aID, RepoName: repo.Name})
+ return
+ }
+ }
+
+ if err := pull_service.NewPullRequest(ctx, repo, prIssue, labelIDs, []string{}, pr, assigneeIDs); err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "BlockedByUser", err)
+ return
+ } else if repo_model.IsErrUserDoesNotHaveAccessToRepo(err) {
+ ctx.Error(http.StatusBadRequest, "UserDoesNotHaveAccessToRepo", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "NewPullRequest", err)
+ return
+ }
+
+ log.Trace("Pull request created: %d/%d", repo.ID, prIssue.ID)
+ ctx.JSON(http.StatusCreated, convert.ToAPIPullRequest(ctx, pr, ctx.Doer))
+}
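+
+// Example (illustrative sketch only): a request payload for this endpoint,
+// built from the structs package imported above as "api". Branch names, title
+// and label IDs are placeholders; `Head` may also name a fork as "user:branch".
+//
+//	opts := api.CreatePullRequestOption{
+//		Base:   "main",
+//		Head:   "contributor:feature",
+//		Title:  "Add feature",
+//		Body:   "Implements the feature.",
+//		Labels: []int64{1, 4},
+//	}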
+
+// EditPullRequest updates the fields of a pull request
+func EditPullRequest(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/pulls/{index} repository repoEditPullRequest
+ // ---
+ // summary: Update a pull request. If using deadline only the date will be taken into account, and time of day ignored.
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditPullRequestOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/PullRequest"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "412":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.EditPullRequestOption)
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ err = pr.LoadIssue(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+ issue := pr.Issue
+ issue.Repo = ctx.Repo.Repository
+
+ if err := issue.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+
+ if !issue.IsPoster(ctx.Doer.ID) && !ctx.Repo.CanWrite(unit.TypePullRequests) {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ if len(form.Title) > 0 {
+ err = issue_service.ChangeTitle(ctx, issue, ctx.Doer, form.Title)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeTitle", err)
+ return
+ }
+ }
+ if form.Body != nil {
+ err = issue_service.ChangeContent(ctx, issue, ctx.Doer, *form.Body, issue.ContentVersion)
+ if err != nil {
+ if errors.Is(err, issues_model.ErrIssueAlreadyChanged) {
+ ctx.Error(http.StatusBadRequest, "ChangeContent", err)
+ return
+ }
+
+ ctx.Error(http.StatusInternalServerError, "ChangeContent", err)
+ return
+ }
+ }
+
+ // Update or remove deadline if set
+ if form.Deadline != nil || form.RemoveDeadline != nil {
+ var deadlineUnix timeutil.TimeStamp
+ if (form.RemoveDeadline == nil || !*form.RemoveDeadline) && !form.Deadline.IsZero() {
+ deadline := time.Date(form.Deadline.Year(), form.Deadline.Month(), form.Deadline.Day(),
+ 23, 59, 59, 0, form.Deadline.Location())
+ deadlineUnix = timeutil.TimeStamp(deadline.Unix())
+ }
+
+ if err := issues_model.UpdateIssueDeadline(ctx, issue, deadlineUnix, ctx.Doer); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateIssueDeadline", err)
+ return
+ }
+ issue.DeadlineUnix = deadlineUnix
+ }
+
+ // Add/delete assignees
+
+ // Deleting is done the GitHub way (quote from their api documentation):
+ // https://developer.github.com/v3/issues/#edit-an-issue
+ // "assignees" (array): Logins for Users to assign to this issue.
+ // Pass one or more user logins to replace the set of assignees on this Issue.
+ // Send an empty array ([]) to clear all assignees from the Issue.
+
+ if ctx.Repo.CanWrite(unit.TypePullRequests) && (form.Assignees != nil || len(form.Assignee) > 0) {
+ err = issue_service.UpdateAssignees(ctx, issue, form.Assignee, form.Assignees, ctx.Doer)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Assignee does not exist: [name: %s]", err))
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateAssignees", err)
+ }
+ return
+ }
+ }
+
+ if ctx.Repo.CanWrite(unit.TypePullRequests) && form.Milestone != 0 &&
+ issue.MilestoneID != form.Milestone {
+ oldMilestoneID := issue.MilestoneID
+ issue.MilestoneID = form.Milestone
+ if err = issue_service.ChangeMilestoneAssign(ctx, issue, ctx.Doer, oldMilestoneID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ChangeMilestoneAssign", err)
+ return
+ }
+ }
+
+ if ctx.Repo.CanWrite(unit.TypePullRequests) && form.Labels != nil {
+ labels, err := issues_model.GetLabelsInRepoByIDs(ctx, ctx.Repo.Repository.ID, form.Labels)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsInRepoByIDsError", err)
+ return
+ }
+
+ if ctx.Repo.Owner.IsOrganization() {
+ orgLabels, err := issues_model.GetLabelsInOrgByIDs(ctx, ctx.Repo.Owner.ID, form.Labels)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLabelsInOrgByIDs", err)
+ return
+ }
+
+ labels = append(labels, orgLabels...)
+ }
+
+ if err = issues_model.ReplaceIssueLabels(ctx, issue, labels, ctx.Doer); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReplaceLabelsError", err)
+ return
+ }
+ }
+
+ if form.State != nil {
+ if pr.HasMerged {
+ ctx.Error(http.StatusPreconditionFailed, "MergedPRState", "cannot change state of this pull request, it was already merged")
+ return
+ }
+ isClosed := api.StateClosed == api.StateType(*form.State)
+ if issue.IsClosed != isClosed {
+ if err := issue_service.ChangeStatus(ctx, issue, ctx.Doer, "", isClosed); err != nil {
+ if issues_model.IsErrDependenciesLeft(err) {
+ ctx.Error(http.StatusPreconditionFailed, "DependenciesLeft", "cannot close this pull request because it still has open dependencies")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ChangeStatus", err)
+ return
+ }
+ }
+ }
+
+ // change pull target branch
+ if !pr.HasMerged && len(form.Base) != 0 && form.Base != pr.BaseBranch {
+ if !ctx.Repo.GitRepo.IsBranchExist(form.Base) {
+			ctx.Error(http.StatusNotFound, "NewBaseBranchNotExist", fmt.Errorf("new base '%s' does not exist", form.Base))
+ return
+ }
+ if err := pull_service.ChangeTargetBranch(ctx, pr, ctx.Doer, form.Base); err != nil {
+ if issues_model.IsErrPullRequestAlreadyExists(err) {
+ ctx.Error(http.StatusConflict, "IsErrPullRequestAlreadyExists", err)
+ return
+ } else if issues_model.IsErrIssueIsClosed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "IsErrIssueIsClosed", err)
+ return
+ } else if models.IsErrPullRequestHasMerged(err) {
+ ctx.Error(http.StatusConflict, "IsErrPullRequestHasMerged", err)
+ return
+ }
+ ctx.InternalServerError(err)
+ return
+ }
+ notify_service.PullRequestChangeTargetBranch(ctx, ctx.Doer, pr, form.Base)
+ }
+
+ // update allow edits
+ if form.AllowMaintainerEdit != nil {
+ if err := pull_service.SetAllowEdits(ctx, ctx.Doer, pr, *form.AllowMaintainerEdit); err != nil {
+ if errors.Is(err, pull_service.ErrUserHasNoPermissionForAction) {
+ ctx.Error(http.StatusForbidden, "SetAllowEdits", fmt.Sprintf("SetAllowEdits: %s", err))
+ return
+ }
+ ctx.ServerError("SetAllowEdits", err)
+ return
+ }
+ }
+
+ // Refetch from database
+ pr, err = issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, pr.Index)
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ // TODO this should be 200, not 201
+ ctx.JSON(http.StatusCreated, convert.ToAPIPullRequest(ctx, pr, ctx.Doer))
+}
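+
+// Example (illustrative sketch only): an edit payload that clears all assignees
+// with an empty array (GitHub-style, as described above) and closes the pull
+// request. Built from the structs package imported above as "api"; values are
+// placeholders.
+//
+//	closed := "closed"
+//	opts := api.EditPullRequestOption{
+//		Assignees: []string{},
+//		State:     &closed,
+//	}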
+
+// IsPullRequestMerged checks whether a pull request has been merged, given its index
+func IsPullRequestMerged(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/merge repository repoPullRequestIsMerged
+ // ---
+ // summary: Check if a pull request has been merged
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // description: pull request has been merged
+ // "404":
+ // description: pull request has not been merged
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+	if pr.HasMerged {
+		ctx.Status(http.StatusNoContent)
+		return
+	}
+	ctx.NotFound()
+}
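+
+// Example (illustrative sketch only): interpreting this endpoint's responses,
+// where 204 means merged and 404 means not merged (or not found). The URL and
+// index are placeholders; assumes "fmt" and "net/http" imports.
+//
+//	func isMerged(apiURL string, index int64) (bool, error) {
+//		resp, err := http.Get(fmt.Sprintf("%s/repos/owner/repo/pulls/%d/merge", apiURL, index))
+//		if err != nil {
+//			return false, err
+//		}
+//		defer resp.Body.Close()
+//		return resp.StatusCode == http.StatusNoContent, nil
+//	}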
+
+// MergePullRequest merges a PR given an index
+func MergePullRequest(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/merge repository repoMergePullRequest
+ // ---
+ // summary: Merge a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to merge
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // $ref: "#/definitions/MergePullRequestOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "405":
+ // "$ref": "#/responses/empty"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := web.GetForm(ctx).(*forms.MergePullRequestForm)
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound("GetPullRequestByIndex", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err := pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+
+ if err := pr.LoadIssue(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+ pr.Issue.Repo = ctx.Repo.Repository
+
+ if ctx.IsSigned {
+ // Update issue-user.
+ if err = activities_model.SetIssueReadBy(ctx, pr.Issue.ID, ctx.Doer.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ReadBy", err)
+ return
+ }
+ }
+
+ manuallyMerged := repo_model.MergeStyle(form.Do) == repo_model.MergeStyleManuallyMerged
+
+ mergeCheckType := pull_service.MergeCheckTypeGeneral
+ if form.MergeWhenChecksSucceed {
+ mergeCheckType = pull_service.MergeCheckTypeAuto
+ }
+ if manuallyMerged {
+ mergeCheckType = pull_service.MergeCheckTypeManually
+ }
+
+ // start with merging by checking
+ if err := pull_service.CheckPullMergeable(ctx, ctx.Doer, &ctx.Repo.Permission, pr, mergeCheckType, form.ForceMerge); err != nil {
+ if errors.Is(err, pull_service.ErrIsClosed) {
+ ctx.NotFound()
+ } else if errors.Is(err, pull_service.ErrUserNotAllowedToMerge) {
+ ctx.Error(http.StatusMethodNotAllowed, "Merge", "User not allowed to merge PR")
+ } else if errors.Is(err, pull_service.ErrHasMerged) {
+ ctx.Error(http.StatusMethodNotAllowed, "PR already merged", "")
+ } else if errors.Is(err, pull_service.ErrIsWorkInProgress) {
+ ctx.Error(http.StatusMethodNotAllowed, "PR is a work in progress", "Work in progress PRs cannot be merged")
+ } else if errors.Is(err, pull_service.ErrNotMergeableState) {
+ ctx.Error(http.StatusMethodNotAllowed, "PR not in mergeable state", "Please try again later")
+ } else if models.IsErrDisallowedToMerge(err) {
+ ctx.Error(http.StatusMethodNotAllowed, "PR is not ready to be merged", err)
+ } else if asymkey_service.IsErrWontSign(err) {
+ ctx.Error(http.StatusMethodNotAllowed, fmt.Sprintf("Protected branch %s requires signed commits but this merge would not be signed", pr.BaseBranch), err)
+ } else {
+ ctx.InternalServerError(err)
+ }
+ return
+ }
+
+ // handle manually-merged mark
+ if manuallyMerged {
+ if err := pull_service.MergedManually(ctx, pr, ctx.Doer, ctx.Repo.GitRepo, form.MergeCommitID); err != nil {
+ if models.IsErrInvalidMergeStyle(err) {
+				ctx.Error(http.StatusMethodNotAllowed, "Invalid merge style", fmt.Errorf("%s is not an allowed merge style for this repository", repo_model.MergeStyle(form.Do)))
+ return
+ }
+ if strings.Contains(err.Error(), "Wrong commit ID") {
+ ctx.JSON(http.StatusConflict, err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "Manually-Merged", err)
+ return
+ }
+ ctx.Status(http.StatusOK)
+ return
+ }
+
+ if len(form.Do) == 0 {
+ form.Do = string(repo_model.MergeStyleMerge)
+ }
+
+ message := strings.TrimSpace(form.MergeTitleField)
+ if len(message) == 0 {
+ message, _, err = pull_service.GetDefaultMergeMessage(ctx, ctx.Repo.GitRepo, pr, repo_model.MergeStyle(form.Do))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetDefaultMergeMessage", err)
+ return
+ }
+ }
+
+ form.MergeMessageField = strings.TrimSpace(form.MergeMessageField)
+ if len(form.MergeMessageField) > 0 {
+ message += "\n\n" + form.MergeMessageField
+ }
+
+ if form.MergeWhenChecksSucceed {
+ scheduled, err := automerge.ScheduleAutoMerge(ctx, ctx.Doer, pr, repo_model.MergeStyle(form.Do), message)
+ if err != nil {
+ if pull_model.IsErrAlreadyScheduledToAutoMerge(err) {
+ ctx.Error(http.StatusConflict, "ScheduleAutoMerge", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ScheduleAutoMerge", err)
+ return
+ } else if scheduled {
+ // nothing more to do ...
+ ctx.Status(http.StatusCreated)
+ return
+ }
+ }
+
+ if err := pull_service.Merge(ctx, pr, ctx.Doer, ctx.Repo.GitRepo, repo_model.MergeStyle(form.Do), form.HeadCommitID, message, false); err != nil {
+ if models.IsErrInvalidMergeStyle(err) {
+			ctx.Error(http.StatusMethodNotAllowed, "Invalid merge style", fmt.Errorf("%s is not an allowed merge style for this repository", repo_model.MergeStyle(form.Do)))
+ } else if models.IsErrMergeConflicts(err) {
+ conflictError := err.(models.ErrMergeConflicts)
+ ctx.JSON(http.StatusConflict, conflictError)
+ } else if models.IsErrRebaseConflicts(err) {
+ conflictError := err.(models.ErrRebaseConflicts)
+ ctx.JSON(http.StatusConflict, conflictError)
+ } else if models.IsErrMergeUnrelatedHistories(err) {
+ conflictError := err.(models.ErrMergeUnrelatedHistories)
+ ctx.JSON(http.StatusConflict, conflictError)
+ } else if git.IsErrPushOutOfDate(err) {
+ ctx.Error(http.StatusConflict, "Merge", "merge push out of date")
+ } else if models.IsErrSHADoesNotMatch(err) {
+ ctx.Error(http.StatusConflict, "Merge", "head out of date")
+ } else if git.IsErrPushRejected(err) {
+ errPushRej := err.(*git.ErrPushRejected)
+ if len(errPushRej.Message) == 0 {
+ ctx.Error(http.StatusConflict, "Merge", "PushRejected without remote error message")
+ } else {
+ ctx.Error(http.StatusConflict, "Merge", "PushRejected with remote message: "+errPushRej.Message)
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "Merge", err)
+ }
+ return
+ }
+ log.Trace("Pull request merged: %d", pr.ID)
+
+ if form.DeleteBranchAfterMerge {
+ var headRepo *git.Repository
+ if ctx.Repo != nil && ctx.Repo.Repository != nil && ctx.Repo.Repository.ID == pr.HeadRepoID && ctx.Repo.GitRepo != nil {
+ headRepo = ctx.Repo.GitRepo
+ } else {
+ headRepo, err = gitrepo.OpenRepository(ctx, pr.HeadRepo)
+ if err != nil {
+ ctx.ServerError(fmt.Sprintf("OpenRepository[%s]", pr.HeadRepo.FullName()), err)
+ return
+ }
+ defer headRepo.Close()
+ }
+
+ if err := repo_service.DeleteBranchAfterMerge(ctx, ctx.Doer, pr, headRepo); err != nil {
+ switch {
+ case errors.Is(err, repo_service.ErrBranchIsDefault):
+ ctx.Error(http.StatusForbidden, "DefaultBranch", fmt.Errorf("the head branch is the default branch"))
+ case errors.Is(err, git_model.ErrBranchIsProtected):
+ ctx.Error(http.StatusForbidden, "IsProtectedBranch", fmt.Errorf("the head branch is protected"))
+ case errors.Is(err, util.ErrPermissionDenied):
+ ctx.Error(http.StatusForbidden, "HeadBranch", fmt.Errorf("insufficient permission to delete head branch"))
+ default:
+ ctx.Error(http.StatusInternalServerError, "DeleteBranchAfterMerge", err)
+ }
+ return
+ }
+ }
+
+ ctx.Status(http.StatusOK)
+}
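+
+// Example (illustrative sketch only): a merge request built from the forms
+// package imported above, squash-merging once all checks succeed and deleting
+// the head branch afterwards. Values are placeholders; assumes "encoding/json".
+//
+//	func squashMergeBody() ([]byte, error) {
+//		var form forms.MergePullRequestForm
+//		form.Do = "squash"
+//		form.MergeWhenChecksSucceed = true
+//		form.DeleteBranchAfterMerge = true
+//		// POST the result to /repos/{owner}/{repo}/pulls/{index}/merge; the
+//		// handler answers 200 for an immediate merge and 201 when the merge
+//		// has been scheduled to run once the checks succeed.
+//		return json.Marshal(&form)
+//	}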
+
+func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) (*repo_model.Repository, *git.Repository, *git.CompareInfo, string, string) {
+ baseRepo := ctx.Repo.Repository
+
+ // Get compared branches information
+ // format: <base branch>...[<head repo>:]<head branch>
+ // base<-head: master...head:feature
+ // same repo: master...feature
+
+ // TODO: Validate form first?
+
+ baseBranch := form.Base
+
+ var (
+ headUser *user_model.User
+ headBranch string
+ isSameRepo bool
+ err error
+ )
+
+	// If there is no head repository, the pull request is within the same repository.
+ headInfos := strings.Split(form.Head, ":")
+ if len(headInfos) == 1 {
+ isSameRepo = true
+ headUser = ctx.Repo.Owner
+ headBranch = headInfos[0]
+ } else if len(headInfos) == 2 {
+ headUser, err = user_model.GetUserByName(ctx, headInfos[0])
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound("GetUserByName")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return nil, nil, nil, "", ""
+ }
+ headBranch = headInfos[1]
+ // The head repository can also point to the same repo
+ isSameRepo = ctx.Repo.Owner.ID == headUser.ID
+ } else {
+ ctx.NotFound()
+ return nil, nil, nil, "", ""
+ }
+
+ ctx.Repo.PullRequest.SameRepo = isSameRepo
+ log.Trace("Repo path: %q, base branch: %q, head branch: %q", ctx.Repo.GitRepo.Path, baseBranch, headBranch)
+
+ // Check if base branch is valid.
+ baseIsCommit := ctx.Repo.GitRepo.IsCommitExist(baseBranch)
+ baseIsBranch := ctx.Repo.GitRepo.IsBranchExist(baseBranch)
+ baseIsTag := ctx.Repo.GitRepo.IsTagExist(baseBranch)
+ if !baseIsCommit && !baseIsBranch && !baseIsTag {
+ // Check for short SHA usage
+ if baseCommit, _ := ctx.Repo.GitRepo.GetCommit(baseBranch); baseCommit != nil {
+ baseBranch = baseCommit.ID.String()
+ } else {
+ ctx.NotFound("BaseNotExist")
+ return nil, nil, nil, "", ""
+ }
+ }
+
+ // Check if current user has fork of repository or in the same repository.
+ headRepo := repo_model.GetForkedRepo(ctx, headUser.ID, baseRepo.ID)
+ if headRepo == nil && !isSameRepo {
+ err := baseRepo.GetBaseRepo(ctx)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBaseRepo", err)
+ return nil, nil, nil, "", ""
+ }
+
+ // Check if baseRepo's base repository is the same as headUser's repository.
+ if baseRepo.BaseRepo == nil || baseRepo.BaseRepo.OwnerID != headUser.ID {
+			log.Trace("parseCompareInfo[%d]: user does not have a fork and is not in the same repository", baseRepo.ID)
+ ctx.NotFound("GetBaseRepo")
+ return nil, nil, nil, "", ""
+ }
+ // Assign headRepo so it can be used below.
+ headRepo = baseRepo.BaseRepo
+ }
+
+ var headGitRepo *git.Repository
+ if isSameRepo {
+ headRepo = ctx.Repo.Repository
+ headGitRepo = ctx.Repo.GitRepo
+ } else {
+ headGitRepo, err = gitrepo.OpenRepository(ctx, headRepo)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ return nil, nil, nil, "", ""
+ }
+ }
+
+ // user should have permission to read baseRepo's codes and pulls, NOT headRepo's
+ permBase, err := access_model.GetUserRepoPermission(ctx, baseRepo, ctx.Doer)
+ if err != nil {
+ headGitRepo.Close()
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return nil, nil, nil, "", ""
+ }
+ if !permBase.CanReadIssuesOrPulls(true) || !permBase.CanRead(unit.TypeCode) {
+ if log.IsTrace() {
+ log.Trace("Permission Denied: User %-v cannot create/read pull requests or cannot read code in Repo %-v\nUser in baseRepo has Permissions: %-+v",
+ ctx.Doer,
+ baseRepo,
+ permBase)
+ }
+ headGitRepo.Close()
+ ctx.NotFound("Can't read pulls or can't read UnitTypeCode")
+ return nil, nil, nil, "", ""
+ }
+
+ // user should have permission to read headrepo's codes
+ permHead, err := access_model.GetUserRepoPermission(ctx, headRepo, ctx.Doer)
+ if err != nil {
+ headGitRepo.Close()
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return nil, nil, nil, "", ""
+ }
+ if !permHead.CanRead(unit.TypeCode) {
+ if log.IsTrace() {
+ log.Trace("Permission Denied: User: %-v cannot read code in Repo: %-v\nUser in headRepo has Permissions: %-+v",
+ ctx.Doer,
+ headRepo,
+ permHead)
+ }
+ headGitRepo.Close()
+ ctx.NotFound("Can't read headRepo UnitTypeCode")
+ return nil, nil, nil, "", ""
+ }
+
+ // Check if head branch is valid.
+	headIsCommit := headGitRepo.IsCommitExist(headBranch)
+	headIsBranch := headGitRepo.IsBranchExist(headBranch)
+	headIsTag := headGitRepo.IsTagExist(headBranch)
+ if !headIsCommit && !headIsBranch && !headIsTag {
+ // Check if headBranch is short sha commit hash
+ if headCommit, _ := headGitRepo.GetCommit(headBranch); headCommit != nil {
+ headBranch = headCommit.ID.String()
+ } else {
+ headGitRepo.Close()
+ ctx.NotFound("IsRefExist", nil)
+ return nil, nil, nil, "", ""
+ }
+ }
+
+ baseBranchRef := baseBranch
+ if baseIsBranch {
+ baseBranchRef = git.BranchPrefix + baseBranch
+ } else if baseIsTag {
+ baseBranchRef = git.TagPrefix + baseBranch
+ }
+	headBranchRef := headBranch
+	if headIsBranch {
+		headBranchRef = git.BranchPrefix + headBranch
+	} else if headIsTag {
+		headBranchRef = git.TagPrefix + headBranch
+	}
+
+ compareInfo, err := headGitRepo.GetCompareInfo(repo_model.RepoPath(baseRepo.Owner.Name, baseRepo.Name), baseBranchRef, headBranchRef, false, false)
+ if err != nil {
+ headGitRepo.Close()
+ ctx.Error(http.StatusInternalServerError, "GetCompareInfo", err)
+ return nil, nil, nil, "", ""
+ }
+
+ return headRepo, headGitRepo, compareInfo, baseBranch, headBranch
+}
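+
+// Example (illustrative sketch only): the two head formats this parser accepts.
+// A bare branch name targets the base repository itself, while "user:branch"
+// points at that user's fork; branch names here are placeholders. The base may
+// also be a tag or a commit SHA, as checked above.
+//
+//	sameRepo := api.CreatePullRequestOption{Base: "main", Head: "feature"}
+//	fromFork := api.CreatePullRequestOption{Base: "main", Head: "contributor:feature"}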
+
+// UpdatePullRequest merges the PR's base branch into its head branch
+func UpdatePullRequest(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/update repository repoUpdatePullRequest
+ // ---
+ // summary: Merge PR's baseBranch into headBranch
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to get
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: style
+ // in: query
+ // description: how to update pull request
+ // type: string
+ // enum: [merge, rebase]
+ // responses:
+ // "200":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if pr.HasMerged {
+		ctx.Error(http.StatusUnprocessableEntity, "UpdatePullRequest", "pull request has already been merged")
+ return
+ }
+
+ if err = pr.LoadIssue(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+
+ if pr.Issue.IsClosed {
+		ctx.Error(http.StatusUnprocessableEntity, "UpdatePullRequest", "pull request is closed")
+ return
+ }
+
+ if err = pr.LoadBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadBaseRepo", err)
+ return
+ }
+ if err = pr.LoadHeadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadHeadRepo", err)
+ return
+ }
+
+ rebase := ctx.FormString("style") == "rebase"
+
+ allowedUpdateByMerge, allowedUpdateByRebase, err := pull_service.IsUserAllowedToUpdate(ctx, pr, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "IsUserAllowedToMerge", err)
+ return
+ }
+
+ if (!allowedUpdateByMerge && !rebase) || (rebase && !allowedUpdateByRebase) {
+ ctx.Status(http.StatusForbidden)
+ return
+ }
+
+ // default merge commit message
+ message := fmt.Sprintf("Merge branch '%s' into %s", pr.BaseBranch, pr.HeadBranch)
+
+ if err = pull_service.Update(ctx, pr, ctx.Doer, message, rebase); err != nil {
+ if models.IsErrMergeConflicts(err) {
+ ctx.Error(http.StatusConflict, "Update", "merge failed because of conflict")
+ return
+ } else if models.IsErrRebaseConflicts(err) {
+ ctx.Error(http.StatusConflict, "Update", "rebase failed because of conflict")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "pull_service.Update", err)
+ return
+ }
+
+ ctx.Status(http.StatusOK)
+}
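+
+// Example (illustrative sketch only): asking the server to rebase the head
+// branch onto the base branch instead of merging. The URL, token and index are
+// placeholders; assumes "fmt" and "net/http" imports.
+//
+//	func rebaseUpdate(apiURL, token string, index int64) (*http.Response, error) {
+//		url := fmt.Sprintf("%s/repos/owner/repo/pulls/%d/update?style=rebase", apiURL, index)
+//		req, err := http.NewRequest(http.MethodPost, url, nil)
+//		if err != nil {
+//			return nil, err
+//		}
+//		req.Header.Set("Authorization", "token "+token)
+//		return http.DefaultClient.Do(req)
+//	}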
+
+// CancelScheduledAutoMerge cancels an auto merge scheduled for the given pull request by index
+func CancelScheduledAutoMerge(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/pulls/{index}/merge repository repoCancelScheduledAutoMerge
+ // ---
+ // summary: Cancel the scheduled auto merge for the given pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to merge
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ pullIndex := ctx.ParamsInt64(":index")
+ pull, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, pullIndex)
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.InternalServerError(err)
+ return
+ }
+
+ exist, autoMerge, err := pull_model.GetScheduledMergeByPullID(ctx, pull.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if !exist {
+ ctx.NotFound()
+ return
+ }
+
+ if ctx.Doer.ID != autoMerge.DoerID {
+ allowed, err := access_model.IsUserRepoAdmin(ctx, ctx.Repo.Repository, ctx.Doer)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if !allowed {
+ ctx.Error(http.StatusForbidden, "No permission to cancel", "user has no permission to cancel the scheduled auto merge")
+ return
+ }
+ }
+
+ if err := automerge.RemoveScheduledAutoMerge(ctx, ctx.Doer, pull); err != nil {
+ ctx.InternalServerError(err)
+ } else {
+ ctx.Status(http.StatusNoContent)
+ }
+}
+
+// GetPullRequestCommits gets all commits associated with a given PR
+func GetPullRequestCommits(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/commits repository repoGetPullRequestCommits
+ // ---
+ // summary: Get commits for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to get
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: verification
+ // in: query
+ // description: include verification for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // - name: files
+ // in: query
+ // description: include a list of affected files for every commit (disable for speedup, default 'true')
+ // type: boolean
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommitList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err := pr.LoadBaseRepo(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ var prInfo *git.CompareInfo
+ baseGitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pr.BaseRepo)
+ if err != nil {
+ ctx.ServerError("OpenRepository", err)
+ return
+ }
+ defer closer.Close()
+
+ if pr.HasMerged {
+ prInfo, err = baseGitRepo.GetCompareInfo(pr.BaseRepo.RepoPath(), pr.MergeBase, pr.GetGitRefName(), false, false)
+ } else {
+ prInfo, err = baseGitRepo.GetCompareInfo(pr.BaseRepo.RepoPath(), pr.BaseBranch, pr.GetGitRefName(), false, false)
+ }
+ if err != nil {
+ ctx.ServerError("GetCompareInfo", err)
+ return
+ }
+ commits := prInfo.Commits
+
+ listOptions := utils.GetListOptions(ctx)
+
+ totalNumberOfCommits := len(commits)
+ totalNumberOfPages := int(math.Ceil(float64(totalNumberOfCommits) / float64(listOptions.PageSize)))
+
+ userCache := make(map[string]*user_model.User)
+
+ start, limit := listOptions.GetSkipTake()
+
+ limit = min(limit, totalNumberOfCommits-start)
+ limit = max(limit, 0)
+
+ verification := ctx.FormString("verification") == "" || ctx.FormBool("verification")
+ files := ctx.FormString("files") == "" || ctx.FormBool("files")
+
+ apiCommits := make([]*api.Commit, 0, limit)
+ for i := start; i < start+limit; i++ {
+ apiCommit, err := convert.ToCommit(ctx, ctx.Repo.Repository, baseGitRepo, commits[i], userCache,
+ convert.ToCommitOptions{
+ Stat: true,
+ Verification: verification,
+ Files: files,
+ })
+ if err != nil {
+ ctx.ServerError("toCommit", err)
+ return
+ }
+ apiCommits = append(apiCommits, apiCommit)
+ }
+
+ ctx.SetLinkHeader(totalNumberOfCommits, listOptions.PageSize)
+ ctx.SetTotalCountHeader(int64(totalNumberOfCommits))
+
+ ctx.RespHeader().Set("X-Page", strconv.Itoa(listOptions.Page))
+ ctx.RespHeader().Set("X-PerPage", strconv.Itoa(listOptions.PageSize))
+ ctx.RespHeader().Set("X-PageCount", strconv.Itoa(totalNumberOfPages))
+ ctx.RespHeader().Set("X-HasMore", strconv.FormatBool(listOptions.Page < totalNumberOfPages))
+ ctx.AppendAccessControlExposeHeaders("X-Page", "X-PerPage", "X-PageCount", "X-HasMore")
+
+ ctx.JSON(http.StatusOK, &apiCommits)
+}
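+
+// Example (illustrative sketch only): walking the commit list page by page by
+// reading the pagination headers set above. listCommitsPage and consume are
+// hypothetical helpers standing in for the HTTP call and response handling.
+//
+//	func walkCommits() error {
+//		for page := 1; ; page++ {
+//			resp, err := listCommitsPage(page) // hypothetical helper performing the GET with ?page=N
+//			if err != nil {
+//				return err
+//			}
+//			consume(resp) // hypothetical processing of the decoded commits
+//			if resp.Header.Get("X-HasMore") != "true" {
+//				break
+//			}
+//		}
+//		return nil
+//	}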
+
+// GetPullRequestFiles gets all changed files associated with a given PR
+func GetPullRequestFiles(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/files repository repoGetPullRequestFiles
+ // ---
+ // summary: Get changed files for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request to get
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: skip-to
+ // in: query
+ // description: skip to given file
+ // type: string
+ // - name: whitespace
+ // in: query
+ // description: whitespace behavior
+ // type: string
+ // enum: [ignore-all, ignore-change, ignore-eol, show-all]
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ChangedFileList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err := pr.LoadBaseRepo(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if err := pr.LoadHeadRepo(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ baseGitRepo := ctx.Repo.GitRepo
+
+ var prInfo *git.CompareInfo
+ if pr.HasMerged {
+ prInfo, err = baseGitRepo.GetCompareInfo(pr.BaseRepo.RepoPath(), pr.MergeBase, pr.GetGitRefName(), true, false)
+ } else {
+ prInfo, err = baseGitRepo.GetCompareInfo(pr.BaseRepo.RepoPath(), pr.BaseBranch, pr.GetGitRefName(), true, false)
+ }
+ if err != nil {
+ ctx.ServerError("GetCompareInfo", err)
+ return
+ }
+
+ headCommitID, err := baseGitRepo.GetRefCommitID(pr.GetGitRefName())
+ if err != nil {
+ ctx.ServerError("GetRefCommitID", err)
+ return
+ }
+
+ startCommitID := prInfo.MergeBase
+ endCommitID := headCommitID
+
+ maxLines := setting.Git.MaxGitDiffLines
+
+ // FIXME: If there are too many files in the repo, may cause some unpredictable issues.
+ diff, err := gitdiff.GetDiff(ctx, baseGitRepo,
+ &gitdiff.DiffOptions{
+ BeforeCommitID: startCommitID,
+ AfterCommitID: endCommitID,
+ SkipTo: ctx.FormString("skip-to"),
+ MaxLines: maxLines,
+ MaxLineCharacters: setting.Git.MaxGitDiffLineCharacters,
+ MaxFiles: -1, // GetDiff() will return all files
+ WhitespaceBehavior: gitdiff.GetWhitespaceFlag(ctx.FormString("whitespace")),
+ })
+ if err != nil {
+ ctx.ServerError("GetDiff", err)
+ return
+ }
+
+ listOptions := utils.GetListOptions(ctx)
+
+ totalNumberOfFiles := diff.NumFiles
+ totalNumberOfPages := int(math.Ceil(float64(totalNumberOfFiles) / float64(listOptions.PageSize)))
+
+ start, limit := listOptions.GetSkipTake()
+
+ limit = min(limit, totalNumberOfFiles-start)
+
+ limit = max(limit, 0)
+
+ apiFiles := make([]*api.ChangedFile, 0, limit)
+ for i := start; i < start+limit; i++ {
+ apiFiles = append(apiFiles, convert.ToChangedFile(diff.Files[i], pr.HeadRepo, endCommitID))
+ }
+
+ ctx.SetLinkHeader(totalNumberOfFiles, listOptions.PageSize)
+ ctx.SetTotalCountHeader(int64(totalNumberOfFiles))
+
+ ctx.RespHeader().Set("X-Page", strconv.Itoa(listOptions.Page))
+ ctx.RespHeader().Set("X-PerPage", strconv.Itoa(listOptions.PageSize))
+ ctx.RespHeader().Set("X-PageCount", strconv.Itoa(totalNumberOfPages))
+ ctx.RespHeader().Set("X-HasMore", strconv.FormatBool(listOptions.Page < totalNumberOfPages))
+ ctx.AppendAccessControlExposeHeaders("X-Page", "X-PerPage", "X-PageCount", "X-HasMore")
+
+ ctx.JSON(http.StatusOK, &apiFiles)
+}
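+
+// Example (illustrative sketch only): requesting the changed files while
+// ignoring whitespace-only changes and limiting the page size. The URL and
+// index are placeholders; assumes "fmt" and "net/http" imports.
+//
+//	func listChangedFiles(apiURL string, index int64) (*http.Response, error) {
+//		return http.Get(fmt.Sprintf("%s/repos/owner/repo/pulls/%d/files?whitespace=ignore-all&limit=50", apiURL, index))
+//	}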
diff --git a/routers/api/v1/repo/pull_review.go b/routers/api/v1/repo/pull_review.go
new file mode 100644
index 0000000..8fba085
--- /dev/null
+++ b/routers/api/v1/repo/pull_review.go
@@ -0,0 +1,1107 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/organization"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/gitrepo"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ issue_service "code.gitea.io/gitea/services/issue"
+ pull_service "code.gitea.io/gitea/services/pull"
+)
+
+// ListPullReviews lists all reviews of a pull request
+func ListPullReviews(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/reviews repository repoListPullReviews
+ // ---
+ // summary: List all reviews for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReviewList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound("GetPullRequestByIndex", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err = pr.LoadIssue(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return
+ }
+
+ if err = pr.Issue.LoadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadRepo", err)
+ return
+ }
+
+ opts := issues_model.FindReviewOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ IssueID: pr.IssueID,
+ }
+
+ allReviews, err := issues_model.FindReviews(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ count, err := issues_model.CountReviews(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiReviews, err := convert.ToPullReviewList(ctx, allReviews, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReviewList", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &apiReviews)
+}
+
+// GetPullReview gets a specific review of a pull request
+func GetPullReview(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/reviews/{id} repository repoGetPullReview
+ // ---
+ // summary: Get a specific review for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReview"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ review, _, statusSet := prepareSingleReview(ctx)
+ if statusSet {
+ return
+ }
+
+ apiReview, err := convert.ToPullReview(ctx, review, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReview", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiReview)
+}
+
+// GetPullReviewComments lists all comments of a pull request review
+func GetPullReviewComments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/comments repository repoGetPullReviewComments
+ // ---
+ // summary: List comments for a pull request review
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReviewCommentList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ review, _, statusSet := prepareSingleReview(ctx)
+ if statusSet {
+ return
+ }
+
+ apiComments, err := convert.ToPullReviewCommentList(ctx, review, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReviewCommentList", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiComments)
+}
+
+// GetPullReviewComment gets a pull review comment
+func GetPullReviewComment(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/comments/{comment} repository repoGetPullReviewComment
+ // ---
+ // summary: Get a pull review comment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: comment
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReviewComment"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ review, _, statusSet := prepareSingleReview(ctx)
+ if statusSet {
+ return
+ }
+
+ if err := ctx.Comment.LoadPoster(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiComment, err := convert.ToPullReviewComment(ctx, review, ctx.Comment, ctx.Doer)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiComment)
+}
+
+// CreatePullReviewComment adds a new comment to a pull request review
+func CreatePullReviewComment(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/comments repository repoCreatePullReviewComment
+ // ---
+ // summary: Add a new comment to a pull request review
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreatePullReviewCommentOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReviewComment"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := web.GetForm(ctx).(*api.CreatePullReviewCommentOptions)
+
+ review, pr, statusSet := prepareSingleReview(ctx)
+ if statusSet {
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(ctx); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
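+ // A positive OldLineNum addresses the old (left) side of the diff; it is encoded as a negative line number for the comment service.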
+ line := opts.NewLineNum
+ if opts.OldLineNum > 0 {
+ line = opts.OldLineNum * -1
+ }
+
+ comment, err := pull_service.CreateCodeCommentKnownReviewID(ctx,
+ ctx.Doer,
+ pr.Issue.Repo,
+ pr.Issue,
+ opts.Body,
+ opts.Path,
+ line,
+ review.ID,
+ nil,
+ )
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiComment, err := convert.ToPullReviewComment(ctx, review, comment, ctx.Doer)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiComment)
+}
+
+// DeletePullReview deletes a specific review from a pull request
+func DeletePullReview(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/pulls/{index}/reviews/{id} repository repoDeletePullReview
+ // ---
+ // summary: Delete a specific review from a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ review, _, statusSet := prepareSingleReview(ctx)
+ if statusSet {
+ return
+ }
+
+ if ctx.Doer == nil {
+ ctx.NotFound()
+ return
+ }
+ if !ctx.Doer.IsAdmin && ctx.Doer.ID != review.ReviewerID {
+ ctx.Error(http.StatusForbidden, "only admins and the reviewer can delete a review", nil)
+ return
+ }
+
+ if err := issues_model.DeleteReview(ctx, review); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteReview", fmt.Errorf("can not delete ReviewID: %d", review.ID))
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreatePullReview creates a review for a pull request
+func CreatePullReview(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/reviews repository repoCreatePullReview
+ // ---
+ // summary: Create a review for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreatePullReviewOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReview"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := web.GetForm(ctx).(*api.CreatePullReviewOptions)
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound("GetPullRequestByIndex", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ // determine review type
+ reviewType, isWrong := preparePullReviewType(ctx, pr, opts.Event, opts.Body, len(opts.Comments) > 0)
+ if isWrong {
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "pr.Issue.LoadRepo", err)
+ return
+ }
+
+ // if CommitID is empty, use the head commit ID of the pull request
+ if opts.CommitID == "" {
+ gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pr.Issue.Repo)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "git.OpenRepository", err)
+ return
+ }
+ defer closer.Close()
+
+ headCommitID, err := gitRepo.GetRefCommitID(pr.GetGitRefName())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetRefCommitID", err)
+ return
+ }
+
+ opts.CommitID = headCommitID
+ }
+
+ // create review comments
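+ // Old-side (left) lines are passed to the service as negative line numbers, as in CreatePullReviewComment above.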
+ for _, c := range opts.Comments {
+ line := c.NewLineNum
+ if c.OldLineNum > 0 {
+ line = c.OldLineNum * -1
+ }
+
+ if _, err := pull_service.CreateCodeComment(ctx,
+ ctx.Doer,
+ ctx.Repo.GitRepo,
+ pr.Issue,
+ line,
+ c.Body,
+ c.Path,
+ true, // pending review
+ 0, // no reply
+ opts.CommitID,
+ nil,
+ ); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateCodeComment", err)
+ return
+ }
+ }
+
+ // create review and associate all pending review comments
+ review, _, err := pull_service.SubmitReview(ctx, ctx.Doer, ctx.Repo.GitRepo, pr.Issue, reviewType, opts.Body, opts.CommitID, nil)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SubmitReview", err)
+ return
+ }
+
+ // convert response
+ apiReview, err := convert.ToPullReview(ctx, review, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReview", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiReview)
+}
+
+// SubmitPullReview submits a pending review to a pull request
+func SubmitPullReview(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/reviews/{id} repository repoSubmitPullReview
+ // ---
+ // summary: Submit a pending review to a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/SubmitPullReviewOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReview"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := web.GetForm(ctx).(*api.SubmitPullReviewOptions)
+ review, pr, isWrong := prepareSingleReview(ctx)
+ if isWrong {
+ return
+ }
+
+ if review.Type != issues_model.ReviewTypePending {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("only a pending review can be submitted"))
+ return
+ }
+
+ // determine review type
+ reviewType, isWrong := preparePullReviewType(ctx, pr, opts.Event, opts.Body, len(review.Comments) > 0)
+ if isWrong {
+ return
+ }
+
+ // if the review would stay pending, return an error
+ if reviewType == issues_model.ReviewTypePending {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("review stays pending"))
+ return
+ }
+
+ headCommitID, err := ctx.Repo.GitRepo.GetRefCommitID(pr.GetGitRefName())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GitRepo: GetRefCommitID", err)
+ return
+ }
+
+ // create review and associate all pending review comments
+ review, _, err = pull_service.SubmitReview(ctx, ctx.Doer, ctx.Repo.GitRepo, pr.Issue, reviewType, opts.Body, headCommitID, nil)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SubmitReview", err)
+ return
+ }
+
+ // convert response
+ apiReview, err := convert.ToPullReview(ctx, review, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReview", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiReview)
+}
+
+// preparePullReviewType returns the review type and false, or -1 and true if an error occurred
+func preparePullReviewType(ctx *context.APIContext, pr *issues_model.PullRequest, event api.ReviewStateType, body string, hasComments bool) (issues_model.ReviewType, bool) {
+ if err := pr.LoadIssue(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadIssue", err)
+ return -1, true
+ }
+
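+ // A body is required by default; approvals and plain comments relax this requirement below.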
+ needsBody := true
+ hasBody := len(strings.TrimSpace(body)) > 0
+
+ var reviewType issues_model.ReviewType
+ switch event {
+ case api.ReviewStateApproved:
+ // can not approve your own PR
+ if pr.Issue.IsPoster(ctx.Doer.ID) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("approving your own pull request is not allowed"))
+ return -1, true
+ }
+ reviewType = issues_model.ReviewTypeApprove
+ needsBody = false
+
+ case api.ReviewStateRequestChanges:
+ // can not reject your own PR
+ if pr.Issue.IsPoster(ctx.Doer.ID) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("rejecting your own pull request is not allowed"))
+ return -1, true
+ }
+ reviewType = issues_model.ReviewTypeReject
+
+ case api.ReviewStateComment:
+ reviewType = issues_model.ReviewTypeComment
+ needsBody = false
+ // if there is no body we need to ensure that there are comments
+ if !hasBody && !hasComments {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("review event %s requires a body or a comment", event))
+ return -1, true
+ }
+ default:
+ reviewType = issues_model.ReviewTypePending
+ }
+
+ // reject reviews with empty body if a body is required for this call
+ if needsBody && !hasBody {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("review event %s requires a body", event))
+ return -1, true
+ }
+
+ return reviewType, false
+}
+
+// prepareSingleReview returns the review, the related pull request and false, or nil, nil and true if an error occurred
+func prepareSingleReview(ctx *context.APIContext) (*issues_model.Review, *issues_model.PullRequest, bool) {
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound("GetPullRequestByIndex", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return nil, nil, true
+ }
+
+ review, err := issues_model.GetReviewByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if issues_model.IsErrReviewNotExist(err) {
+ ctx.NotFound("GetReviewByID", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetReviewByID", err)
+ }
+ return nil, nil, true
+ }
+
+ // validate the review is for the given PR
+ if review.IssueID != pr.IssueID {
+ ctx.NotFound("ReviewNotInPR")
+ return nil, nil, true
+ }
+
+ // make sure that the user has access to this review if it is pending
+ if review.Type == issues_model.ReviewTypePending && review.ReviewerID != ctx.Doer.ID && !ctx.Doer.IsAdmin {
+ ctx.NotFound("GetReviewByID")
+ return nil, nil, true
+ }
+
+ if err := review.LoadAttributes(ctx); err != nil && !user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "ReviewLoadAttributes", err)
+ return nil, nil, true
+ }
+
+ return review, pr, false
+}
+
+// CreateReviewRequests creates review requests for a pull request
+func CreateReviewRequests(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/requested_reviewers repository repoCreatePullReviewRequests
+ // ---
+ // summary: Create review requests for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/PullReviewRequestOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/PullReviewList"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := web.GetForm(ctx).(*api.PullReviewRequestOptions)
+ apiReviewRequest(ctx, *opts, true)
+}
+
+// DeleteReviewRequests deletes review requests for a pull request
+func DeleteReviewRequests(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/pulls/{index}/requested_reviewers repository repoDeletePullReviewRequests
+ // ---
+ // summary: Cancel review requests for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/PullReviewRequestOptions"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ opts := web.GetForm(ctx).(*api.PullReviewRequestOptions)
+ apiReviewRequest(ctx, *opts, false)
+}
+
+func apiReviewRequest(ctx *context.APIContext, opts api.PullReviewRequestOptions, isAdd bool) {
+ pr, err := issues_model.GetPullRequestByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
+ if err != nil {
+ if issues_model.IsErrPullRequestNotExist(err) {
+ ctx.NotFound("GetPullRequestByIndex", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPullRequestByIndex", err)
+ }
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "pr.Issue.LoadRepo", err)
+ return
+ }
+
+ reviewers := make([]*user_model.User, 0, len(opts.Reviewers))
+
+ permDoer, err := access_model.GetUserRepoPermission(ctx, pr.Issue.Repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ }
+
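+ // Resolve each requested reviewer, accepting either a username or an email address, and validate the request.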
+ for _, r := range opts.Reviewers {
+ var reviewer *user_model.User
+ if strings.Contains(r, "@") {
+ reviewer, err = user_model.GetUserByEmail(ctx, r)
+ } else {
+ reviewer, err = user_model.GetUserByName(ctx, r)
+ }
+
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.NotFound("UserNotExist", fmt.Sprintf("User '%s' does not exist", r))
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUser", err)
+ return
+ }
+
+ err = issue_service.IsValidReviewRequest(ctx, reviewer, ctx.Doer, isAdd, pr.Issue, &permDoer)
+ if err != nil {
+ if issues_model.IsErrNotValidReviewRequest(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "NotValidReviewRequest", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "IsValidReviewRequest", err)
+ return
+ }
+
+ reviewers = append(reviewers, reviewer)
+ }
+
+ var reviews []*issues_model.Review
+ if isAdd {
+ reviews = make([]*issues_model.Review, 0, len(reviewers))
+ }
+
+ for _, reviewer := range reviewers {
+ comment, err := issue_service.ReviewRequest(ctx, pr.Issue, ctx.Doer, reviewer, isAdd)
+ if err != nil {
+ if issues_model.IsErrReviewRequestOnClosedPR(err) {
+ ctx.Error(http.StatusForbidden, "", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ReviewRequest", err)
+ return
+ }
+
+ if comment != nil && isAdd {
+ if err = comment.LoadReview(ctx); err != nil {
+ ctx.ServerError("ReviewRequest", err)
+ return
+ }
+ reviews = append(reviews, comment.Review)
+ }
+ }
+
+ if ctx.Repo.Repository.Owner.IsOrganization() && len(opts.TeamReviewers) > 0 {
+ teamReviewers := make([]*organization.Team, 0, len(opts.TeamReviewers))
+ for _, t := range opts.TeamReviewers {
+ var teamReviewer *organization.Team
+ teamReviewer, err = organization.GetTeam(ctx, ctx.Repo.Owner.ID, t)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.NotFound("TeamNotExist", fmt.Sprintf("Team '%s' does not exist", t))
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "ReviewRequest", err)
+ return
+ }
+
+ err = issue_service.IsValidTeamReviewRequest(ctx, teamReviewer, ctx.Doer, isAdd, pr.Issue)
+ if err != nil {
+ if issues_model.IsErrNotValidReviewRequest(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "NotValidReviewRequest", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "IsValidTeamReviewRequest", err)
+ return
+ }
+
+ teamReviewers = append(teamReviewers, teamReviewer)
+ }
+
+ for _, teamReviewer := range teamReviewers {
+ comment, err := issue_service.TeamReviewRequest(ctx, pr.Issue, ctx.Doer, teamReviewer, isAdd)
+ if err != nil {
+ ctx.ServerError("TeamReviewRequest", err)
+ return
+ }
+
+ if comment != nil && isAdd {
+ if err = comment.LoadReview(ctx); err != nil {
+ ctx.ServerError("ReviewRequest", err)
+ return
+ }
+ reviews = append(reviews, comment.Review)
+ }
+ }
+ }
+
+ if isAdd {
+ apiReviews, err := convert.ToPullReviewList(ctx, reviews, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReviewList", err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, apiReviews)
+ } else {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+}
+
+// DismissPullReview dismisses a review for a pull request
+func DismissPullReview(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/dismissals repository repoDismissPullReview
+ // ---
+ // summary: Dismiss a review for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/DismissPullReviewOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReview"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ opts := web.GetForm(ctx).(*api.DismissPullReviewOptions)
+ dismissReview(ctx, opts.Message, true, opts.Priors)
+}
+
+// UnDismissPullReview cancels the dismissal of a review for a pull request
+func UnDismissPullReview(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/undismissals repository repoUnDismissPullReview
+ // ---
+ // summary: Cancel the dismissal of a review for a pull request
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PullReview"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ dismissReview(ctx, "", false, false)
+}
+
+// DeletePullReviewComment deletes a pull review comment
+func DeletePullReviewComment(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/pulls/{index}/reviews/{id}/comments/{comment} repository repoDeletePullReviewComment
+ // ---
+ // summary: Delete a pull review comment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: index
+ // in: path
+ // description: index of the pull request
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the review
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: comment
+ // in: path
+ // description: id of the comment
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ deleteIssueComment(ctx, issues_model.CommentTypeCode)
+}
+
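+// dismissReview dismisses or un-dismisses an approval or change-request review; only repository admins may do this.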
+func dismissReview(ctx *context.APIContext, msg string, isDismiss, dismissPriors bool) {
+ if !ctx.Repo.IsAdmin() {
+ ctx.Error(http.StatusForbidden, "", "Must be repo admin")
+ return
+ }
+ review, _, isWrong := prepareSingleReview(ctx)
+ if isWrong {
+ return
+ }
+
+ if review.Type != issues_model.ReviewTypeApprove && review.Type != issues_model.ReviewTypeReject {
+ ctx.Error(http.StatusForbidden, "", "no need to dismiss this review because its type is neither Approve nor Request Changes")
+ return
+ }
+
+ _, err := pull_service.DismissReview(ctx, review.ID, ctx.Repo.Repository.ID, msg, ctx.Doer, isDismiss, dismissPriors)
+ if err != nil {
+ if pull_service.IsErrDismissRequestOnClosedPR(err) {
+ ctx.Error(http.StatusForbidden, "", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "pull_service.DismissReview", err)
+ return
+ }
+
+ if review, err = issues_model.GetReviewByID(ctx, review.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetReviewByID", err)
+ return
+ }
+
+ // convert response
+ apiReview, err := convert.ToPullReview(ctx, review, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convertToPullReview", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiReview)
+}
diff --git a/routers/api/v1/repo/release.go b/routers/api/v1/repo/release.go
new file mode 100644
index 0000000..5ea4dc8
--- /dev/null
+++ b/routers/api/v1/repo/release.go
@@ -0,0 +1,424 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ release_service "code.gitea.io/gitea/services/release"
+)
+
+// GetRelease gets a single release of a repository
+func GetRelease(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases/{id} repository repoGetRelease
+ // ---
+ // summary: Get a release
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Release"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ id := ctx.ParamsInt64(":id")
+ release, err := repo_model.GetReleaseForRepoByID(ctx, ctx.Repo.Repository.ID, id)
+ if err != nil && !repo_model.IsErrReleaseNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetReleaseForRepoByID", err)
+ return
+ }
+ if err != nil && repo_model.IsErrReleaseNotExist(err) || release.IsTag {
+ ctx.NotFound()
+ return
+ }
+
+ if err := release.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))
+}
+
+// GetLatestRelease gets the most recent non-prerelease, non-draft release of a repository, sorted by created_at
+func GetLatestRelease(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases/latest repository repoGetLatestRelease
+ // ---
+ // summary: Gets the most recent non-prerelease, non-draft release of a repository, sorted by created_at
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Release"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ release, err := repo_model.GetLatestReleaseByRepoID(ctx, ctx.Repo.Repository.ID)
+ if err != nil && !repo_model.IsErrReleaseNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetLatestRelease", err)
+ return
+ }
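+ // Treat a missing release, a bare tag, or a release belonging to another repository as not found.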
+ if err != nil && repo_model.IsErrReleaseNotExist(err) ||
+ release.IsTag || release.RepoID != ctx.Repo.Repository.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if err := release.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))
+}
+
+// ListReleases lists a repository's releases
+func ListReleases(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases repository repoListReleases
+ // ---
+ // summary: List a repo's releases
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: draft
+ // in: query
+ // description: filter (exclude / include) drafts; if you don't have repo write access, none will show
+ // type: boolean
+ // - name: pre-release
+ // in: query
+ // description: filter (exclude / include) pre-releases
+ // type: boolean
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ReleaseList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ listOptions := utils.GetListOptions(ctx)
+
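+ // Drafts are only included for users with write access to the repository or its releases unit.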
+ opts := repo_model.FindReleasesOptions{
+ ListOptions: listOptions,
+ IncludeDrafts: ctx.Repo.AccessMode >= perm.AccessModeWrite || ctx.Repo.UnitAccessMode(unit.TypeReleases) >= perm.AccessModeWrite,
+ IncludeTags: false,
+ IsDraft: ctx.FormOptionalBool("draft"),
+ IsPreRelease: ctx.FormOptionalBool("pre-release"),
+ RepoID: ctx.Repo.Repository.ID,
+ }
+
+ releases, err := db.Find[repo_model.Release](ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetReleasesByRepoID", err)
+ return
+ }
+ rels := make([]*api.Release, len(releases))
+ for i, release := range releases {
+ if err := release.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ rels[i] = convert.ToAPIRelease(ctx, ctx.Repo.Repository, release)
+ }
+
+ filteredCount, err := db.Count[repo_model.Release](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(filteredCount), listOptions.PageSize)
+ ctx.SetTotalCountHeader(filteredCount)
+ ctx.JSON(http.StatusOK, rels)
+}
+
+// CreateRelease creates a release
+func CreateRelease(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/releases repository repoCreateRelease
+ // ---
+ // summary: Create a release
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateReleaseOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Release"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // "$ref": "#/responses/error"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateReleaseOption)
+ if ctx.Repo.Repository.IsEmpty {
+ ctx.Error(http.StatusUnprocessableEntity, "RepoIsEmpty", fmt.Errorf("repo is empty"))
+ return
+ }
+ rel, err := repo_model.GetRelease(ctx, ctx.Repo.Repository.ID, form.TagName)
+ if err != nil {
+ if !repo_model.IsErrReleaseNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetRelease", err)
+ return
+ }
+ // If target is not provided, use the default branch
+ if len(form.Target) == 0 {
+ form.Target = ctx.Repo.Repository.DefaultBranch
+ }
+ rel = &repo_model.Release{
+ RepoID: ctx.Repo.Repository.ID,
+ PublisherID: ctx.Doer.ID,
+ Publisher: ctx.Doer,
+ TagName: form.TagName,
+ Target: form.Target,
+ Title: form.Title,
+ Note: form.Note,
+ IsDraft: form.IsDraft,
+ IsPrerelease: form.IsPrerelease,
+ HideArchiveLinks: form.HideArchiveLinks,
+ IsTag: false,
+ Repo: ctx.Repo.Repository,
+ }
+ if err := release_service.CreateRelease(ctx.Repo.GitRepo, rel, "", nil); err != nil {
+ if repo_model.IsErrReleaseAlreadyExist(err) {
+ ctx.Error(http.StatusConflict, "ReleaseAlreadyExist", err)
+ } else if models.IsErrProtectedTagName(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "ProtectedTagName", err)
+ } else if git.IsErrNotExist(err) {
+ ctx.Error(http.StatusNotFound, "ErrNotExist", fmt.Errorf("target \"%v\" not found: %w", rel.Target, err))
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateRelease", err)
+ }
+ return
+ }
+ } else {
+ if !rel.IsTag {
+ ctx.Error(http.StatusConflict, "GetRelease", "Release already exists for this tag")
+ return
+ }
+
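+ // The tag exists but has no release yet; promote it to a full release using the submitted fields.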
+ rel.Title = form.Title
+ rel.Note = form.Note
+ rel.IsDraft = form.IsDraft
+ rel.IsPrerelease = form.IsPrerelease
+ rel.HideArchiveLinks = form.HideArchiveLinks
+ rel.PublisherID = ctx.Doer.ID
+ rel.IsTag = false
+ rel.Repo = ctx.Repo.Repository
+ rel.Publisher = ctx.Doer
+ rel.Target = form.Target
+
+ if err = release_service.UpdateRelease(ctx, ctx.Doer, ctx.Repo.GitRepo, rel, true, nil); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateRelease", err)
+ return
+ }
+ }
+ ctx.JSON(http.StatusCreated, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))
+}
+
+// EditRelease edits a release
+func EditRelease(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/releases/{id} repository repoEditRelease
+ // ---
+ // summary: Update a release
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditReleaseOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Release"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.EditReleaseOption)
+ id := ctx.ParamsInt64(":id")
+ rel, err := repo_model.GetReleaseForRepoByID(ctx, ctx.Repo.Repository.ID, id)
+ if err != nil && !repo_model.IsErrReleaseNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetReleaseForRepoByID", err)
+ return
+ }
+ if err != nil && repo_model.IsErrReleaseNotExist(err) || rel.IsTag {
+ ctx.NotFound()
+ return
+ }
+
+ if len(form.TagName) > 0 {
+ rel.TagName = form.TagName
+ }
+ if len(form.Target) > 0 {
+ rel.Target = form.Target
+ }
+ if len(form.Title) > 0 {
+ rel.Title = form.Title
+ }
+ if len(form.Note) > 0 {
+ rel.Note = form.Note
+ }
+ if form.IsDraft != nil {
+ rel.IsDraft = *form.IsDraft
+ }
+ if form.IsPrerelease != nil {
+ rel.IsPrerelease = *form.IsPrerelease
+ }
+ if form.HideArchiveLinks != nil {
+ rel.HideArchiveLinks = *form.HideArchiveLinks
+ }
+ if err := release_service.UpdateRelease(ctx, ctx.Doer, ctx.Repo.GitRepo, rel, false, nil); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateRelease", err)
+ return
+ }
+
+ // reload data from database
+ rel, err = repo_model.GetReleaseByID(ctx, id)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetReleaseByID", err)
+ return
+ }
+ if err := rel.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))
+}
+
+// DeleteRelease deletes a release from a repository
+func DeleteRelease(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/releases/{id} repository repoDeleteRelease
+ // ---
+ // summary: Delete a release
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ id := ctx.ParamsInt64(":id")
+ rel, err := repo_model.GetReleaseForRepoByID(ctx, ctx.Repo.Repository.ID, id)
+ if err != nil && !repo_model.IsErrReleaseNotExist(err) {
+ ctx.Error(http.StatusInternalServerError, "GetReleaseForRepoByID", err)
+ return
+ }
+ if err != nil && repo_model.IsErrReleaseNotExist(err) || rel.IsTag {
+ ctx.NotFound()
+ return
+ }
+ if err := release_service.DeleteReleaseByID(ctx, ctx.Repo.Repository, rel, ctx.Doer, false); err != nil {
+ if models.IsErrProtectedTagName(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "delTag", "user not allowed to delete protected tag")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DeleteReleaseByID", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/release_attachment.go b/routers/api/v1/repo/release_attachment.go
new file mode 100644
index 0000000..d569f6e
--- /dev/null
+++ b/routers/api/v1/repo/release_attachment.go
@@ -0,0 +1,467 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "io"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/attachment"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/context/upload"
+ "code.gitea.io/gitea/services/convert"
+)
+
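+// checkReleaseMatchRepo reports whether the release with the given ID belongs to the current repository, responding with an error otherwise.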
+func checkReleaseMatchRepo(ctx *context.APIContext, releaseID int64) bool {
+ release, err := repo_model.GetReleaseByID(ctx, releaseID)
+ if err != nil {
+ if repo_model.IsErrReleaseNotExist(err) {
+ ctx.NotFound()
+ return false
+ }
+ ctx.Error(http.StatusInternalServerError, "GetReleaseByID", err)
+ return false
+ }
+ if release.RepoID != ctx.Repo.Repository.ID {
+ ctx.NotFound()
+ return false
+ }
+ return true
+}
+
+// GetReleaseAttachment gets a single attachment of the release
+func GetReleaseAttachment(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases/{id}/assets/{attachment_id} repository repoGetReleaseAttachment
+ // ---
+ // summary: Get a release attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ releaseID := ctx.ParamsInt64(":id")
+ if !checkReleaseMatchRepo(ctx, releaseID) {
+ return
+ }
+
+ attachID := ctx.ParamsInt64(":attachment_id")
+ attach, err := repo_model.GetAttachmentByID(ctx, attachID)
+ if err != nil {
+ if repo_model.IsErrAttachmentNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetAttachmentByID", err)
+ return
+ }
+ if attach.ReleaseID != releaseID {
+ log.Info("Requested attachment does not belong to release, release_id: %v, attachment_id: %v", releaseID, attachID)
+ ctx.NotFound()
+ return
+ }
+ // FIXME Should prove the existence of the given repo, but results in unnecessary database requests
+ ctx.JSON(http.StatusOK, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+}
+
+// ListReleaseAttachments lists all attachments of the release
+func ListReleaseAttachments(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases/{id}/assets repository repoListReleaseAttachments
+ // ---
+ // summary: List release's attachments
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/AttachmentList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ releaseID := ctx.ParamsInt64(":id")
+ release, err := repo_model.GetReleaseByID(ctx, releaseID)
+ if err != nil {
+ if repo_model.IsErrReleaseNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetReleaseByID", err)
+ return
+ }
+ if release.RepoID != ctx.Repo.Repository.ID {
+ ctx.NotFound()
+ return
+ }
+ if err := release.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release).Attachments)
+}
+
+// CreateReleaseAttachment creates an attachment and saves the given file
+func CreateReleaseAttachment(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/releases/{id}/assets repository repoCreateReleaseAttachment
+ // ---
+ // summary: Create a release attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - multipart/form-data
+ // - application/octet-stream
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: name
+ // in: query
+ // description: name of the attachment
+ // type: string
+ // required: false
+ // # There is no good way to specify "either 'attachment' or 'external_url' is required" with OpenAPI
+ // # https://github.com/OAI/OpenAPI-Specification/issues/256
+ // - name: attachment
+ // in: formData
+ // description: attachment to upload (this parameter is incompatible with `external_url`)
+ // type: file
+ // required: false
+ // - name: external_url
+ // in: formData
+ // description: url to external asset (this parameter is incompatible with `attachment`)
+ // type: string
+ // required: false
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ // Check if attachments are enabled
+ if !setting.Attachment.Enabled {
+ ctx.NotFound("Attachment is not enabled")
+ return
+ }
+
+ // Check that the release exists and load it
+ releaseID := ctx.ParamsInt64(":id")
+ if !checkReleaseMatchRepo(ctx, releaseID) {
+ return
+ }
+
+ // Get uploaded file from request
+ var isForm, hasAttachmentFile, hasExternalURL bool
+ externalURL := ctx.FormString("external_url")
+ hasExternalURL = externalURL != ""
+ filename := ctx.FormString("name")
+ isForm = strings.HasPrefix(strings.ToLower(ctx.Req.Header.Get("Content-Type")), "multipart/form-data")
+
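+ // Multipart requests carry the file in the "attachment" form field; otherwise the raw request body is used as the attachment content.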
+ if isForm {
+ _, _, err := ctx.Req.FormFile("attachment")
+ hasAttachmentFile = err == nil
+ } else {
+ hasAttachmentFile = ctx.Req.Body != nil
+ }
+
+ if hasAttachmentFile && hasExternalURL {
+ ctx.Error(http.StatusBadRequest, "DuplicateAttachment", "'attachment' and 'external_url' are mutually exclusive")
+ } else if hasAttachmentFile {
+ var content io.ReadCloser
+ var size int64 = -1
+
+ if isForm {
+ var header *multipart.FileHeader
+ content, header, _ = ctx.Req.FormFile("attachment")
+ size = header.Size
+ defer content.Close()
+ if filename == "" {
+ filename = header.Filename
+ }
+ } else {
+ content = ctx.Req.Body
+ defer content.Close()
+ }
+
+ if filename == "" {
+ ctx.Error(http.StatusBadRequest, "MissingName", "Missing 'name' parameter")
+ return
+ }
+
+ // Create a new attachment and save the file
+ attach, err := attachment.UploadAttachment(ctx, content, setting.Repository.Release.AllowedTypes, size, &repo_model.Attachment{
+ Name: filename,
+ UploaderID: ctx.Doer.ID,
+ RepoID: ctx.Repo.Repository.ID,
+ ReleaseID: releaseID,
+ })
+ if err != nil {
+ if upload.IsErrFileTypeForbidden(err) {
+ ctx.Error(http.StatusBadRequest, "DetectContentType", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "NewAttachment", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+ } else if hasExternalURL {
+ url, err := url.Parse(externalURL)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "InvalidExternalURL", err)
+ return
+ }
+
+ if filename == "" {
+ filename = path.Base(url.Path)
+
+ if filename == "." {
+ // URL path is empty
+ filename = url.Host
+ }
+ }
+
+ attach, err := attachment.NewExternalAttachment(ctx, &repo_model.Attachment{
+ Name: filename,
+ UploaderID: ctx.Doer.ID,
+ RepoID: ctx.Repo.Repository.ID,
+ ReleaseID: releaseID,
+ ExternalURL: url.String(),
+ })
+ if err != nil {
+ if repo_model.IsErrInvalidExternalURL(err) {
+ ctx.Error(http.StatusBadRequest, "NewExternalAttachment", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "NewExternalAttachment", err)
+ }
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+ } else {
+ ctx.Error(http.StatusBadRequest, "MissingAttachment", "One of 'attachment' or 'external_url' is required")
+ }
+}
+
+// EditReleaseAttachment updates the given attachment
+func EditReleaseAttachment(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/releases/{id}/assets/{attachment_id} repository repoEditReleaseAttachment
+ // ---
+ // summary: Edit a release attachment
+ // produces:
+ // - application/json
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to edit
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditAttachmentOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Attachment"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ form := web.GetForm(ctx).(*api.EditAttachmentOptions)
+
+ // Check that the release exists and load it
+ releaseID := ctx.ParamsInt64(":id")
+ if !checkReleaseMatchRepo(ctx, releaseID) {
+ return
+ }
+
+ attachID := ctx.ParamsInt64(":attachment_id")
+ attach, err := repo_model.GetAttachmentByID(ctx, attachID)
+ if err != nil {
+ if repo_model.IsErrAttachmentNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetAttachmentByID", err)
+ return
+ }
+ if attach.ReleaseID != releaseID {
+ log.Info("Requested attachment does not belong to release, release_id: %v, attachment_id: %v", releaseID, attachID)
+ ctx.NotFound()
+ return
+ }
+ // FIXME Should prove the existence of the given repo, but results in unnecessary database requests
+ if form.Name != "" {
+ attach.Name = form.Name
+ }
+
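+ // The download URL may only be changed for attachments that reference an external asset.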
+ if form.DownloadURL != "" {
+ if attach.ExternalURL == "" {
+ ctx.Error(http.StatusBadRequest, "EditAttachment", "existing attachment is not external")
+ return
+ }
+ attach.ExternalURL = form.DownloadURL
+ }
+
+ if err := repo_model.UpdateAttachment(ctx, attach); err != nil {
+ if repo_model.IsErrInvalidExternalURL(err) {
+ ctx.Error(http.StatusBadRequest, "UpdateAttachment", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateAttachment", err)
+ }
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attach))
+}
+
+// DeleteReleaseAttachment deletes a given attachment
+func DeleteReleaseAttachment(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/releases/{id}/assets/{attachment_id} repository repoDeleteReleaseAttachment
+ // ---
+ // summary: Delete a release attachment
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the release
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: attachment_id
+ // in: path
+ // description: id of the attachment to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ // Check that the release exists and load it
+ releaseID := ctx.ParamsInt64(":id")
+ if !checkReleaseMatchRepo(ctx, releaseID) {
+ return
+ }
+
+ attachID := ctx.ParamsInt64(":attachment_id")
+ attach, err := repo_model.GetAttachmentByID(ctx, attachID)
+ if err != nil {
+ if repo_model.IsErrAttachmentNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetAttachmentByID", err)
+ return
+ }
+ if attach.ReleaseID != releaseID {
+ log.Info("Requested attachment does not belong to release, release_id: %v, attachment_id: %v", releaseID, attachID)
+ ctx.NotFound()
+ return
+ }
+ // FIXME Should prove the existence of the given repo, but results in unnecessary database requests
+
+ if err := repo_model.DeleteAttachment(ctx, attach, true); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteAttachment", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/release_tags.go b/routers/api/v1/repo/release_tags.go
new file mode 100644
index 0000000..f845fad
--- /dev/null
+++ b/routers/api/v1/repo/release_tags.go
@@ -0,0 +1,125 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ releaseservice "code.gitea.io/gitea/services/release"
+)
+
+// GetReleaseByTag gets a single release of a repository by tag name
+func GetReleaseByTag(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/releases/tags/{tag} repository repoGetReleaseByTag
+ // ---
+ // summary: Get a release by tag name
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: tag
+ // in: path
+ // description: tag name of the release to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Release"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ tag := ctx.Params(":tag")
+
+ release, err := repo_model.GetRelease(ctx, ctx.Repo.Repository.ID, tag)
+ if err != nil {
+ if repo_model.IsErrReleaseNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetRelease", err)
+ return
+ }
+
+ if release.IsTag {
+ ctx.NotFound()
+ return
+ }
+
+ if err = release.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))
+}
+
+// DeleteReleaseByTag deletes a release from a repository by tag name
+func DeleteReleaseByTag(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/releases/tags/{tag} repository repoDeleteReleaseByTag
+ // ---
+ // summary: Delete a release by tag name
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: tag
+ // in: path
+ // description: tag name of the release to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ tag := ctx.Params(":tag")
+
+ release, err := repo_model.GetRelease(ctx, ctx.Repo.Repository.ID, tag)
+ if err != nil {
+ if repo_model.IsErrReleaseNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetRelease", err)
+ return
+ }
+
+ if release.IsTag {
+ ctx.NotFound()
+ return
+ }
+
+ if err = releaseservice.DeleteReleaseByID(ctx, ctx.Repo.Repository, release, ctx.Doer, false); err != nil {
+ if models.IsErrProtectedTagName(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "delTag", "user not allowed to delete protected tag")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DeleteReleaseByID", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/repo/repo.go b/routers/api/v1/repo/repo.go
new file mode 100644
index 0000000..f39e582
--- /dev/null
+++ b/routers/api/v1/repo/repo.go
@@ -0,0 +1,1338 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+ "slices"
+ "strings"
+ "time"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ activities_model "code.gitea.io/gitea/models/activities"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ quota_model "code.gitea.io/gitea/models/quota"
+ repo_model "code.gitea.io/gitea/models/repo"
+ unit_model "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/label"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ repo_module "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/validation"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/issue"
+ repo_service "code.gitea.io/gitea/services/repository"
+ wiki_service "code.gitea.io/gitea/services/wiki"
+)
+
+// Search repositories via options
+func Search(ctx *context.APIContext) {
+ // swagger:operation GET /repos/search repository repoSearch
+ // ---
+ // summary: Search for repositories
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: q
+ // in: query
+ // description: keyword
+ // type: string
+ // - name: topic
+ // in: query
+ // description: Limit search to repositories with keyword as topic
+ // type: boolean
+ // - name: includeDesc
+ // in: query
+ // description: include search of keyword within repository description
+ // type: boolean
+ // - name: uid
+ // in: query
+ // description: search only for repos that the user with the given id owns or contributes to
+ // type: integer
+ // format: int64
+ // - name: priority_owner_id
+ // in: query
+ // description: repo owner to prioritize in the results
+ // type: integer
+ // format: int64
+ // - name: team_id
+ // in: query
+ // description: search only for repos that belong to the given team id
+ // type: integer
+ // format: int64
+ // - name: starredBy
+ // in: query
+ // description: search only for repos that the user with the given id has starred
+ // type: integer
+ // format: int64
+ // - name: private
+ // in: query
+ // description: include private repositories this user has access to (defaults to true)
+ // type: boolean
+ // - name: is_private
+ // in: query
+ // description: show only public, private or all repositories (defaults to all)
+ // type: boolean
+ // - name: template
+ // in: query
+ // description: include template repositories this user has access to (defaults to true)
+ // type: boolean
+ // - name: archived
+ // in: query
+ // description: show only archived, non-archived or all repositories (defaults to all)
+ // type: boolean
+ // - name: mode
+ // in: query
+ // description: type of repository to search for. Supported values are
+ // "fork", "source", "mirror" and "collaborative"
+ // type: string
+ // - name: exclusive
+ // in: query
+ // description: if `uid` is given, search only for repos that the user owns
+ // type: boolean
+ // - name: sort
+ // in: query
+ // description: sort repos by attribute. Supported values are
+ // "alpha", "created", "updated", "size", "git_size", "lfs_size", "stars", "forks" and "id".
+ // Default is "alpha"
+ // type: string
+ // - name: order
+ // in: query
+ // description: sort order, either "asc" (ascending) or "desc" (descending).
+ // Default is "asc", ignored if "sort" is not specified.
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/SearchResults"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ private := ctx.IsSigned && (ctx.FormString("private") == "" || ctx.FormBool("private"))
+ if ctx.PublicOnly {
+ private = false
+ }
+
+ opts := &repo_model.SearchRepoOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ Actor: ctx.Doer,
+ Keyword: ctx.FormTrim("q"),
+ OwnerID: ctx.FormInt64("uid"),
+ PriorityOwnerID: ctx.FormInt64("priority_owner_id"),
+ TeamID: ctx.FormInt64("team_id"),
+ TopicOnly: ctx.FormBool("topic"),
+ Collaborate: optional.None[bool](),
+ Private: private,
+ Template: optional.None[bool](),
+ StarredByID: ctx.FormInt64("starredBy"),
+ IncludeDescription: ctx.FormBool("includeDesc"),
+ }
+
+ if ctx.FormString("template") != "" {
+ opts.Template = optional.Some(ctx.FormBool("template"))
+ }
+
+ if ctx.FormBool("exclusive") {
+ opts.Collaborate = optional.Some(false)
+ }
+
+ mode := ctx.FormString("mode")
+ switch mode {
+ case "source":
+ opts.Fork = optional.Some(false)
+ opts.Mirror = optional.Some(false)
+ case "fork":
+ opts.Fork = optional.Some(true)
+ case "mirror":
+ opts.Mirror = optional.Some(true)
+ case "collaborative":
+ opts.Mirror = optional.Some(false)
+ opts.Collaborate = optional.Some(true)
+ case "":
+ default:
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("Invalid search mode: \"%s\"", mode))
+ return
+ }
+
+ if ctx.FormString("archived") != "" {
+ opts.Archived = optional.Some(ctx.FormBool("archived"))
+ }
+
+ if ctx.FormString("is_private") != "" {
+ opts.IsPrivate = optional.Some(ctx.FormBool("is_private"))
+ }
+
+ sortMode := ctx.FormString("sort")
+ if len(sortMode) > 0 {
+ sortOrder := ctx.FormString("order")
+ if len(sortOrder) == 0 {
+ sortOrder = "asc"
+ }
+ if searchModeMap, ok := repo_model.OrderByMap[sortOrder]; ok {
+ if orderBy, ok := searchModeMap[sortMode]; ok {
+ opts.OrderBy = orderBy
+ } else {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("Invalid sort mode: \"%s\"", sortMode))
+ return
+ }
+ } else {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("Invalid sort order: \"%s\"", sortOrder))
+ return
+ }
+ }
+
+ var err error
+ repos, count, err := repo_model.SearchRepository(ctx, opts)
+ if err != nil {
+ ctx.JSON(http.StatusInternalServerError, api.SearchError{
+ OK: false,
+ Error: err.Error(),
+ })
+ return
+ }
+
+ results := make([]*api.Repository, len(repos))
+ for i, repo := range repos {
+ if err = repo.LoadOwner(ctx); err != nil {
+ ctx.JSON(http.StatusInternalServerError, api.SearchError{
+ OK: false,
+ Error: err.Error(),
+ })
+ return
+ }
+ permission, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.JSON(http.StatusInternalServerError, api.SearchError{
+ OK: false,
+ Error: err.Error(),
+ })
+ return
+ }
+ results[i] = convert.ToRepo(ctx, repo, permission)
+ }
+ ctx.SetLinkHeader(int(count), opts.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, api.SearchResults{
+ OK: true,
+ Data: results,
+ })
+}
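+
+// Example (illustrative only): assuming the router mounts this handler under
+// the usual /api/v1 prefix, a request such as
+//	GET /api/v1/repos/search?q=cli&mode=source&sort=updated&order=desc&limit=10
+// returns up to ten non-fork, non-mirror repositories matching "cli",
+// most recently updated first.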
+
+// CreateUserRepo creates a repository for a user
+func CreateUserRepo(ctx *context.APIContext, owner *user_model.User, opt api.CreateRepoOption) {
+ if opt.AutoInit && opt.Readme == "" {
+ opt.Readme = "Default"
+ }
+
+ // If the readme template does not exist, a 400 will be returned.
+ if opt.AutoInit && len(opt.Readme) > 0 && !slices.Contains(repo_module.Readmes, opt.Readme) {
+ ctx.Error(http.StatusBadRequest, "", fmt.Errorf("readme template does not exist, available templates: %v", repo_module.Readmes))
+ return
+ }
+
+ repo, err := repo_service.CreateRepository(ctx, ctx.Doer, owner, repo_service.CreateRepoOptions{
+ Name: opt.Name,
+ Description: opt.Description,
+ IssueLabels: opt.IssueLabels,
+ Gitignores: opt.Gitignores,
+ License: opt.License,
+ Readme: opt.Readme,
+ IsPrivate: opt.Private || setting.Repository.ForcePrivate,
+ AutoInit: opt.AutoInit,
+ DefaultBranch: opt.DefaultBranch,
+ TrustModel: repo_model.ToTrustModel(opt.TrustModel),
+ IsTemplate: opt.Template,
+ ObjectFormatName: opt.ObjectFormatName,
+ })
+ if err != nil {
+ if repo_model.IsErrRepoAlreadyExist(err) {
+ ctx.Error(http.StatusConflict, "", "The repository with the same name already exists.")
+ } else if db.IsErrNameReserved(err) ||
+ db.IsErrNamePatternNotAllowed(err) ||
+ label.IsErrTemplateLoad(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateRepository", err)
+ }
+ return
+ }
+
+ // reload repo from db to get a real state after creation
+ repo, err = repo_model.GetRepositoryByID(ctx, repo.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByID", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToRepo(ctx, repo, access_model.Permission{AccessMode: perm.AccessModeOwner}))
+}
+
+// Create creates a repository owned by the authenticated user
+func Create(ctx *context.APIContext) {
+ // swagger:operation POST /user/repos repository user createCurrentUserRepo
+ // ---
+ // summary: Create a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateRepoOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "409":
+ // description: The repository with the same name already exists.
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ opt := web.GetForm(ctx).(*api.CreateRepoOption)
+ if ctx.Doer.IsOrganization() {
+ // Shouldn't reach this condition, but just in case.
+ ctx.Error(http.StatusUnprocessableEntity, "", "not allowed creating repository for organization")
+ return
+ }
+ CreateUserRepo(ctx, ctx.Doer, *opt)
+}
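+
+// Example (illustrative only): POST /api/v1/user/repos with a JSON body along
+// the lines of {"name": "my-repo", "private": true, "auto_init": true,
+// "readme": "Default"} creates an auto-initialised private repository for the
+// authenticated user and returns it with a 201.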
+
+// Generate creates a repository from a template
+func Generate(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{template_owner}/{template_repo}/generate repository generateRepo
+ // ---
+ // summary: Create a repository using a template
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: template_owner
+ // in: path
+ // description: name of the template repository owner
+ // type: string
+ // required: true
+ // - name: template_repo
+ // in: path
+ // description: name of the template repository
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/GenerateRepoOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "409":
+ // description: The repository with the same name already exists.
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ form := web.GetForm(ctx).(*api.GenerateRepoOption)
+
+ if !ctx.Repo.Repository.IsTemplate {
+ ctx.Error(http.StatusUnprocessableEntity, "", "this is not a template repo")
+ return
+ }
+
+ if ctx.Doer.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", "not allowed creating repository for organization")
+ return
+ }
+
+ opts := repo_service.GenerateRepoOptions{
+ Name: form.Name,
+ DefaultBranch: form.DefaultBranch,
+ Description: form.Description,
+ Private: form.Private || setting.Repository.ForcePrivate,
+ GitContent: form.GitContent,
+ Topics: form.Topics,
+ GitHooks: form.GitHooks,
+ Webhooks: form.Webhooks,
+ Avatar: form.Avatar,
+ IssueLabels: form.Labels,
+ ProtectedBranch: form.ProtectedBranch,
+ }
+
+ if !opts.IsValid() {
+ ctx.Error(http.StatusUnprocessableEntity, "", "must select at least one template item")
+ return
+ }
+
+ ctxUser := ctx.Doer
+ var err error
+ if form.Owner != ctxUser.Name {
+ ctxUser, err = user_model.GetUserByName(ctx, form.Owner)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.JSON(http.StatusNotFound, map[string]any{
+ "error": "request owner `" + form.Owner + "` does not exist",
+ })
+ return
+ }
+
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ return
+ }
+
+ if !ctx.Doer.IsAdmin && !ctxUser.IsOrganization() {
+ ctx.Error(http.StatusForbidden, "", "Only admin can generate repository for other user.")
+ return
+ }
+
+ if !ctx.Doer.IsAdmin {
+ canCreate, err := organization.OrgFromUser(ctxUser).CanCreateOrgRepo(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.ServerError("CanCreateOrgRepo", err)
+ return
+ } else if !canCreate {
+ ctx.Error(http.StatusForbidden, "", "Given user is not allowed to create repository in organization.")
+ return
+ }
+ }
+ }
+
+ if !ctx.CheckQuota(quota_model.LimitSubjectSizeReposAll, ctxUser.ID, ctxUser.Name) {
+ return
+ }
+
+ repo, err := repo_service.GenerateRepository(ctx, ctx.Doer, ctxUser, ctx.Repo.Repository, opts)
+ if err != nil {
+ if repo_model.IsErrRepoAlreadyExist(err) {
+ ctx.Error(http.StatusConflict, "", "The repository with the same name already exists.")
+ } else if db.IsErrNameReserved(err) ||
+ db.IsErrNamePatternNotAllowed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateRepository", err)
+ }
+ return
+ }
+ log.Trace("Repository generated [%d]: %s/%s", repo.ID, ctxUser.Name, repo.Name)
+
+ ctx.JSON(http.StatusCreated, convert.ToRepo(ctx, repo, access_model.Permission{AccessMode: perm.AccessModeOwner}))
+}
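+
+// Example (illustrative only): POST /api/v1/repos/{template_owner}/{template_repo}/generate
+// with a body such as {"owner": "someorg", "name": "from-template", "git_content": true}
+// creates "someorg/from-template" from the template; at least one template
+// item (git content, topics, webhooks, ...) must be selected or a 422 is returned.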
+
+// CreateOrgRepoDeprecated creates a repository in the organization (deprecated endpoint)
+func CreateOrgRepoDeprecated(ctx *context.APIContext) {
+ // swagger:operation POST /org/{org}/repos organization createOrgRepoDeprecated
+ // ---
+ // summary: Create a repository in an organization
+ // deprecated: true
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateRepoOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ CreateOrgRepo(ctx)
+}
+
+// CreateOrgRepo creates a repository in the organization
+func CreateOrgRepo(ctx *context.APIContext) {
+ // swagger:operation POST /orgs/{org}/repos organization createOrgRepo
+ // ---
+ // summary: Create a repository in an organization
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of organization
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateRepoOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Repository"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ opt := web.GetForm(ctx).(*api.CreateRepoOption)
+ org, err := organization.GetOrgByName(ctx, ctx.Params(":org"))
+ if err != nil {
+ if organization.IsErrOrgNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetOrgByName", err)
+ }
+ return
+ }
+
+ if !organization.HasOrgOrUserVisible(ctx, org.AsUser(), ctx.Doer) {
+ ctx.NotFound("HasOrgOrUserVisible", nil)
+ return
+ }
+
+ if !ctx.Doer.IsAdmin {
+ canCreate, err := org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CanCreateOrgRepo", err)
+ return
+ } else if !canCreate {
+ ctx.Error(http.StatusForbidden, "", "Given user is not allowed to create repository in organization.")
+ return
+ }
+ }
+ CreateUserRepo(ctx, org.AsUser(), *opt)
+}
+
+// Get one repository
+func Get(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo} repository repoGet
+ // ---
+ // summary: Get a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Repository"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := ctx.Repo.Repository.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "Repository.LoadAttributes", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToRepo(ctx, ctx.Repo.Repository, ctx.Repo.Permission))
+}
+
+// GetByID returns a single Repository
+func GetByID(ctx *context.APIContext) {
+ // swagger:operation GET /repositories/{id} repository repoGetByID
+ // ---
+ // summary: Get a repository by id
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the repo to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Repository"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo, err := repo_model.GetRepositoryByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if repo_model.IsErrRepoNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetRepositoryByID", err)
+ }
+ return
+ }
+
+ permission, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ } else if !permission.HasAccess() {
+ ctx.NotFound()
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToRepo(ctx, repo, permission))
+}
+
+// Edit edits a repository's properties
+func Edit(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo} repository repoEdit
+ // ---
+ // summary: Edit a repository's properties. Only fields that are set will be changed.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to edit
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to edit
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // description: "Properties of a repo that you can edit"
+ // schema:
+ // "$ref": "#/definitions/EditRepoOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := *web.GetForm(ctx).(*api.EditRepoOption)
+
+ if err := updateBasicProperties(ctx, opts); err != nil {
+ return
+ }
+
+ if err := updateRepoUnits(ctx, opts); err != nil {
+ return
+ }
+
+ if opts.Archived != nil {
+ if err := updateRepoArchivedState(ctx, opts); err != nil {
+ return
+ }
+ }
+
+ if opts.MirrorInterval != nil || opts.EnablePrune != nil {
+ if err := updateMirror(ctx, opts); err != nil {
+ return
+ }
+ }
+
+ repo, err := repo_model.GetRepositoryByID(ctx, ctx.Repo.Repository.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToRepo(ctx, repo, ctx.Repo.Permission))
+}
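+
+// Example (illustrative only): because Edit only applies fields that are set,
+// a PATCH /api/v1/repos/{owner}/{repo} body such as
+// {"description": "new text", "has_wiki": false} changes just those two
+// properties and leaves everything else untouched.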
+
+// updateBasicProperties updates the basic properties of a repo: Name, Description, Website and Visibility
+func updateBasicProperties(ctx *context.APIContext, opts api.EditRepoOption) error {
+ owner := ctx.Repo.Owner
+ repo := ctx.Repo.Repository
+ newRepoName := repo.Name
+ if opts.Name != nil {
+ newRepoName = *opts.Name
+ }
+ // Check if repository name has been changed and not just a case change
+ if repo.LowerName != strings.ToLower(newRepoName) {
+ if err := repo_service.ChangeRepositoryName(ctx, ctx.Doer, repo, newRepoName); err != nil {
+ switch {
+ case repo_model.IsErrRepoAlreadyExist(err):
+ ctx.Error(http.StatusUnprocessableEntity, fmt.Sprintf("repo name is already taken [name: %s]", newRepoName), err)
+ case db.IsErrNameReserved(err):
+ ctx.Error(http.StatusUnprocessableEntity, fmt.Sprintf("repo name is reserved [name: %s]", newRepoName), err)
+ case db.IsErrNamePatternNotAllowed(err):
+ ctx.Error(http.StatusUnprocessableEntity, fmt.Sprintf("repo name's pattern is not allowed [name: %s, pattern: %s]", newRepoName, err.(db.ErrNamePatternNotAllowed).Pattern), err)
+ default:
+ ctx.Error(http.StatusUnprocessableEntity, "ChangeRepositoryName", err)
+ }
+ return err
+ }
+
+ log.Trace("Repository name changed: %s/%s -> %s", ctx.Repo.Owner.Name, repo.Name, newRepoName)
+ }
+ // Update the name in the repo object for the response
+ repo.Name = newRepoName
+ repo.LowerName = strings.ToLower(newRepoName)
+
+ if opts.Description != nil {
+ repo.Description = *opts.Description
+ }
+
+ if opts.Website != nil {
+ repo.Website = *opts.Website
+ }
+
+ visibilityChanged := false
+ if opts.Private != nil {
+ // The visibility of a forked repository is kept in sync with its base repository.
+ if repo.IsFork {
+ if err := repo.GetBaseRepo(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "Unable to load base repository", err)
+ return err
+ }
+ *opts.Private = repo.BaseRepo.IsPrivate
+ }
+
+ visibilityChanged = repo.IsPrivate != *opts.Private
+ // When ForcePrivate is enabled, a public repository can still be made private, but only admin users may change a private repository back to public
+ if visibilityChanged && setting.Repository.ForcePrivate && !*opts.Private && !ctx.Doer.IsAdmin {
+ err := fmt.Errorf("cannot change private repository to public")
+ ctx.Error(http.StatusUnprocessableEntity, "Force Private enabled", err)
+ return err
+ }
+
+ repo.IsPrivate = *opts.Private
+ }
+
+ if opts.Template != nil {
+ repo.IsTemplate = *opts.Template
+ }
+
+ if ctx.Repo.GitRepo == nil && !repo.IsEmpty {
+ var err error
+ ctx.Repo.GitRepo, err = gitrepo.OpenRepository(ctx, repo)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "Unable to OpenRepository", err)
+ return err
+ }
+ defer ctx.Repo.GitRepo.Close()
+ }
+
+ // The default branch is only updated if it changed and exists, or if the repository is empty
+ if opts.DefaultBranch != nil && repo.DefaultBranch != *opts.DefaultBranch && (repo.IsEmpty || ctx.Repo.GitRepo.IsBranchExist(*opts.DefaultBranch)) {
+ if !repo.IsEmpty {
+ if err := gitrepo.SetDefaultBranch(ctx, ctx.Repo.Repository, *opts.DefaultBranch); err != nil {
+ if !git.IsErrUnsupportedVersion(err) {
+ ctx.Error(http.StatusInternalServerError, "SetDefaultBranch", err)
+ return err
+ }
+ }
+ }
+ repo.DefaultBranch = *opts.DefaultBranch
+ }
+
+ // Wiki branch is updated if changed
+ if opts.WikiBranch != nil && repo.WikiBranch != *opts.WikiBranch {
+ if err := wiki_service.NormalizeWikiBranch(ctx, repo, *opts.WikiBranch); err != nil {
+ ctx.Error(http.StatusInternalServerError, "NormalizeWikiBranch", err)
+ return err
+ }
+ // While NormalizeWikiBranch updates the db, we need to update *this*
+ // instance of `repo`, so that the `UpdateRepository` below will not
+ // reset the branch back.
+ repo.WikiBranch = *opts.WikiBranch
+ }
+
+ if err := repo_service.UpdateRepository(ctx, repo, visibilityChanged); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateRepository", err)
+ return err
+ }
+
+ log.Trace("Repository basic settings updated: %s/%s", owner.Name, repo.Name)
+ return nil
+}
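+
+// For instance (illustrative only), {"name": "renamed", "private": true} both
+// renames the repository (unless only the letter case changed) and makes it
+// private; with ForcePrivate enabled, only instance administrators may switch
+// a private repository back to public.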
+
+// updateRepoUnits updates repo units: Issue settings, Wiki settings, PR settings
+func updateRepoUnits(ctx *context.APIContext, opts api.EditRepoOption) error {
+ owner := ctx.Repo.Owner
+ repo := ctx.Repo.Repository
+
+ var units []repo_model.RepoUnit
+ var deleteUnitTypes []unit_model.Type
+
+ currHasIssues := repo.UnitEnabled(ctx, unit_model.TypeIssues)
+ newHasIssues := currHasIssues
+ if opts.HasIssues != nil {
+ newHasIssues = *opts.HasIssues
+ }
+ if currHasIssues || newHasIssues {
+ if newHasIssues && opts.ExternalTracker != nil && !unit_model.TypeExternalTracker.UnitGlobalDisabled() {
+ // Check that values are valid
+ if !validation.IsValidExternalURL(opts.ExternalTracker.ExternalTrackerURL) {
+ err := fmt.Errorf("External tracker URL not valid")
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid external tracker URL", err)
+ return err
+ }
+ if len(opts.ExternalTracker.ExternalTrackerFormat) != 0 && !validation.IsValidExternalTrackerURLFormat(opts.ExternalTracker.ExternalTrackerFormat) {
+ err := fmt.Errorf("External tracker URL format not valid")
+ ctx.Error(http.StatusUnprocessableEntity, "Invalid external tracker URL format", err)
+ return err
+ }
+
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeExternalTracker,
+ Config: &repo_model.ExternalTrackerConfig{
+ ExternalTrackerURL: opts.ExternalTracker.ExternalTrackerURL,
+ ExternalTrackerFormat: opts.ExternalTracker.ExternalTrackerFormat,
+ ExternalTrackerStyle: opts.ExternalTracker.ExternalTrackerStyle,
+ ExternalTrackerRegexpPattern: opts.ExternalTracker.ExternalTrackerRegexpPattern,
+ },
+ })
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeIssues)
+ } else if newHasIssues && opts.ExternalTracker == nil && !unit_model.TypeIssues.UnitGlobalDisabled() {
+ // Default to built-in tracker
+ var config *repo_model.IssuesConfig
+
+ if opts.InternalTracker != nil {
+ config = &repo_model.IssuesConfig{
+ EnableTimetracker: opts.InternalTracker.EnableTimeTracker,
+ AllowOnlyContributorsToTrackTime: opts.InternalTracker.AllowOnlyContributorsToTrackTime,
+ EnableDependencies: opts.InternalTracker.EnableIssueDependencies,
+ }
+ } else if unit, err := repo.GetUnit(ctx, unit_model.TypeIssues); err != nil {
+ // Unit type doesn't exist, so we create a new config with default values
+ config = &repo_model.IssuesConfig{
+ EnableTimetracker: true,
+ AllowOnlyContributorsToTrackTime: true,
+ EnableDependencies: true,
+ }
+ } else {
+ config = unit.IssuesConfig()
+ }
+
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeIssues,
+ Config: config,
+ })
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeExternalTracker)
+ } else if !newHasIssues {
+ if !unit_model.TypeExternalTracker.UnitGlobalDisabled() {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeExternalTracker)
+ }
+ if !unit_model.TypeIssues.UnitGlobalDisabled() {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeIssues)
+ }
+ }
+ }
+
+ currHasWiki := repo.UnitEnabled(ctx, unit_model.TypeWiki)
+ newHasWiki := currHasWiki
+ if opts.HasWiki != nil {
+ newHasWiki = *opts.HasWiki
+ }
+ if currHasWiki || newHasWiki {
+ wikiPermissions := repo.MustGetUnit(ctx, unit_model.TypeWiki).DefaultPermissions
+ if opts.GloballyEditableWiki != nil {
+ if *opts.GloballyEditableWiki {
+ wikiPermissions = repo_model.UnitAccessModeWrite
+ } else {
+ wikiPermissions = repo_model.UnitAccessModeRead
+ }
+ }
+
+ if newHasWiki && opts.ExternalWiki != nil && !unit_model.TypeExternalWiki.UnitGlobalDisabled() {
+ // Check that values are valid
+ if !validation.IsValidExternalURL(opts.ExternalWiki.ExternalWikiURL) {
+ err := fmt.Errorf("External wiki URL not valid")
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid external wiki URL")
+ return err
+ }
+
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeExternalWiki,
+ Config: &repo_model.ExternalWikiConfig{
+ ExternalWikiURL: opts.ExternalWiki.ExternalWikiURL,
+ },
+ })
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeWiki)
+ } else if newHasWiki && opts.ExternalWiki == nil && !unit_model.TypeWiki.UnitGlobalDisabled() {
+ config := &repo_model.UnitConfig{}
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeWiki,
+ Config: config,
+ DefaultPermissions: wikiPermissions,
+ })
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeExternalWiki)
+ } else if !newHasWiki {
+ if !unit_model.TypeExternalWiki.UnitGlobalDisabled() {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeExternalWiki)
+ }
+ if !unit_model.TypeWiki.UnitGlobalDisabled() {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeWiki)
+ }
+ } else if opts.GloballyEditableWiki != nil && *opts.GloballyEditableWiki {
+ config := &repo_model.UnitConfig{}
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeWiki,
+ Config: config,
+ DefaultPermissions: wikiPermissions,
+ })
+ }
+ }
+
+ currHasPullRequests := repo.UnitEnabled(ctx, unit_model.TypePullRequests)
+ newHasPullRequests := currHasPullRequests
+ if opts.HasPullRequests != nil {
+ newHasPullRequests = *opts.HasPullRequests
+ }
+ if currHasPullRequests || newHasPullRequests {
+ if newHasPullRequests && !unit_model.TypePullRequests.UnitGlobalDisabled() {
+ // Individual PR settings can be set through the API, so we load the
+ // existing config and override whichever settings were provided in
+ // the opts.
+ unit, err := repo.GetUnit(ctx, unit_model.TypePullRequests)
+ var config *repo_model.PullRequestsConfig
+ if err != nil {
+ // Unit type doesn't exist, so we create a new config with default values
+ config = &repo_model.PullRequestsConfig{
+ IgnoreWhitespaceConflicts: false,
+ AllowMerge: true,
+ AllowRebase: true,
+ AllowRebaseMerge: true,
+ AllowSquash: true,
+ AllowFastForwardOnly: true,
+ AllowManualMerge: true,
+ AutodetectManualMerge: false,
+ AllowRebaseUpdate: true,
+ DefaultDeleteBranchAfterMerge: false,
+ DefaultMergeStyle: repo_model.MergeStyleMerge,
+ DefaultAllowMaintainerEdit: false,
+ }
+ } else {
+ config = unit.PullRequestsConfig()
+ }
+
+ if opts.IgnoreWhitespaceConflicts != nil {
+ config.IgnoreWhitespaceConflicts = *opts.IgnoreWhitespaceConflicts
+ }
+ if opts.AllowMerge != nil {
+ config.AllowMerge = *opts.AllowMerge
+ }
+ if opts.AllowRebase != nil {
+ config.AllowRebase = *opts.AllowRebase
+ }
+ if opts.AllowRebaseMerge != nil {
+ config.AllowRebaseMerge = *opts.AllowRebaseMerge
+ }
+ if opts.AllowSquash != nil {
+ config.AllowSquash = *opts.AllowSquash
+ }
+ if opts.AllowFastForwardOnly != nil {
+ config.AllowFastForwardOnly = *opts.AllowFastForwardOnly
+ }
+ if opts.AllowManualMerge != nil {
+ config.AllowManualMerge = *opts.AllowManualMerge
+ }
+ if opts.AutodetectManualMerge != nil {
+ config.AutodetectManualMerge = *opts.AutodetectManualMerge
+ }
+ if opts.AllowRebaseUpdate != nil {
+ config.AllowRebaseUpdate = *opts.AllowRebaseUpdate
+ }
+ if opts.DefaultDeleteBranchAfterMerge != nil {
+ config.DefaultDeleteBranchAfterMerge = *opts.DefaultDeleteBranchAfterMerge
+ }
+ if opts.DefaultMergeStyle != nil {
+ config.DefaultMergeStyle = repo_model.MergeStyle(*opts.DefaultMergeStyle)
+ }
+ if opts.DefaultAllowMaintainerEdit != nil {
+ config.DefaultAllowMaintainerEdit = *opts.DefaultAllowMaintainerEdit
+ }
+
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypePullRequests,
+ Config: config,
+ })
+ } else if !newHasPullRequests && !unit_model.TypePullRequests.UnitGlobalDisabled() {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypePullRequests)
+ }
+ }
+
+ if opts.HasProjects != nil && !unit_model.TypeProjects.UnitGlobalDisabled() {
+ if *opts.HasProjects {
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeProjects,
+ })
+ } else {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeProjects)
+ }
+ }
+
+ if opts.HasReleases != nil && !unit_model.TypeReleases.UnitGlobalDisabled() {
+ if *opts.HasReleases {
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeReleases,
+ })
+ } else {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeReleases)
+ }
+ }
+
+ if opts.HasPackages != nil && !unit_model.TypePackages.UnitGlobalDisabled() {
+ if *opts.HasPackages {
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypePackages,
+ })
+ } else {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypePackages)
+ }
+ }
+
+ if opts.HasActions != nil && !unit_model.TypeActions.UnitGlobalDisabled() {
+ if *opts.HasActions {
+ units = append(units, repo_model.RepoUnit{
+ RepoID: repo.ID,
+ Type: unit_model.TypeActions,
+ })
+ } else {
+ deleteUnitTypes = append(deleteUnitTypes, unit_model.TypeActions)
+ }
+ }
+
+ if len(units)+len(deleteUnitTypes) > 0 {
+ if err := repo_service.UpdateRepositoryUnits(ctx, repo, units, deleteUnitTypes); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateRepositoryUnits", err)
+ return err
+ }
+ }
+
+ log.Trace("Repository advanced settings updated: %s/%s", owner.Name, repo.Name)
+ return nil
+}
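+
+// For instance (illustrative only), sending {"has_issues": true,
+// "external_tracker": {"external_tracker_url": "https://issues.example.com"}}
+// switches the repository to the external tracker: the external-tracker unit
+// is added and the built-in issues unit is queued for deletion in the same
+// UpdateRepositoryUnits call.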
+
+// updateRepoArchivedState updates repo's archive state
+func updateRepoArchivedState(ctx *context.APIContext, opts api.EditRepoOption) error {
+ repo := ctx.Repo.Repository
+ // archive / un-archive
+ if opts.Archived != nil {
+ if repo.IsMirror {
+ err := fmt.Errorf("repo is a mirror, cannot archive/un-archive")
+ ctx.Error(http.StatusUnprocessableEntity, err.Error(), err)
+ return err
+ }
+ if *opts.Archived {
+ if err := repo_model.SetArchiveRepoState(ctx, repo, *opts.Archived); err != nil {
+ log.Error("Tried to archive a repo: %s", err)
+ ctx.Error(http.StatusInternalServerError, "ArchiveRepoState", err)
+ return err
+ }
+ if err := actions_model.CleanRepoScheduleTasks(ctx, repo, true); err != nil {
+ log.Error("CleanRepoScheduleTasks for archived repo %s/%s: %v", ctx.Repo.Owner.Name, repo.Name, err)
+ }
+ log.Trace("Repository was archived: %s/%s", ctx.Repo.Owner.Name, repo.Name)
+ } else {
+ if err := repo_model.SetArchiveRepoState(ctx, repo, *opts.Archived); err != nil {
+ log.Error("Tried to un-archive a repo: %s", err)
+ ctx.Error(http.StatusInternalServerError, "ArchiveRepoState", err)
+ return err
+ }
+ if ctx.Repo.Repository.UnitEnabled(ctx, unit_model.TypeActions) {
+ if err := actions_service.DetectAndHandleSchedules(ctx, repo); err != nil {
+ log.Error("DetectAndHandleSchedules for un-archived repo %s/%s: %v", ctx.Repo.Owner.Name, repo.Name, err)
+ }
+ }
+ log.Trace("Repository was un-archived: %s/%s", ctx.Repo.Owner.Name, repo.Name)
+ }
+ }
+ return nil
+}
+
+// updateMirror updates a repo's mirror Interval and EnablePrune
+func updateMirror(ctx *context.APIContext, opts api.EditRepoOption) error {
+ repo := ctx.Repo.Repository
+
+ // Skip this update if the repo is not a mirror; do not return an error, because
+ // reporting one only makes the logic more complex and fragile without really helping end users.
+ if !repo.IsMirror {
+ return nil
+ }
+
+ // get the mirror from the repo
+ mirror, err := repo_model.GetMirrorByRepoID(ctx, repo.ID)
+ if err != nil {
+ log.Error("Failed to get mirror: %s", err)
+ ctx.Error(http.StatusInternalServerError, "MirrorInterval", err)
+ return err
+ }
+
+ // update MirrorInterval
+ if opts.MirrorInterval != nil {
+ // MirrorInterval should be a duration
+ interval, err := time.ParseDuration(*opts.MirrorInterval)
+ if err != nil {
+ log.Error("Wrong format for MirrorInterval sent: %s", err)
+ ctx.Error(http.StatusUnprocessableEntity, "MirrorInterval", err)
+ return err
+ }
+
+ // Ensure the provided duration is not too short
+ if interval != 0 && interval < setting.Mirror.MinInterval {
+ err := fmt.Errorf("invalid mirror interval: %s is below minimum interval: %s", interval, setting.Mirror.MinInterval)
+ ctx.Error(http.StatusUnprocessableEntity, "MirrorInterval", err)
+ return err
+ }
+
+ mirror.Interval = interval
+ mirror.Repo = repo
+ mirror.ScheduleNextUpdate()
+ log.Trace("Repository %s Mirror[%d] Set Interval: %s NextUpdateUnix: %s", repo.FullName(), mirror.ID, interval, mirror.NextUpdateUnix)
+ }
+
+ // update EnablePrune
+ if opts.EnablePrune != nil {
+ mirror.EnablePrune = *opts.EnablePrune
+ log.Trace("Repository %s Mirror[%d] Set EnablePrune: %t", repo.FullName(), mirror.ID, mirror.EnablePrune)
+ }
+
+ // finally update the mirror in the DB
+ if err := repo_model.UpdateMirror(ctx, mirror); err != nil {
+ log.Error("Failed to Set Mirror Interval: %s", err)
+ ctx.Error(http.StatusUnprocessableEntity, "MirrorInterval", err)
+ return err
+ }
+
+ return nil
+}
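+
+// For example (illustrative only), {"mirror_interval": "8h0m0s"} is parsed via
+// time.ParseDuration and schedules the next sync; a non-zero interval below
+// setting.Mirror.MinInterval is rejected with a 422, while a zero interval is
+// accepted as-is.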
+
+// Delete one repository
+func Delete(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo} repository repoDelete
+ // ---
+ // summary: Delete a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to delete
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ owner := ctx.Repo.Owner
+ repo := ctx.Repo.Repository
+
+ canDelete, err := repo_module.CanUserDelete(ctx, repo, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CanUserDelete", err)
+ return
+ } else if !canDelete {
+ ctx.Error(http.StatusForbidden, "", "Given user is not owner of organization.")
+ return
+ }
+
+ if ctx.Repo.GitRepo != nil {
+ ctx.Repo.GitRepo.Close()
+ }
+
+ if err := repo_service.DeleteRepository(ctx, ctx.Doer, repo, true); err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteRepository", err)
+ return
+ }
+
+ log.Trace("Repository deleted: %s/%s", owner.Name, repo.Name)
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetIssueTemplates returns the issue templates for a repository
+func GetIssueTemplates(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issue_templates repository repoGetIssueTemplates
+ // ---
+ // summary: Get available issue templates for a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/IssueTemplates"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ ret, _ := issue.GetTemplatesFromDefaultBranch(ctx.Repo.Repository, ctx.Repo.GitRepo)
+ ctx.JSON(http.StatusOK, ret)
+}
+
+// GetIssueConfig returns the issue config for a repo
+func GetIssueConfig(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issue_config repository repoGetIssueConfig
+ // ---
+ // summary: Returns the issue config for a repo
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepoIssueConfig"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ issueConfig, _ := issue.GetTemplateConfigFromDefaultBranch(ctx.Repo.Repository, ctx.Repo.GitRepo)
+ ctx.JSON(http.StatusOK, issueConfig)
+}
+
+// ValidateIssueConfig returns validation errors for the issue config
+func ValidateIssueConfig(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/issue_config/validate repository repoValidateIssueConfig
+ // ---
+ // summary: Returns the validation information for an issue config
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepoIssueConfigValidation"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ _, err := issue.GetTemplateConfigFromDefaultBranch(ctx.Repo.Repository, ctx.Repo.GitRepo)
+
+ if err == nil {
+ ctx.JSON(http.StatusOK, api.IssueConfigValidation{Valid: true, Message: ""})
+ } else {
+ ctx.JSON(http.StatusOK, api.IssueConfigValidation{Valid: false, Message: err.Error()})
+ }
+}
+
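+// ListRepoActivityFeeds lists a repository's activity feeds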
+func ListRepoActivityFeeds(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/activities/feeds repository repoListActivityFeeds
+ // ---
+ // summary: List a repository's activity feeds
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: date
+ // in: query
+ // description: the date of the activities to be found
+ // type: string
+ // format: date
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityFeedsList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ opts := activities_model.GetFeedsOptions{
+ RequestedRepo: ctx.Repo.Repository,
+ OnlyPerformedByActor: true,
+ Actor: ctx.Doer,
+ IncludePrivate: true,
+ Date: ctx.FormString("date"),
+ ListOptions: listOptions,
+ }
+
+ feeds, count, err := activities_model.GetFeeds(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetFeeds", err)
+ return
+ }
+ ctx.SetTotalCountHeader(count)
+
+ ctx.JSON(http.StatusOK, convert.ToActivities(ctx, feeds, ctx.Doer))
+}
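+
+// Example (illustrative only): GET /api/v1/repos/{owner}/{repo}/activities/feeds?date=2024-01-02&limit=20
+// returns up to 20 feed entries recorded for that repository on that day,
+// performed by any actor the requesting user is allowed to see.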
diff --git a/routers/api/v1/repo/repo_test.go b/routers/api/v1/repo/repo_test.go
new file mode 100644
index 0000000..8d6ca9e
--- /dev/null
+++ b/routers/api/v1/repo/repo_test.go
@@ -0,0 +1,86 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "testing"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/contexttest"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRepoEdit(t *testing.T) {
+ unittest.PrepareTestEnv(t)
+
+ ctx, _ := contexttest.MockAPIContext(t, "user2/repo1")
+ contexttest.LoadRepo(t, ctx, 1)
+ contexttest.LoadUser(t, ctx, 2)
+ ctx.Repo.Owner = ctx.Doer
+ description := "new description"
+ website := "http://www.newwebsite.com"
+ private := true
+ hasIssues := false
+ hasWiki := false
+ defaultBranch := "master"
+ hasPullRequests := true
+ ignoreWhitespaceConflicts := true
+ allowMerge := false
+ allowRebase := false
+ allowRebaseMerge := false
+ allowSquashMerge := false
+ allowFastForwardOnlyMerge := false
+ archived := true
+ opts := api.EditRepoOption{
+ Name: &ctx.Repo.Repository.Name,
+ Description: &description,
+ Website: &website,
+ Private: &private,
+ HasIssues: &hasIssues,
+ HasWiki: &hasWiki,
+ DefaultBranch: &defaultBranch,
+ HasPullRequests: &hasPullRequests,
+ IgnoreWhitespaceConflicts: &ignoreWhitespaceConflicts,
+ AllowMerge: &allowMerge,
+ AllowRebase: &allowRebase,
+ AllowRebaseMerge: &allowRebaseMerge,
+ AllowSquash: &allowSquashMerge,
+ AllowFastForwardOnly: &allowFastForwardOnlyMerge,
+ Archived: &archived,
+ }
+
+ web.SetForm(ctx, &opts)
+ Edit(ctx)
+
+ assert.EqualValues(t, http.StatusOK, ctx.Resp.Status())
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{
+ ID: 1,
+ }, unittest.Cond("name = ? AND is_archived = 1", *opts.Name))
+}
+
+func TestRepoEditNameChange(t *testing.T) {
+ unittest.PrepareTestEnv(t)
+
+ ctx, _ := contexttest.MockAPIContext(t, "user2/repo1")
+ contexttest.LoadRepo(t, ctx, 1)
+ contexttest.LoadUser(t, ctx, 2)
+ ctx.Repo.Owner = ctx.Doer
+ name := "newname"
+ opts := api.EditRepoOption{
+ Name: &name,
+ }
+
+ web.SetForm(ctx, &opts)
+ Edit(ctx)
+ assert.EqualValues(t, http.StatusOK, ctx.Resp.Status())
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{
+ ID: 1,
+ }, unittest.Cond("name = ?", opts.Name))
+}
diff --git a/routers/api/v1/repo/star.go b/routers/api/v1/repo/star.go
new file mode 100644
index 0000000..99676de
--- /dev/null
+++ b/routers/api/v1/repo/star.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListStargazers lists a repository's stargazers
+func ListStargazers(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/stargazers repository repoListStargazers
+ // ---
+ // summary: List a repo's stargazers
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ stargazers, err := repo_model.GetStargazers(ctx, ctx.Repo.Repository, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetStargazers", err)
+ return
+ }
+ users := make([]*api.User, len(stargazers))
+ for i, stargazer := range stargazers {
+ users[i] = convert.ToUser(ctx, stargazer, ctx.Doer)
+ }
+
+ ctx.SetTotalCountHeader(int64(ctx.Repo.Repository.NumStars))
+ ctx.JSON(http.StatusOK, users)
+}
diff --git a/routers/api/v1/repo/status.go b/routers/api/v1/repo/status.go
new file mode 100644
index 0000000..9e36ea0
--- /dev/null
+++ b/routers/api/v1/repo/status.go
@@ -0,0 +1,282 @@
+// Copyright 2017 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ commitstatus_service "code.gitea.io/gitea/services/repository/commitstatus"
+)
+
+// NewCommitStatus creates a new CommitStatus
+func NewCommitStatus(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/statuses/{sha} repository repoCreateStatus
+ // ---
+ // summary: Create a commit status
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: sha of the commit
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateStatusOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/CommitStatus"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.CreateStatusOption)
+ sha := ctx.Params("sha")
+ if len(sha) == 0 {
+ ctx.Error(http.StatusBadRequest, "sha not given", nil)
+ return
+ }
+ status := &git_model.CommitStatus{
+ State: form.State,
+ TargetURL: form.TargetURL,
+ Description: form.Description,
+ Context: form.Context,
+ }
+ if err := commitstatus_service.CreateCommitStatus(ctx, ctx.Repo.Repository, ctx.Doer, sha, status); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateCommitStatus", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToCommitStatus(ctx, status))
+}
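+
+// Example (illustrative only): POST /api/v1/repos/{owner}/{repo}/statuses/{sha}
+// with a body such as {"state": "success", "context": "ci/build",
+// "target_url": "https://ci.example.com/build/1"} records a successful
+// "ci/build" status for that commit and returns it with a 201.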
+
+// GetCommitStatuses returns all statuses for any given commit hash
+func GetCommitStatuses(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/statuses/{sha} repository repoListStatuses
+ // ---
+ // summary: Get a commit's statuses
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: sha of the commit
+ // type: string
+ // required: true
+ // - name: sort
+ // in: query
+ // description: type of sort
+ // type: string
+ // enum: [oldest, recentupdate, leastupdate, leastindex, highestindex]
+ // required: false
+ // - name: state
+ // in: query
+ // description: type of state
+ // type: string
+ // enum: [pending, success, error, failure, warning]
+ // required: false
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommitStatusList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ getCommitStatuses(ctx, ctx.Params("sha"))
+}
+
+// GetCommitStatusesByRef returns all statuses for any given commit ref
+func GetCommitStatusesByRef(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/commits/{ref}/statuses repository repoListStatusesByRef
+ // ---
+ // summary: Get a commit's statuses, by branch/tag/commit reference
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: ref
+ // in: path
+ // description: name of branch/tag/commit
+ // type: string
+ // required: true
+ // - name: sort
+ // in: query
+ // description: type of sort
+ // type: string
+ // enum: [oldest, recentupdate, leastupdate, leastindex, highestindex]
+ // required: false
+ // - name: state
+ // in: query
+ // description: type of state
+ // type: string
+ // enum: [pending, success, error, failure, warning]
+ // required: false
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CommitStatusList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ filter := utils.ResolveRefOrSha(ctx, ctx.Params("ref"))
+ if ctx.Written() {
+ return
+ }
+
+ getCommitStatuses(ctx, filter) // filter is either the SHA the ref resolved to or the raw SHA that was passed in
+}
+
+func getCommitStatuses(ctx *context.APIContext, sha string) {
+ if len(sha) == 0 {
+ ctx.Error(http.StatusBadRequest, "ref/sha not given", nil)
+ return
+ }
+ sha = utils.MustConvertToSHA1(ctx.Base, ctx.Repo, sha)
+ repo := ctx.Repo.Repository
+
+ listOptions := utils.GetListOptions(ctx)
+
+ statuses, maxResults, err := db.FindAndCount[git_model.CommitStatus](ctx, &git_model.CommitStatusOptions{
+ ListOptions: listOptions,
+ RepoID: repo.ID,
+ SHA: sha,
+ SortType: ctx.FormTrim("sort"),
+ State: ctx.FormTrim("state"),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommitStatuses", fmt.Errorf("GetCommitStatuses[%s, %s, %d]: %w", repo.FullName(), sha, ctx.FormInt("page"), err))
+ return
+ }
+
+ apiStatuses := make([]*api.CommitStatus, 0, len(statuses))
+ for _, status := range statuses {
+ apiStatuses = append(apiStatuses, convert.ToCommitStatus(ctx, status))
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+
+ ctx.JSON(http.StatusOK, apiStatuses)
+}
+
+// GetCombinedCommitStatusByRef returns the combined status for any given branch/tag/commit reference
+func GetCombinedCommitStatusByRef(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/commits/{ref}/status repository repoGetCombinedStatusByRef
+ // ---
+ // summary: Get a commit's combined status, by branch/tag/commit reference
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: ref
+ // in: path
+ // description: name of branch/tag/commit
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/CombinedStatus"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := utils.ResolveRefOrSha(ctx, ctx.Params("ref"))
+ if ctx.Written() {
+ return
+ }
+
+ repo := ctx.Repo.Repository
+
+ statuses, count, err := git_model.GetLatestCommitStatus(ctx, repo.ID, sha, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetLatestCommitStatus", fmt.Errorf("GetLatestCommitStatus[%s, %s]: %w", repo.FullName(), sha, err))
+ return
+ }
+
+ if len(statuses) == 0 {
+ ctx.JSON(http.StatusOK, &api.CombinedStatus{})
+ return
+ }
+
+ combiStatus := convert.ToCombinedStatus(ctx, statuses, convert.ToRepo(ctx, repo, ctx.Repo.Permission))
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, combiStatus)
+}
diff --git a/routers/api/v1/repo/subscriber.go b/routers/api/v1/repo/subscriber.go
new file mode 100644
index 0000000..8584182
--- /dev/null
+++ b/routers/api/v1/repo/subscriber.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListSubscribers lists a repo's subscribers (i.e. watchers)
+func ListSubscribers(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/subscribers repository repoListSubscribers
+ // ---
+ // summary: List a repo's watchers
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ subscribers, err := repo_model.GetRepoWatchers(ctx, ctx.Repo.Repository.ID, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetRepoWatchers", err)
+ return
+ }
+ users := make([]*api.User, len(subscribers))
+ for i, subscriber := range subscribers {
+ users[i] = convert.ToUser(ctx, subscriber, ctx.Doer)
+ }
+
+ ctx.SetTotalCountHeader(int64(ctx.Repo.Repository.NumWatches))
+ ctx.JSON(http.StatusOK, users)
+}
diff --git a/routers/api/v1/repo/tag.go b/routers/api/v1/repo/tag.go
new file mode 100644
index 0000000..7dbdd1f
--- /dev/null
+++ b/routers/api/v1/repo/tag.go
@@ -0,0 +1,668 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "code.gitea.io/gitea/models"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ releaseservice "code.gitea.io/gitea/services/release"
+)
+
+// ListTags lists all the tags of a repository
+func ListTags(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/tags repository repoListTags
+ // ---
+ // summary: List a repository's tags
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results, default maximum page size is 50
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TagList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listOpts := utils.GetListOptions(ctx)
+
+ tags, total, err := ctx.Repo.GitRepo.GetTagInfos(listOpts.Page, listOpts.PageSize)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTags", err)
+ return
+ }
+
+ apiTags := make([]*api.Tag, len(tags))
+ for i := range tags {
+ tags[i].ArchiveDownloadCount, err = repo_model.GetArchiveDownloadCountForTagName(ctx, ctx.Repo.Repository.ID, tags[i].Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTagArchiveDownloadCountForName", err)
+ return
+ }
+
+ apiTags[i] = convert.ToTag(ctx.Repo.Repository, tags[i])
+ }
+
+ ctx.SetTotalCountHeader(int64(total))
+ ctx.JSON(http.StatusOK, &apiTags)
+}
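+
+// Example (illustrative only): GET /api/v1/repos/{owner}/{repo}/tags?page=1&limit=10
+// lists the first ten tags, each annotated with its archive download count.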
+
+// GetAnnotatedTag gets the tag object of an annotated tag in a repository.
+func GetAnnotatedTag(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/tags/{sha} repository GetAnnotatedTag
+ // ---
+ // summary: Gets the tag object of an annotated tag (not lightweight tags)
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: sha of the tag. The Git tags API only supports annotated tag objects, not lightweight tags.
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/AnnotatedTag"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := ctx.Params("sha")
+ if len(sha) == 0 {
+ ctx.Error(http.StatusBadRequest, "", "SHA not provided")
+ return
+ }
+
+ if tag, err := ctx.Repo.GitRepo.GetAnnotatedTag(sha); err != nil {
+ ctx.Error(http.StatusBadRequest, "GetAnnotatedTag", err)
+ } else {
+ commit, err := tag.Commit(ctx.Repo.GitRepo)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "GetAnnotatedTag", err)
+ return
+ }
+
+ tag.ArchiveDownloadCount, err = repo_model.GetArchiveDownloadCountForTagName(ctx, ctx.Repo.Repository.ID, tag.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTagArchiveDownloadCountForName", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToAnnotatedTag(ctx, ctx.Repo.Repository, tag, commit))
+ }
+}
+
+// GetTag gets a tag of a repository by its name
+func GetTag(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/tags/{tag} repository repoGetTag
+ // ---
+ // summary: Get the tag of a repository by tag name
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: tag
+ // in: path
+ // description: name of tag
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Tag"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ tagName := ctx.Params("*")
+
+ tag, err := ctx.Repo.GitRepo.GetTag(tagName)
+ if err != nil {
+ ctx.NotFound(tagName)
+ return
+ }
+
+ tag.ArchiveDownloadCount, err = repo_model.GetArchiveDownloadCountForTagName(ctx, ctx.Repo.Repository.ID, tag.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTagArchiveDownloadCountForName", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToTag(ctx.Repo.Repository, tag))
+}
+
+// CreateTag creates a new git tag in a repository
+func CreateTag(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/tags repository repoCreateTag
+ // ---
+ // summary: Create a new git tag in a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateTagOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Tag"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "405":
+ // "$ref": "#/responses/empty"
+ // "409":
+ // "$ref": "#/responses/conflict"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ form := web.GetForm(ctx).(*api.CreateTagOption)
+
+ // If target is not provided, use the default branch
+ if len(form.Target) == 0 {
+ form.Target = ctx.Repo.Repository.DefaultBranch
+ }
+
+ commit, err := ctx.Repo.GitRepo.GetCommit(form.Target)
+ if err != nil {
+ ctx.Error(http.StatusNotFound, "target not found", fmt.Errorf("target not found: %w", err))
+ return
+ }
+
+ if err := releaseservice.CreateNewTag(ctx, ctx.Doer, ctx.Repo.Repository, commit.ID.String(), form.TagName, form.Message); err != nil {
+ if models.IsErrTagAlreadyExists(err) {
+ ctx.Error(http.StatusConflict, "tag exists", err)
+ return
+ }
+ if models.IsErrProtectedTagName(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "CreateNewTag", "user not allowed to create protected tag")
+ return
+ }
+
+ ctx.InternalServerError(err)
+ return
+ }
+
+ tag, err := ctx.Repo.GitRepo.GetTag(form.TagName)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ tag.ArchiveDownloadCount, err = repo_model.GetArchiveDownloadCountForTagName(ctx, ctx.Repo.Repository.ID, tag.Name)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetTagArchiveDownloadCountForName", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToTag(ctx.Repo.Repository, tag))
+}
+
+// DeleteTag deletes a specific tag in a repository by name
+func DeleteTag(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/tags/{tag} repository repoDeleteTag
+ // ---
+ // summary: Delete a repository's tag by name
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: tag
+ // in: path
+ // description: name of tag to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "405":
+ // "$ref": "#/responses/empty"
+ // "409":
+ // "$ref": "#/responses/conflict"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+ tagName := ctx.Params("*")
+
+ tag, err := repo_model.GetRelease(ctx, ctx.Repo.Repository.ID, tagName)
+ if err != nil {
+ if repo_model.IsErrReleaseNotExist(err) {
+ ctx.NotFound()
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetRelease", err)
+ return
+ }
+
+ if !tag.IsTag {
+ ctx.Error(http.StatusConflict, "IsTag", errors.New("a tag attached to a release cannot be deleted directly"))
+ return
+ }
+
+ if err = releaseservice.DeleteReleaseByID(ctx, ctx.Repo.Repository, tag, ctx.Doer, true); err != nil {
+ if models.IsErrProtectedTagName(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "delTag", "user not allowed to delete protected tag")
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DeleteReleaseByID", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListTagProtection lists tag protections for a repo
+func ListTagProtection(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/tag_protections repository repoListTagProtection
+ // ---
+ // summary: List tag protections for a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TagProtectionList"
+
+ repo := ctx.Repo.Repository
+ pts, err := git_model.GetProtectedTags(ctx, repo.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTags", err)
+ return
+ }
+ apiPts := make([]*api.TagProtection, len(pts))
+ for i := range pts {
+ apiPts[i] = convert.ToTagProtection(ctx, pts[i], repo)
+ }
+
+ ctx.JSON(http.StatusOK, apiPts)
+}
+
+// GetTagProtection gets a tag protection
+func GetTagProtection(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/tag_protections/{id} repository repoGetTagProtection
+ // ---
+ // summary: Get a specific tag protection for the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of the tag protection to get
+ // type: integer
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TagProtection"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+ id := ctx.ParamsInt64(":id")
+ pt, err := git_model.GetProtectedTagByID(ctx, id)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTagByID", err)
+ return
+ }
+
+ if pt == nil || repo.ID != pt.RepoID {
+ ctx.NotFound()
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToTagProtection(ctx, pt, repo))
+}
+
+// CreateTagProtection creates a tag protection for a repo
+func CreateTagProtection(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/tag_protections repository repoCreateTagProtection
+ // ---
+ // summary: Create a tag protection for a repository
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateTagProtectionOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/TagProtection"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := web.GetForm(ctx).(*api.CreateTagProtectionOption)
+ repo := ctx.Repo.Repository
+
+ namePattern := strings.TrimSpace(form.NamePattern)
+ if namePattern == "" {
+ ctx.Error(http.StatusBadRequest, "name_pattern are empty", "name_pattern are empty")
+ return
+ }
+
+ if len(form.WhitelistUsernames) == 0 && len(form.WhitelistTeams) == 0 {
+ ctx.Error(http.StatusBadRequest, "both whitelist_usernames and whitelist_teams are empty", "both whitelist_usernames and whitelist_teams are empty")
+ return
+ }
+
+ pt, err := git_model.GetProtectedTagByNamePattern(ctx, repo.ID, namePattern)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectTagOfRepo", err)
+ return
+ } else if pt != nil {
+ ctx.Error(http.StatusForbidden, "Create tag protection", "Tag protection already exist")
+ return
+ }
+
+ var whitelistUsers, whitelistTeams []int64
+ whitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.WhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+
+ if repo.Owner.IsOrganization() {
+ whitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.WhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ }
+
+ protectTag := &git_model.ProtectedTag{
+ RepoID: repo.ID,
+ NamePattern: strings.TrimSpace(namePattern),
+ AllowlistUserIDs: whitelistUsers,
+ AllowlistTeamIDs: whitelistTeams,
+ }
+ if err := git_model.InsertProtectedTag(ctx, protectTag); err != nil {
+ ctx.Error(http.StatusInternalServerError, "InsertProtectedTag", err)
+ return
+ }
+
+ pt, err = git_model.GetProtectedTagByID(ctx, protectTag.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTagByID", err)
+ return
+ }
+
+ if pt == nil || pt.RepoID != repo.ID {
+ ctx.Error(http.StatusInternalServerError, "New tag protection not found", err)
+ return
+ }
+
+ ctx.JSON(http.StatusCreated, convert.ToTagProtection(ctx, pt, repo))
+}
+
+// EditTagProtection edits a tag protection for a repo
+func EditTagProtection(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/tag_protections/{id} repository repoEditTagProtection
+ // ---
+ // summary: Edit a tag protection for a repository. Only fields that are set will be changed
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of protected tag
+ // type: integer
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditTagProtectionOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TagProtection"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ repo := ctx.Repo.Repository
+ form := web.GetForm(ctx).(*api.EditTagProtectionOption)
+
+ id := ctx.ParamsInt64(":id")
+ pt, err := git_model.GetProtectedTagByID(ctx, id)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTagByID", err)
+ return
+ }
+
+ if pt == nil || pt.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ if form.NamePattern != nil {
+ pt.NamePattern = *form.NamePattern
+ }
+
+ var whitelistUsers, whitelistTeams []int64
+ if form.WhitelistTeams != nil {
+ if repo.Owner.IsOrganization() {
+ whitelistTeams, err = organization.GetTeamIDsByNames(ctx, repo.OwnerID, form.WhitelistTeams, false)
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "Team does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetTeamIDsByNames", err)
+ return
+ }
+ }
+ pt.AllowlistTeamIDs = whitelistTeams
+ }
+
+ if form.WhitelistUsernames != nil {
+ whitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.WhitelistUsernames, false)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "User does not exist", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "GetUserIDsByNames", err)
+ return
+ }
+ pt.AllowlistUserIDs = whitelistUsers
+ }
+
+ err = git_model.UpdateProtectedTag(ctx, pt)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateProtectedTag", err)
+ return
+ }
+
+ pt, err = git_model.GetProtectedTagByID(ctx, id)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTagByID", err)
+ return
+ }
+
+ if pt == nil || pt.RepoID != repo.ID {
+ ctx.Error(http.StatusInternalServerError, "New tag protection not found", "New tag protection not found")
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToTagProtection(ctx, pt, repo))
+}
+
+// DeleteTagProtection deletes a tag protection for a repository
+func DeleteTagProtection(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/tag_protections/{id} repository repoDeleteTagProtection
+ // ---
+ // summary: Delete a specific tag protection for the repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: id
+ // in: path
+ // description: id of protected tag
+ // type: integer
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ repo := ctx.Repo.Repository
+ id := ctx.ParamsInt64(":id")
+ pt, err := git_model.GetProtectedTagByID(ctx, id)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetProtectedTagByID", err)
+ return
+ }
+
+ if pt == nil || pt.RepoID != repo.ID {
+ ctx.NotFound()
+ return
+ }
+
+ err = git_model.DeleteProtectedTag(ctx, pt)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteProtectedTag", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
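
The tag handlers above are plain JSON-over-HTTP endpoints. Below is a minimal client sketch for the create-tag route documented in CreateTag, using only the Go standard library; the host, the token, and the CreateTagOption field names (tag_name, target, message) are assumptions made for illustration and do not come from this diff.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Body mirroring api.CreateTagOption; field names are assumed for illustration.
	body, _ := json.Marshal(map[string]string{
		"tag_name": "v1.0.0",
		"target":   "main", // falls back to the default branch when empty
		"message":  "First release",
	})

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/some-owner/some-repo/tags",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token REPLACE_WITH_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Expected: 201 Created on success, 409 if the tag exists, 422 for protected names.
	fmt.Println(resp.Status)
}
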
diff --git a/routers/api/v1/repo/teams.go b/routers/api/v1/repo/teams.go
new file mode 100644
index 0000000..0ecf3a3
--- /dev/null
+++ b/routers/api/v1/repo/teams.go
@@ -0,0 +1,235 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ org_service "code.gitea.io/gitea/services/org"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// ListTeams lists a repository's teams
+func ListTeams(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/teams repository repoListTeams
+ // ---
+ // summary: List a repository's teams
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TeamList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !ctx.Repo.Owner.IsOrganization() {
+ ctx.Error(http.StatusMethodNotAllowed, "noOrg", "repo is not owned by an organization")
+ return
+ }
+
+ teams, err := organization.GetRepoTeams(ctx, ctx.Repo.Repository)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiTeams, err := convert.ToTeams(ctx, teams, false)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, apiTeams)
+}
+
+// IsTeam checks if a team is assigned to a repository
+func IsTeam(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/teams/{team} repository repoCheckTeam
+ // ---
+ // summary: Check if a team is assigned to a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: team
+ // in: path
+ // description: team name
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Team"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "405":
+ // "$ref": "#/responses/error"
+
+ if !ctx.Repo.Owner.IsOrganization() {
+ ctx.Error(http.StatusMethodNotAllowed, "noOrg", "repo is not owned by an organization")
+ return
+ }
+
+ team := getTeamByParam(ctx)
+ if team == nil {
+ return
+ }
+
+ if repo_service.HasRepository(ctx, team, ctx.Repo.Repository.ID) {
+ apiTeam, err := convert.ToTeam(ctx, team)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiTeam)
+ return
+ }
+
+ ctx.NotFound()
+}
+
+// AddTeam adds a team to a repository
+func AddTeam(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/teams/{team} repository repoAddTeam
+ // ---
+ // summary: Add a team to a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: team
+ // in: path
+ // description: team name
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "405":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ changeRepoTeam(ctx, true)
+}
+
+// DeleteTeam deletes a team from a repository
+func DeleteTeam(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/teams/{team} repository repoDeleteTeam
+ // ---
+ // summary: Delete a team from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: team
+ // in: path
+ // description: team name
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "422":
+ // "$ref": "#/responses/validationError"
+ // "405":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ changeRepoTeam(ctx, false)
+}
+
+func changeRepoTeam(ctx *context.APIContext, add bool) {
+ if !ctx.Repo.Owner.IsOrganization() {
+ ctx.Error(http.StatusMethodNotAllowed, "noOrg", "repo is not owned by an organization")
+ return
+ }
+ if !ctx.Repo.Owner.RepoAdminChangeTeamAccess && !ctx.Repo.IsOwner() {
+ ctx.Error(http.StatusForbidden, "noAdmin", "user is nor repo admin nor owner")
+ return
+ }
+
+ team := getTeamByParam(ctx)
+ if team == nil {
+ return
+ }
+
+ repoHasTeam := repo_service.HasRepository(ctx, team, ctx.Repo.Repository.ID)
+ var err error
+ if add {
+ if repoHasTeam {
+ ctx.Error(http.StatusUnprocessableEntity, "alreadyAdded", fmt.Errorf("team '%s' is already added to repo", team.Name))
+ return
+ }
+ err = org_service.TeamAddRepository(ctx, team, ctx.Repo.Repository)
+ } else {
+ if !repoHasTeam {
+ ctx.Error(http.StatusUnprocessableEntity, "notAdded", fmt.Errorf("team '%s' was not added to repo", team.Name))
+ return
+ }
+ err = repo_service.RemoveRepositoryFromTeam(ctx, team, ctx.Repo.Repository.ID)
+ }
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+func getTeamByParam(ctx *context.APIContext) *organization.Team {
+ team, err := organization.GetTeam(ctx, ctx.Repo.Owner.ID, ctx.Params(":team"))
+ if err != nil {
+ if organization.IsErrTeamNotExist(err) {
+ ctx.Error(http.StatusNotFound, "TeamNotExit", err)
+ return nil
+ }
+ ctx.InternalServerError(err)
+ return nil
+ }
+ return team
+}
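
A sketch of how the AddTeam and DeleteTeam routes above might be driven from a client: PUT and DELETE on /repos/{owner}/{repo}/teams/{team} carry no request body and answer 204 on success. The host, token, and team name below are placeholders.

package main

import (
	"fmt"
	"net/http"
)

// changeTeam adds (PUT) or removes (DELETE) a team on a repository, mirroring
// the AddTeam/DeleteTeam handlers; it returns the HTTP status code for inspection.
func changeTeam(method, owner, repo, team, token string) (int, error) {
	url := fmt.Sprintf("https://forgejo.example.com/api/v1/repos/%s/%s/teams/%s", owner, repo, team)
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return 0, err
	}
	req.Header.Set("Authorization", "token "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil // 204 on success, 422 if already added / not added
}

func main() {
	status, err := changeTeam(http.MethodPut, "some-org", "some-repo", "reviewers", "REPLACE_WITH_TOKEN")
	if err != nil {
		panic(err)
	}
	fmt.Println("add team:", status)
}
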
diff --git a/routers/api/v1/repo/topic.go b/routers/api/v1/repo/topic.go
new file mode 100644
index 0000000..1d8e675
--- /dev/null
+++ b/routers/api/v1/repo/topic.go
@@ -0,0 +1,305 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+ "strings"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListTopics returns the list of topics for a repository
+func ListTopics(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/topics repository repoListTopics
+ // ---
+ // summary: Get list of topics that a repository has
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TopicNames"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := &repo_model.FindTopicOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ RepoID: ctx.Repo.Repository.ID,
+ }
+
+ topics, total, err := repo_model.FindTopics(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ topicNames := make([]string, len(topics))
+ for i, topic := range topics {
+ topicNames[i] = topic.Name
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, map[string]any{
+ "topics": topicNames,
+ })
+}
+
+// UpdateTopics updates repo with a new set of topics
+func UpdateTopics(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/topics repository repoUpdateTopics
+ // ---
+ // summary: Replace list of topics for a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/RepoTopicOptions"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/invalidTopicsError"
+
+ form := web.GetForm(ctx).(*api.RepoTopicOptions)
+ topicNames := form.Topics
+ validTopics, invalidTopics := repo_model.SanitizeAndValidateTopics(topicNames)
+
+ if len(validTopics) > 25 {
+ ctx.JSON(http.StatusUnprocessableEntity, map[string]any{
+ "invalidTopics": nil,
+ "message": "Exceeding maximum number of topics per repo",
+ })
+ return
+ }
+
+ if len(invalidTopics) > 0 {
+ ctx.JSON(http.StatusUnprocessableEntity, map[string]any{
+ "invalidTopics": invalidTopics,
+ "message": "Topic names are invalid",
+ })
+ return
+ }
+
+ err := repo_model.SaveTopics(ctx, ctx.Repo.Repository.ID, validTopics...)
+ if err != nil {
+ log.Error("SaveTopics failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// AddTopic adds a topic name to a repo
+func AddTopic(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/topics/{topic} repository repoAddTopic
+ // ---
+ // summary: Add a topic to a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: topic
+ // in: path
+ // description: name of the topic to add
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/invalidTopicsError"
+
+ topicName := strings.TrimSpace(strings.ToLower(ctx.Params(":topic")))
+
+ if !repo_model.ValidateTopic(topicName) {
+ ctx.JSON(http.StatusUnprocessableEntity, map[string]any{
+ "invalidTopics": topicName,
+ "message": "Topic name is invalid",
+ })
+ return
+ }
+
+ // Prevent adding more topics than allowed to repo
+ count, err := repo_model.CountTopics(ctx, &repo_model.FindTopicOptions{
+ RepoID: ctx.Repo.Repository.ID,
+ })
+ if err != nil {
+ log.Error("CountTopics failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+ if count >= 25 {
+ ctx.JSON(http.StatusUnprocessableEntity, map[string]any{
+ "message": "Exceeding maximum allowed topics per repo.",
+ })
+ return
+ }
+
+ _, err = repo_model.AddTopic(ctx, ctx.Repo.Repository.ID, topicName)
+ if err != nil {
+ log.Error("AddTopic failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteTopic removes a topic from a repository
+func DeleteTopic(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/topics/{topic} repository repoDeleteTopic
+ // ---
+ // summary: Delete a topic from a repository
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: topic
+ // in: path
+ // description: name of the topic to delete
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/invalidTopicsError"
+
+ topicName := strings.TrimSpace(strings.ToLower(ctx.Params(":topic")))
+
+ if !repo_model.ValidateTopic(topicName) {
+ ctx.JSON(http.StatusUnprocessableEntity, map[string]any{
+ "invalidTopics": topicName,
+ "message": "Topic name is invalid",
+ })
+ return
+ }
+
+ topic, err := repo_model.DeleteTopic(ctx, ctx.Repo.Repository.ID, topicName)
+ if err != nil {
+ log.Error("DeleteTopic failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if topic == nil {
+ ctx.NotFound()
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// TopicSearch searches for topics by keyword
+func TopicSearch(ctx *context.APIContext) {
+ // swagger:operation GET /topics/search repository topicSearch
+ // ---
+ // summary: search topics via keyword
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: q
+ // in: query
+ // description: keywords to search
+ // required: true
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TopicListResponse"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := &repo_model.FindTopicOptions{
+ Keyword: ctx.FormString("q"),
+ ListOptions: utils.GetListOptions(ctx),
+ }
+
+ topics, total, err := repo_model.FindTopics(ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ topicResponses := make([]*api.TopicResponse, len(topics))
+ for i, topic := range topics {
+ topicResponses[i] = convert.ToTopicResponse(topic)
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, map[string]any{
+ "topics": topicResponses,
+ })
+}
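
UpdateTopics replaces the whole topic list and rejects more than 25 valid topics, so a client can pre-check the count before issuing the PUT. A sketch under an assumed host and token; the topics field name is assumed from the RepoTopicOptions body referenced in the swagger annotation.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	topics := []string{"forgejo", "api", "self-hosted"}
	if len(topics) > 25 {
		panic("the handler rejects more than 25 topics per repository")
	}

	// Field name assumed from api.RepoTopicOptions.
	body, _ := json.Marshal(map[string][]string{"topics": topics})

	req, err := http.NewRequest(http.MethodPut,
		"https://forgejo.example.com/api/v1/repos/some-owner/some-repo/topics",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token REPLACE_WITH_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 204 No Content on success, 422 with an "invalidTopics" payload otherwise.
	fmt.Println(resp.Status)
}
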
diff --git a/routers/api/v1/repo/transfer.go b/routers/api/v1/repo/transfer.go
new file mode 100644
index 0000000..0715aed
--- /dev/null
+++ b/routers/api/v1/repo/transfer.go
@@ -0,0 +1,254 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ quota_model "code.gitea.io/gitea/models/quota"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ repo_service "code.gitea.io/gitea/services/repository"
+)
+
+// Transfer transfers the ownership of a repository
+func Transfer(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/transfer repository repoTransfer
+ // ---
+ // summary: Transfer a repo ownership
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to transfer
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to transfer
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // description: "Transfer Options"
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/TransferRepoOption"
+ // responses:
+ // "202":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := web.GetForm(ctx).(*api.TransferRepoOption)
+
+ newOwner, err := user_model.GetUserByName(ctx, opts.NewOwner)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ ctx.Error(http.StatusNotFound, "", "The new owner does not exist or cannot be found")
+ return
+ }
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if newOwner.Type == user_model.UserTypeOrganization {
+ if !ctx.Doer.IsAdmin && newOwner.Visibility == api.VisibleTypePrivate && !organization.OrgFromUser(newOwner).HasMemberWithUserID(ctx, ctx.Doer.ID) {
+ // The user shouldn't know about this organization
+ ctx.Error(http.StatusNotFound, "", "The new owner does not exist or cannot be found")
+ return
+ }
+ }
+
+ if !ctx.CheckQuota(quota_model.LimitSubjectSizeReposAll, newOwner.ID, newOwner.Name) {
+ return
+ }
+
+ var teams []*organization.Team
+ if opts.TeamIDs != nil {
+ if !newOwner.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "repoTransfer", "Teams can only be added to organization-owned repositories")
+ return
+ }
+
+ org := convert.ToOrganization(ctx, organization.OrgFromUser(newOwner))
+ for _, tID := range *opts.TeamIDs {
+ team, err := organization.GetTeamByID(ctx, tID)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "team", fmt.Errorf("team %d not found", tID))
+ return
+ }
+
+ if team.OrgID != org.ID {
+ ctx.Error(http.StatusForbidden, "team", fmt.Errorf("team %d belongs not to org %d", tID, org.ID))
+ return
+ }
+
+ teams = append(teams, team)
+ }
+ }
+
+ if ctx.Repo.GitRepo != nil {
+ ctx.Repo.GitRepo.Close()
+ ctx.Repo.GitRepo = nil
+ }
+
+ oldFullname := ctx.Repo.Repository.FullName()
+
+ if err := repo_service.StartRepositoryTransfer(ctx, ctx.Doer, newOwner, ctx.Repo.Repository, teams); err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "StartRepositoryTransfer", err)
+ return
+ }
+
+ if models.IsErrRepoTransferInProgress(err) {
+ ctx.Error(http.StatusConflict, "StartRepositoryTransfer", err)
+ return
+ }
+
+ if repo_model.IsErrRepoAlreadyExist(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "StartRepositoryTransfer", err)
+ return
+ }
+
+ ctx.InternalServerError(err)
+ return
+ }
+
+ if ctx.Repo.Repository.Status == repo_model.RepositoryPendingTransfer {
+ log.Trace("Repository transfer initiated: %s -> %s", oldFullname, ctx.Repo.Repository.FullName())
+ ctx.JSON(http.StatusCreated, convert.ToRepo(ctx, ctx.Repo.Repository, access_model.Permission{AccessMode: perm.AccessModeAdmin}))
+ return
+ }
+
+ log.Trace("Repository transferred: %s -> %s", oldFullname, ctx.Repo.Repository.FullName())
+ ctx.JSON(http.StatusAccepted, convert.ToRepo(ctx, ctx.Repo.Repository, access_model.Permission{AccessMode: perm.AccessModeAdmin}))
+}
+
+// AcceptTransfer accepts a repo transfer
+func AcceptTransfer(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/transfer/accept repository acceptRepoTransfer
+ // ---
+ // summary: Accept a repo transfer
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to transfer
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to transfer
+ // type: string
+ // required: true
+ // responses:
+ // "202":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+
+ err := acceptOrRejectRepoTransfer(ctx, true)
+ if ctx.Written() {
+ return
+ }
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "acceptOrRejectRepoTransfer", err)
+ return
+ }
+
+ ctx.JSON(http.StatusAccepted, convert.ToRepo(ctx, ctx.Repo.Repository, ctx.Repo.Permission))
+}
+
+// RejectTransfer rejects a repo transfer
+func RejectTransfer(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/transfer/reject repository rejectRepoTransfer
+ // ---
+ // summary: Reject a repo transfer
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to transfer
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to transfer
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Repository"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := acceptOrRejectRepoTransfer(ctx, false)
+ if ctx.Written() {
+ return
+ }
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "acceptOrRejectRepoTransfer", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToRepo(ctx, ctx.Repo.Repository, ctx.Repo.Permission))
+}
+
+func acceptOrRejectRepoTransfer(ctx *context.APIContext, accept bool) error {
+ repoTransfer, err := models.GetPendingRepositoryTransfer(ctx, ctx.Repo.Repository)
+ if err != nil {
+ if models.IsErrNoPendingTransfer(err) {
+ ctx.NotFound()
+ return nil
+ }
+ return err
+ }
+
+ if err := repoTransfer.LoadAttributes(ctx); err != nil {
+ return err
+ }
+
+ if !repoTransfer.CanUserAcceptTransfer(ctx, ctx.Doer) {
+ ctx.Error(http.StatusForbidden, "CanUserAcceptTransfer", nil)
+ return fmt.Errorf("user does not have permissions to do this")
+ }
+
+ if accept {
+ recipient := repoTransfer.Recipient
+ if !ctx.CheckQuota(quota_model.LimitSubjectSizeReposAll, recipient.ID, recipient.Name) {
+ return nil
+ }
+
+ return repo_service.TransferOwnership(ctx, repoTransfer.Doer, repoTransfer.Recipient, ctx.Repo.Repository, repoTransfer.Teams)
+ }
+
+ return repo_service.CancelRepositoryTransfer(ctx, ctx.Repo.Repository)
+}
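
The Transfer handler above responds 201 while the transfer is still pending acceptance and 202 once it has been carried out, so a client should branch on the status code to decide whether a follow-up POST to /transfer/accept is needed. A sketch with a placeholder host, token, and new owner; the TransferRepoOption field name is assumed.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field name assumed from api.TransferRepoOption.
	body, _ := json.Marshal(map[string]string{"new_owner": "some-org"})

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/old-owner/some-repo/transfer",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token REPLACE_WITH_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated:
		// Still pending: the receiving side must call POST .../transfer/accept.
		fmt.Println("transfer pending acceptance")
	case http.StatusAccepted:
		fmt.Println("transfer completed")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
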
diff --git a/routers/api/v1/repo/tree.go b/routers/api/v1/repo/tree.go
new file mode 100644
index 0000000..353a996
--- /dev/null
+++ b/routers/api/v1/repo/tree.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/services/context"
+ files_service "code.gitea.io/gitea/services/repository/files"
+)
+
+// GetTree gets the tree of a repository.
+func GetTree(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/git/trees/{sha} repository GetTree
+ // ---
+ // summary: Gets the tree of a repository.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: sha
+ // in: path
+ // description: sha of the commit
+ // type: string
+ // required: true
+ // - name: recursive
+ // in: query
+ // description: show all directories and files
+ // required: false
+ // type: boolean
+ // - name: page
+ // in: query
+ // description: page number; the 'truncated' field in the response will be true if there are still more items after this page, false if the last page
+ // required: false
+ // type: integer
+ // - name: per_page
+ // in: query
+ // description: number of items per page
+ // required: false
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GitTreeResponse"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ sha := ctx.Params(":sha")
+ if len(sha) == 0 {
+ ctx.Error(http.StatusBadRequest, "", "sha not provided")
+ return
+ }
+ if tree, err := files_service.GetTreeBySHA(ctx, ctx.Repo.Repository, ctx.Repo.GitRepo, sha, ctx.FormInt("page"), ctx.FormInt("per_page"), ctx.FormBool("recursive")); err != nil {
+ ctx.Error(http.StatusBadRequest, "", err.Error())
+ } else {
+ ctx.SetTotalCountHeader(int64(tree.TotalCount))
+ ctx.JSON(http.StatusOK, tree)
+ }
+}
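
GetTree supports recursive listing and page/per_page pagination, and the total entry count is exposed through ctx.SetTotalCountHeader. A read-only sketch; the host and commit SHA are placeholders, and the X-Total-Count header name is an assumption about how SetTotalCountHeader surfaces the count.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	sha := "REPLACE_WITH_COMMIT_SHA"
	url := "https://forgejo.example.com/api/v1/repos/some-owner/some-repo/git/trees/" +
		sha + "?recursive=true&per_page=10&page=1"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Total number of tree entries, as set via ctx.SetTotalCountHeader above
	// (the X-Total-Count header name is an assumption).
	fmt.Println("total entries:", resp.Header.Get("X-Total-Count"))

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The JSON body carries a "truncated" flag when more pages remain.
	fmt.Println(string(raw))
}
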
diff --git a/routers/api/v1/repo/wiki.go b/routers/api/v1/repo/wiki.go
new file mode 100644
index 0000000..12aaa8e
--- /dev/null
+++ b/routers/api/v1/repo/wiki.go
@@ -0,0 +1,536 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ notify_service "code.gitea.io/gitea/services/notify"
+ wiki_service "code.gitea.io/gitea/services/wiki"
+)
+
+// NewWikiPage handles the request to create a wiki page
+func NewWikiPage(ctx *context.APIContext) {
+ // swagger:operation POST /repos/{owner}/{repo}/wiki/new repository repoCreateWikiPage
+ // ---
+ // summary: Create a wiki page
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateWikiPageOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/WikiPage"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := web.GetForm(ctx).(*api.CreateWikiPageOptions)
+
+ if util.IsEmptyString(form.Title) {
+ ctx.Error(http.StatusBadRequest, "emptyTitle", nil)
+ return
+ }
+
+ wikiName := wiki_service.UserTitleToWebPath("", form.Title)
+
+ if len(form.Message) == 0 {
+ form.Message = fmt.Sprintf("Add %q", form.Title)
+ }
+
+ content, err := base64.StdEncoding.DecodeString(form.ContentBase64)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "invalid base64 encoding of content", err)
+ return
+ }
+ form.ContentBase64 = string(content)
+
+ if err := wiki_service.AddWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, wikiName, form.ContentBase64, form.Message); err != nil {
+ if repo_model.IsErrWikiReservedName(err) {
+ ctx.Error(http.StatusBadRequest, "IsErrWikiReservedName", err)
+ } else if repo_model.IsErrWikiAlreadyExist(err) {
+ ctx.Error(http.StatusBadRequest, "IsErrWikiAlreadyExists", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "AddWikiPage", err)
+ }
+ return
+ }
+
+ wikiPage := getWikiPage(ctx, wikiName)
+
+ if !ctx.Written() {
+ notify_service.NewWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, string(wikiName), form.Message)
+ ctx.JSON(http.StatusCreated, wikiPage)
+ }
+}
+
+// EditWikiPage handles the request to edit a wiki page
+func EditWikiPage(ctx *context.APIContext) {
+ // swagger:operation PATCH /repos/{owner}/{repo}/wiki/page/{pageName} repository repoEditWikiPage
+ // ---
+ // summary: Edit a wiki page
+ // consumes:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: pageName
+ // in: path
+ // description: name of the page
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateWikiPageOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WikiPage"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "413":
+ // "$ref": "#/responses/quotaExceeded"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ form := web.GetForm(ctx).(*api.CreateWikiPageOptions)
+
+ oldWikiName := wiki_service.WebPathFromRequest(ctx.PathParamRaw(":pageName"))
+ newWikiName := wiki_service.UserTitleToWebPath("", form.Title)
+
+ if len(newWikiName) == 0 {
+ newWikiName = oldWikiName
+ }
+
+ if len(form.Message) == 0 {
+ form.Message = fmt.Sprintf("Update %q", newWikiName)
+ }
+
+ content, err := base64.StdEncoding.DecodeString(form.ContentBase64)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "invalid base64 encoding of content", err)
+ return
+ }
+ form.ContentBase64 = string(content)
+
+ if err := wiki_service.EditWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, oldWikiName, newWikiName, form.ContentBase64, form.Message); err != nil {
+ ctx.Error(http.StatusInternalServerError, "EditWikiPage", err)
+ return
+ }
+
+ wikiPage := getWikiPage(ctx, newWikiName)
+
+ if !ctx.Written() {
+ notify_service.EditWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, string(newWikiName), form.Message)
+ ctx.JSON(http.StatusOK, wikiPage)
+ }
+}
+
+func getWikiPage(ctx *context.APIContext, wikiName wiki_service.WebPath) *api.WikiPage {
+ wikiRepo, commit := findWikiRepoCommit(ctx)
+ if wikiRepo != nil {
+ defer wikiRepo.Close()
+ }
+ if ctx.Written() {
+ return nil
+ }
+
+ // lookup filename in wiki - get file content and real filename
+ content, pageFilename := wikiContentsByName(ctx, commit, wikiName, false)
+ if ctx.Written() {
+ return nil
+ }
+
+ sidebarContent, _ := wikiContentsByName(ctx, commit, "_Sidebar", true)
+ if ctx.Written() {
+ return nil
+ }
+
+ footerContent, _ := wikiContentsByName(ctx, commit, "_Footer", true)
+ if ctx.Written() {
+ return nil
+ }
+
+ // get commit count - wiki revisions
+ commitsCount, _ := wikiRepo.FileCommitsCount(ctx.Repo.Repository.GetWikiBranchName(), pageFilename)
+
+ // Get last change information.
+ lastCommit, err := wikiRepo.GetCommitByPath(pageFilename)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommitByPath", err)
+ return nil
+ }
+
+ return &api.WikiPage{
+ WikiPageMetaData: wiki_service.ToWikiPageMetaData(wikiName, lastCommit, ctx.Repo.Repository),
+ ContentBase64: content,
+ CommitCount: commitsCount,
+ Sidebar: sidebarContent,
+ Footer: footerContent,
+ }
+}
+
+// DeleteWikiPage delete wiki page
+func DeleteWikiPage(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/wiki/page/{pageName} repository repoDeleteWikiPage
+ // ---
+ // summary: Delete a wiki page
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: pageName
+ // in: path
+ // description: name of the page
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "423":
+ // "$ref": "#/responses/repoArchivedError"
+
+ wikiName := wiki_service.WebPathFromRequest(ctx.PathParamRaw(":pageName"))
+
+ if err := wiki_service.DeleteWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, wikiName); err != nil {
+ if err.Error() == "file does not exist" {
+ ctx.NotFound(err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "DeleteWikiPage", err)
+ return
+ }
+
+ notify_service.DeleteWikiPage(ctx, ctx.Doer, ctx.Repo.Repository, string(wikiName))
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// ListWikiPages get wiki pages list
+func ListWikiPages(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/wiki/pages repository repoGetWikiPages
+ // ---
+ // summary: Get all wiki pages
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WikiPageList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ wikiRepo, commit := findWikiRepoCommit(ctx)
+ if wikiRepo != nil {
+ defer wikiRepo.Close()
+ }
+ if ctx.Written() {
+ return
+ }
+
+ page := ctx.FormInt("page")
+ if page <= 1 {
+ page = 1
+ }
+ limit := ctx.FormInt("limit")
+ if limit <= 1 {
+ limit = setting.API.DefaultPagingNum
+ }
+
+ skip := (page - 1) * limit
+ max := page * limit
+
+ entries, err := commit.ListEntries()
+ if err != nil {
+ ctx.ServerError("ListEntries", err)
+ return
+ }
+ pages := make([]*api.WikiPageMetaData, 0, len(entries))
+ for i, entry := range entries {
+ if i < skip || i >= max || !entry.IsRegular() {
+ continue
+ }
+ c, err := wikiRepo.GetCommitByPath(entry.Name())
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetCommit", err)
+ return
+ }
+ wikiName, err := wiki_service.GitPathToWebPath(entry.Name())
+ if err != nil {
+ if repo_model.IsErrWikiInvalidFileName(err) {
+ continue
+ }
+ ctx.Error(http.StatusInternalServerError, "WikiFilenameToName", err)
+ return
+ }
+ pages = append(pages, wiki_service.ToWikiPageMetaData(wikiName, c, ctx.Repo.Repository))
+ }
+
+ ctx.SetTotalCountHeader(int64(len(entries)))
+ ctx.JSON(http.StatusOK, pages)
+}
+
+// GetWikiPage get single wiki page
+func GetWikiPage(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/wiki/page/{pageName} repository repoGetWikiPage
+ // ---
+ // summary: Get a wiki page
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: pageName
+ // in: path
+ // description: name of the page
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WikiPage"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ // get requested pagename
+ pageName := wiki_service.WebPathFromRequest(ctx.PathParamRaw(":pageName"))
+
+ wikiPage := getWikiPage(ctx, pageName)
+ if !ctx.Written() {
+ ctx.JSON(http.StatusOK, wikiPage)
+ }
+}
+
+// ListPageRevisions renders file revision list of wiki page
+func ListPageRevisions(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/wiki/revisions/{pageName} repository repoGetWikiPageRevisions
+ // ---
+ // summary: Get revisions of a wiki page
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // - name: pageName
+ // in: path
+ // description: name of the page
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WikiCommitList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ wikiRepo, commit := findWikiRepoCommit(ctx)
+ if wikiRepo != nil {
+ defer wikiRepo.Close()
+ }
+ if ctx.Written() {
+ return
+ }
+
+ // get requested pagename
+ pageName := wiki_service.WebPathFromRequest(ctx.PathParamRaw(":pageName"))
+ if len(pageName) == 0 {
+ pageName = "Home"
+ }
+
+ // lookup filename in wiki - get file content and real filename
+ _, pageFilename := wikiContentsByName(ctx, commit, pageName, false)
+ if ctx.Written() {
+ return
+ }
+
+ // get commit count - wiki revisions
+ commitsCount, _ := wikiRepo.FileCommitsCount(ctx.Repo.Repository.GetWikiBranchName(), pageFilename)
+
+ page := ctx.FormInt("page")
+ if page <= 1 {
+ page = 1
+ }
+
+ // get the paginated commit history for the page
+ commitsHistory, err := wikiRepo.CommitsByFileAndRange(
+ git.CommitsByFileAndRangeOptions{
+ Revision: ctx.Repo.Repository.GetWikiBranchName(),
+ File: pageFilename,
+ Page: page,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "CommitsByFileAndRange", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(commitsCount)
+ ctx.JSON(http.StatusOK, convert.ToWikiCommitList(commitsHistory, commitsCount))
+}
+
+// findEntryForFile finds the tree entry for a target filepath.
+func findEntryForFile(commit *git.Commit, target string) (*git.TreeEntry, error) {
+ entry, err := commit.GetTreeEntryByPath(target)
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ return entry, nil
+ }
+
+ // Then the unescaped, shortest alternative
+ var unescapedTarget string
+ if unescapedTarget, err = url.QueryUnescape(target); err != nil {
+ return nil, err
+ }
+ return commit.GetTreeEntryByPath(unescapedTarget)
+}
+
+// findWikiRepoCommit opens the wiki repo and returns the latest commit, writing to context on error.
+// The caller is responsible for closing the returned repo again
+func findWikiRepoCommit(ctx *context.APIContext) (*git.Repository, *git.Commit) {
+ wikiRepo, err := gitrepo.OpenWikiRepository(ctx, ctx.Repo.Repository)
+ if err != nil {
+ if git.IsErrNotExist(err) || err.Error() == "no such file or directory" {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
+ }
+ return nil, nil
+ }
+
+ commit, err := wikiRepo.GetBranchCommit(ctx.Repo.Repository.GetWikiBranchName())
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ ctx.NotFound(err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetBranchCommit", err)
+ }
+ return wikiRepo, nil
+ }
+ return wikiRepo, commit
+}
+
+// wikiContentsByEntry returns the contents of the wiki page referenced by the
+// given tree entry, encoded with base64. Writes to ctx if an error occurs.
+func wikiContentsByEntry(ctx *context.APIContext, entry *git.TreeEntry) string {
+ blob := entry.Blob()
+ if blob.Size() > setting.API.DefaultMaxBlobSize {
+ return ""
+ }
+ content, err := blob.GetBlobContentBase64()
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetBlobContentBase64", err)
+ return ""
+ }
+ return content
+}
+
+// wikiContentsByName returns the base64-encoded contents of a wiki page and its
+// git filename. Writes to ctx if an error occurs.
+func wikiContentsByName(ctx *context.APIContext, commit *git.Commit, wikiName wiki_service.WebPath, isSidebarOrFooter bool) (string, string) {
+ gitFilename := wiki_service.WebPathToGitPath(wikiName)
+ entry, err := findEntryForFile(commit, gitFilename)
+ if err != nil {
+ if git.IsErrNotExist(err) {
+ if !isSidebarOrFooter {
+ ctx.NotFound()
+ }
+ } else {
+ ctx.ServerError("findEntryForFile", err)
+ }
+ return "", ""
+ }
+ return wikiContentsByEntry(ctx, entry), gitFilename
+}
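
NewWikiPage and EditWikiPage expect the page body base64-encoded (the handlers decode ContentBase64 before saving), and getWikiPage returns the stored content base64-encoded again. A sketch of the encoding side; the host, token, and CreateWikiPageOptions field names (title, content_base64, message) are assumptions for illustration.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	page := "# Home\n\nWelcome to the wiki."

	// The handler base64-decodes the content before storing the page.
	payload := map[string]string{
		"title":          "Home",
		"content_base64": base64.StdEncoding.EncodeToString([]byte(page)),
		"message":        "Add Home page",
	}
	body, _ := json.Marshal(payload)

	req, err := http.NewRequest(http.MethodPost,
		"https://forgejo.example.com/api/v1/repos/some-owner/some-repo/wiki/new",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token REPLACE_WITH_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 201 Created with a WikiPage body whose content can be decoded the same way.
	fmt.Println(resp.Status)
}
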
diff --git a/routers/api/v1/settings/settings.go b/routers/api/v1/settings/settings.go
new file mode 100644
index 0000000..c422315
--- /dev/null
+++ b/routers/api/v1/settings/settings.go
@@ -0,0 +1,86 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package settings
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+)
+
+// GetGeneralUISettings returns instance's global settings for ui
+func GetGeneralUISettings(ctx *context.APIContext) {
+ // swagger:operation GET /settings/ui settings getGeneralUISettings
+ // ---
+ // summary: Get instance's global settings for ui
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GeneralUISettings"
+ ctx.JSON(http.StatusOK, api.GeneralUISettings{
+ DefaultTheme: setting.UI.DefaultTheme,
+ AllowedReactions: setting.UI.Reactions,
+ CustomEmojis: setting.UI.CustomEmojis,
+ })
+}
+
+// GetGeneralAPISettings returns instance's global settings for api
+func GetGeneralAPISettings(ctx *context.APIContext) {
+ // swagger:operation GET /settings/api settings getGeneralAPISettings
+ // ---
+ // summary: Get instance's global settings for api
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GeneralAPISettings"
+ ctx.JSON(http.StatusOK, api.GeneralAPISettings{
+ MaxResponseItems: setting.API.MaxResponseItems,
+ DefaultPagingNum: setting.API.DefaultPagingNum,
+ DefaultGitTreesPerPage: setting.API.DefaultGitTreesPerPage,
+ DefaultMaxBlobSize: setting.API.DefaultMaxBlobSize,
+ })
+}
+
+// GetGeneralRepoSettings returns instance's global settings for repositories
+func GetGeneralRepoSettings(ctx *context.APIContext) {
+ // swagger:operation GET /settings/repository settings getGeneralRepositorySettings
+ // ---
+ // summary: Get instance's global settings for repositories
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GeneralRepoSettings"
+ ctx.JSON(http.StatusOK, api.GeneralRepoSettings{
+ MirrorsDisabled: !setting.Mirror.Enabled,
+ HTTPGitDisabled: setting.Repository.DisableHTTPGit,
+ MigrationsDisabled: setting.Repository.DisableMigrations,
+ StarsDisabled: setting.Repository.DisableStars,
+ ForksDisabled: setting.Repository.DisableForks,
+ TimeTrackingDisabled: !setting.Service.EnableTimetracking,
+ LFSDisabled: !setting.LFS.StartServer,
+ })
+}
+
+// GetGeneralAttachmentSettings returns instance's global settings for Attachment
+func GetGeneralAttachmentSettings(ctx *context.APIContext) {
+ // swagger:operation GET /settings/attachment settings getGeneralAttachmentSettings
+ // ---
+ // summary: Get instance's global settings for Attachment
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GeneralAttachmentSettings"
+ ctx.JSON(http.StatusOK, api.GeneralAttachmentSettings{
+ Enabled: setting.Attachment.Enabled,
+ AllowedTypes: setting.Attachment.AllowedTypes,
+ MaxFiles: setting.Attachment.MaxFiles,
+ MaxSize: setting.Attachment.MaxSize,
+ })
+}
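
The settings endpoints above are simple GETs that return small JSON documents built straight from the instance configuration. A sketch that fetches /settings/api and prints whatever fields come back, so no JSON field names need to be assumed; the host is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://forgejo.example.com/api/v1/settings/api")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode into a generic map so no field names have to be assumed here.
	var settings map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&settings); err != nil {
		panic(err)
	}
	for k, v := range settings {
		fmt.Printf("%s = %v\n", k, v)
	}
}
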
diff --git a/routers/api/v1/shared/quota.go b/routers/api/v1/shared/quota.go
new file mode 100644
index 0000000..b892df4
--- /dev/null
+++ b/routers/api/v1/shared/quota.go
@@ -0,0 +1,105 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package shared
+
+import (
+ "net/http"
+
+ quota_model "code.gitea.io/gitea/models/quota"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+func GetQuota(ctx *context.APIContext, userID int64) {
+ used, err := quota_model.GetUsedForUser(ctx, userID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.GetUsedForUser", err)
+ return
+ }
+
+ groups, err := quota_model.GetGroupsForUser(ctx, userID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.GetGroupsForUser", err)
+ return
+ }
+
+ result := convert.ToQuotaInfo(used, groups, false)
+ ctx.JSON(http.StatusOK, &result)
+}
+
+func CheckQuota(ctx *context.APIContext, userID int64) {
+ subjectQuery := ctx.FormTrim("subject")
+
+ subject, err := quota_model.ParseLimitSubject(subjectQuery)
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "quota_model.ParseLimitSubject", err)
+ return
+ }
+
+ ok, err := quota_model.EvaluateForUser(ctx, userID, subject)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "quota_model.EvaluateForUser", err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, &ok)
+}
+
+func ListQuotaAttachments(ctx *context.APIContext, userID int64) {
+ opts := utils.GetListOptions(ctx)
+ count, attachments, err := quota_model.GetQuotaAttachmentsForUser(ctx, userID, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetQuotaAttachmentsForUser", err)
+ return
+ }
+
+ result, err := convert.ToQuotaUsedAttachmentList(ctx, *attachments)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToQuotaUsedAttachmentList", err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(count), opts.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, result)
+}
+
+func ListQuotaPackages(ctx *context.APIContext, userID int64) {
+ opts := utils.GetListOptions(ctx)
+ count, packages, err := quota_model.GetQuotaPackagesForUser(ctx, userID, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetQuotaPackagesForUser", err)
+ return
+ }
+
+ result, err := convert.ToQuotaUsedPackageList(ctx, *packages)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToQuotaUsedPackageList", err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(count), opts.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, result)
+}
+
+func ListQuotaArtifacts(ctx *context.APIContext, userID int64) {
+ opts := utils.GetListOptions(ctx)
+ count, artifacts, err := quota_model.GetQuotaArtifactsForUser(ctx, userID, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetQuotaArtifactsForUser", err)
+ return
+ }
+
+ result, err := convert.ToQuotaUsedArtifactList(ctx, *artifacts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToQuotaUsedArtifactList", err)
+ return
+ }
+
+ ctx.SetLinkHeader(int(count), opts.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, result)
+}
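
These helpers are shared by the user, organization and admin quota routes; the routing itself is wired up elsewhere. A sketch of calling a quota-check endpoint from a client, assuming the route GET /api/v1/user/quota/check is mounted on CheckQuota and that "size:all" is a valid limit subject — neither the route table nor ParseLimitSubject appears in this diff, so both are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Assumed route mounted on CheckQuota; the handler returns a bare JSON boolean.
	endpoint := "http://localhost:3000/api/v1/user/quota/check"
	query := url.Values{"subject": {"size:all"}} // assumed limit-subject string

	req, err := http.NewRequest(http.MethodGet, endpoint+"?"+query.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN") // placeholder

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var withinQuota bool
	if err := json.NewDecoder(resp.Body).Decode(&withinQuota); err != nil {
		panic(err)
	}
	fmt.Println("within quota:", withinQuota)
}
```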
diff --git a/routers/api/v1/shared/runners.go b/routers/api/v1/shared/runners.go
new file mode 100644
index 0000000..f184786
--- /dev/null
+++ b/routers/api/v1/shared/runners.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package shared
+
+import (
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/services/context"
+)
+
+// RegistrationToken is a string used to register a runner with a server
+// swagger:response RegistrationToken
+type RegistrationToken struct {
+ Token string `json:"token"`
+}
+
+func GetRegistrationToken(ctx *context.APIContext, ownerID, repoID int64) {
+ token, err := actions_model.GetLatestRunnerToken(ctx, ownerID, repoID)
+ if errors.Is(err, util.ErrNotExist) || (token != nil && !token.IsActive) {
+ token, err = actions_model.NewRunnerToken(ctx, ownerID, repoID)
+ }
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, RegistrationToken{Token: token.Token})
+}
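
GetRegistrationToken reuses the newest active runner token for the given owner/repo pair and mints a fresh one only when none is usable. A sketch of fetching such a token over HTTP, assuming a repository-level route of the form GET /api/v1/repos/{owner}/{repo}/actions/runners/registration-token (the route registration is not part of this file, so the path is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// registrationToken matches the RegistrationToken response defined above.
type registrationToken struct {
	Token string `json:"token"`
}

func main() {
	// Assumed route; owner, repo and the access token are placeholders.
	endpoint := "http://localhost:3000/api/v1/repos/owner/repo/actions/runners/registration-token"
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var rt registrationToken
	if err := json.NewDecoder(resp.Body).Decode(&rt); err != nil {
		panic(err)
	}
	fmt.Println("runner registration token:", rt.Token)
}
```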
diff --git a/routers/api/v1/swagger/action.go b/routers/api/v1/swagger/action.go
new file mode 100644
index 0000000..665f4d0
--- /dev/null
+++ b/routers/api/v1/swagger/action.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import api "code.gitea.io/gitea/modules/structs"
+
+// SecretList
+// swagger:response SecretList
+type swaggerResponseSecretList struct {
+ // in:body
+ Body []api.Secret `json:"body"`
+}
+
+// Secret
+// swagger:response Secret
+type swaggerResponseSecret struct {
+ // in:body
+ Body api.Secret `json:"body"`
+}
+
+// ActionVariable
+// swagger:response ActionVariable
+type swaggerResponseActionVariable struct {
+ // in:body
+ Body api.ActionVariable `json:"body"`
+}
+
+// VariableList
+// swagger:response VariableList
+type swaggerResponseVariableList struct {
+ // in:body
+ Body []api.ActionVariable `json:"body"`
+}
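
None of the structs in this swagger package are ever instantiated; go-swagger reads the swagger:response annotation plus the in:body field and exposes each one as a named response that operations reference via "$ref": "#/responses/...". The same pattern for a hypothetical new response would look like this (ExampleWidget is invented purely for illustration):

```go
package swagger

import api "code.gitea.io/gitea/modules/structs"

// ExampleWidget is a hypothetical response, shown only to illustrate the
// convention: go-swagger picks up the swagger:response annotation plus the
// in:body field and exposes it as "#/responses/ExampleWidget"; the struct
// itself is never instantiated at runtime.
// swagger:response ExampleWidget
type swaggerResponseExampleWidget struct {
	// in:body
	Body api.ActionVariable `json:"body"`
}
```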
diff --git a/routers/api/v1/swagger/activity.go b/routers/api/v1/swagger/activity.go
new file mode 100644
index 0000000..95e1ba9
--- /dev/null
+++ b/routers/api/v1/swagger/activity.go
@@ -0,0 +1,15 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// ActivityFeedsList
+// swagger:response ActivityFeedsList
+type swaggerActivityFeedsList struct {
+ // in:body
+ Body []api.Activity `json:"body"`
+}
diff --git a/routers/api/v1/swagger/activitypub.go b/routers/api/v1/swagger/activitypub.go
new file mode 100644
index 0000000..9134166
--- /dev/null
+++ b/routers/api/v1/swagger/activitypub.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// ActivityPub
+// swagger:response ActivityPub
+type swaggerResponseActivityPub struct {
+ // in:body
+ Body api.ActivityPub `json:"body"`
+}
diff --git a/routers/api/v1/swagger/app.go b/routers/api/v1/swagger/app.go
new file mode 100644
index 0000000..6a08b11
--- /dev/null
+++ b/routers/api/v1/swagger/app.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// OAuth2Application
+// swagger:response OAuth2Application
+type swaggerResponseOAuth2Application struct {
+ // in:body
+ Body api.OAuth2Application `json:"body"`
+}
+
+// AccessToken represents an API access token.
+// swagger:response AccessToken
+type swaggerResponseAccessToken struct {
+ // in:body
+ Body api.AccessToken `json:"body"`
+}
diff --git a/routers/api/v1/swagger/cron.go b/routers/api/v1/swagger/cron.go
new file mode 100644
index 0000000..00cfbe0
--- /dev/null
+++ b/routers/api/v1/swagger/cron.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// CronList
+// swagger:response CronList
+type swaggerResponseCronList struct {
+ // in:body
+ Body []api.Cron `json:"body"`
+}
diff --git a/routers/api/v1/swagger/issue.go b/routers/api/v1/swagger/issue.go
new file mode 100644
index 0000000..62458a3
--- /dev/null
+++ b/routers/api/v1/swagger/issue.go
@@ -0,0 +1,127 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// Issue
+// swagger:response Issue
+type swaggerResponseIssue struct {
+ // in:body
+ Body api.Issue `json:"body"`
+}
+
+// IssueList
+// swagger:response IssueList
+type swaggerResponseIssueList struct {
+ // in:body
+ Body []api.Issue `json:"body"`
+}
+
+// Comment
+// swagger:response Comment
+type swaggerResponseComment struct {
+ // in:body
+ Body api.Comment `json:"body"`
+}
+
+// CommentList
+// swagger:response CommentList
+type swaggerResponseCommentList struct {
+ // in:body
+ Body []api.Comment `json:"body"`
+}
+
+// TimelineList
+// swagger:response TimelineList
+type swaggerResponseTimelineList struct {
+ // in:body
+ Body []api.TimelineComment `json:"body"`
+}
+
+// Label
+// swagger:response Label
+type swaggerResponseLabel struct {
+ // in:body
+ Body api.Label `json:"body"`
+}
+
+// LabelList
+// swagger:response LabelList
+type swaggerResponseLabelList struct {
+ // in:body
+ Body []api.Label `json:"body"`
+}
+
+// Milestone
+// swagger:response Milestone
+type swaggerResponseMilestone struct {
+ // in:body
+ Body api.Milestone `json:"body"`
+}
+
+// MilestoneList
+// swagger:response MilestoneList
+type swaggerResponseMilestoneList struct {
+ // in:body
+ Body []api.Milestone `json:"body"`
+}
+
+// TrackedTime
+// swagger:response TrackedTime
+type swaggerResponseTrackedTime struct {
+ // in:body
+ Body api.TrackedTime `json:"body"`
+}
+
+// TrackedTimeList
+// swagger:response TrackedTimeList
+type swaggerResponseTrackedTimeList struct {
+ // in:body
+ Body []api.TrackedTime `json:"body"`
+}
+
+// IssueDeadline
+// swagger:response IssueDeadline
+type swaggerIssueDeadline struct {
+ // in:body
+ Body api.IssueDeadline `json:"body"`
+}
+
+// IssueTemplates
+// swagger:response IssueTemplates
+type swaggerIssueTemplates struct {
+ // in:body
+ Body []api.IssueTemplate `json:"body"`
+}
+
+// StopWatch
+// swagger:response StopWatch
+type swaggerResponseStopWatch struct {
+ // in:body
+ Body api.StopWatch `json:"body"`
+}
+
+// StopWatchList
+// swagger:response StopWatchList
+type swaggerResponseStopWatchList struct {
+ // in:body
+ Body []api.StopWatch `json:"body"`
+}
+
+// Reaction
+// swagger:response Reaction
+type swaggerReaction struct {
+ // in:body
+ Body api.Reaction `json:"body"`
+}
+
+// ReactionList
+// swagger:response ReactionList
+type swaggerReactionList struct {
+ // in:body
+ Body []api.Reaction `json:"body"`
+}
diff --git a/routers/api/v1/swagger/key.go b/routers/api/v1/swagger/key.go
new file mode 100644
index 0000000..8390833
--- /dev/null
+++ b/routers/api/v1/swagger/key.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// PublicKey
+// swagger:response PublicKey
+type swaggerResponsePublicKey struct {
+ // in:body
+ Body api.PublicKey `json:"body"`
+}
+
+// PublicKeyList
+// swagger:response PublicKeyList
+type swaggerResponsePublicKeyList struct {
+ // in:body
+ Body []api.PublicKey `json:"body"`
+}
+
+// GPGKey
+// swagger:response GPGKey
+type swaggerResponseGPGKey struct {
+ // in:body
+ Body api.GPGKey `json:"body"`
+}
+
+// GPGKeyList
+// swagger:response GPGKeyList
+type swaggerResponseGPGKeyList struct {
+ // in:body
+ Body []api.GPGKey `json:"body"`
+}
+
+// DeployKey
+// swagger:response DeployKey
+type swaggerResponseDeployKey struct {
+ // in:body
+ Body api.DeployKey `json:"body"`
+}
+
+// DeployKeyList
+// swagger:response DeployKeyList
+type swaggerResponseDeployKeyList struct {
+ // in:body
+ Body []api.DeployKey `json:"body"`
+}
diff --git a/routers/api/v1/swagger/misc.go b/routers/api/v1/swagger/misc.go
new file mode 100644
index 0000000..0553eac
--- /dev/null
+++ b/routers/api/v1/swagger/misc.go
@@ -0,0 +1,71 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// ServerVersion
+// swagger:response ServerVersion
+type swaggerResponseServerVersion struct {
+ // in:body
+ Body api.ServerVersion `json:"body"`
+}
+
+// GitignoreTemplateList
+// swagger:response GitignoreTemplateList
+type swaggerResponseGitignoreTemplateList struct {
+ // in:body
+ Body []string `json:"body"`
+}
+
+// GitignoreTemplateInfo
+// swagger:response GitignoreTemplateInfo
+type swaggerResponseGitignoreTemplateInfo struct {
+ // in:body
+ Body api.GitignoreTemplateInfo `json:"body"`
+}
+
+// LicenseTemplateList
+// swagger:response LicenseTemplateList
+type swaggerResponseLicensesTemplateList struct {
+ // in:body
+ Body []api.LicensesTemplateListEntry `json:"body"`
+}
+
+// LicenseTemplateInfo
+// swagger:response LicenseTemplateInfo
+type swaggerResponseLicenseTemplateInfo struct {
+ // in:body
+ Body api.LicenseTemplateInfo `json:"body"`
+}
+
+// StringSlice
+// swagger:response StringSlice
+type swaggerResponseStringSlice struct {
+ // in:body
+ Body []string `json:"body"`
+}
+
+// LabelTemplateList
+// swagger:response LabelTemplateList
+type swaggerResponseLabelTemplateList struct {
+ // in:body
+ Body []string `json:"body"`
+}
+
+// LabelTemplateInfo
+// swagger:response LabelTemplateInfo
+type swaggerResponseLabelTemplateInfo struct {
+ // in:body
+ Body []api.LabelTemplate `json:"body"`
+}
+
+// Boolean
+// swagger:response boolean
+type swaggerResponseBoolean struct {
+ // in:body
+ Body bool `json:"body"`
+}
diff --git a/routers/api/v1/swagger/nodeinfo.go b/routers/api/v1/swagger/nodeinfo.go
new file mode 100644
index 0000000..8650dfa
--- /dev/null
+++ b/routers/api/v1/swagger/nodeinfo.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// NodeInfo
+// swagger:response NodeInfo
+type swaggerResponseNodeInfo struct {
+ // in:body
+ Body api.NodeInfo `json:"body"`
+}
diff --git a/routers/api/v1/swagger/notify.go b/routers/api/v1/swagger/notify.go
new file mode 100644
index 0000000..743d807
--- /dev/null
+++ b/routers/api/v1/swagger/notify.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// NotificationThread
+// swagger:response NotificationThread
+type swaggerNotificationThread struct {
+ // in:body
+ Body api.NotificationThread `json:"body"`
+}
+
+// NotificationThreadList
+// swagger:response NotificationThreadList
+type swaggerNotificationThreadList struct {
+ // in:body
+ Body []api.NotificationThread `json:"body"`
+}
+
+// Number of unread notifications
+// swagger:response NotificationCount
+type swaggerNotificationCount struct {
+ // in:body
+ Body api.NotificationCount `json:"body"`
+}
diff --git a/routers/api/v1/swagger/options.go b/routers/api/v1/swagger/options.go
new file mode 100644
index 0000000..3034b09
--- /dev/null
+++ b/routers/api/v1/swagger/options.go
@@ -0,0 +1,234 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ ffed "code.gitea.io/gitea/modules/forgefed"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/forms"
+)
+
+// not actually a response, just a hack to get go-swagger to include definitions
+// of the various XYZOption structs
+
+// parameterBodies
+// swagger:response parameterBodies
+type swaggerParameterBodies struct {
+ // in:body
+ ForgeLike ffed.ForgeLike
+
+ // in:body
+ AddCollaboratorOption api.AddCollaboratorOption
+
+ // in:body
+ ReplaceFlagsOption api.ReplaceFlagsOption
+
+ // in:body
+ CreateEmailOption api.CreateEmailOption
+ // in:body
+ DeleteEmailOption api.DeleteEmailOption
+
+ // in:body
+ CreateHookOption api.CreateHookOption
+ // in:body
+ EditHookOption api.EditHookOption
+
+ // in:body
+ EditGitHookOption api.EditGitHookOption
+
+ // in:body
+ CreateIssueOption api.CreateIssueOption
+ // in:body
+ EditIssueOption api.EditIssueOption
+ // in:body
+ EditDeadlineOption api.EditDeadlineOption
+
+ // in:body
+ CreateIssueCommentOption api.CreateIssueCommentOption
+ // in:body
+ EditIssueCommentOption api.EditIssueCommentOption
+ // in:body
+ IssueMeta api.IssueMeta
+
+ // in:body
+ IssueLabelsOption api.IssueLabelsOption
+
+ // in:body
+ DeleteLabelsOption api.DeleteLabelsOption
+
+ // in:body
+ CreateKeyOption api.CreateKeyOption
+
+ // in:body
+ RenameUserOption api.RenameUserOption
+
+ // in:body
+ CreateLabelOption api.CreateLabelOption
+ // in:body
+ EditLabelOption api.EditLabelOption
+
+ // in:body
+ MarkupOption api.MarkupOption
+ // in:body
+ MarkdownOption api.MarkdownOption
+
+ // in:body
+ CreateMilestoneOption api.CreateMilestoneOption
+ // in:body
+ EditMilestoneOption api.EditMilestoneOption
+
+ // in:body
+ CreateOrgOption api.CreateOrgOption
+ // in:body
+ EditOrgOption api.EditOrgOption
+
+ // in:body
+ CreatePullRequestOption api.CreatePullRequestOption
+ // in:body
+ EditPullRequestOption api.EditPullRequestOption
+ // in:body
+ MergePullRequestOption forms.MergePullRequestForm
+
+ // in:body
+ CreateReleaseOption api.CreateReleaseOption
+ // in:body
+ EditReleaseOption api.EditReleaseOption
+
+ // in:body
+ CreateRepoOption api.CreateRepoOption
+ // in:body
+ EditRepoOption api.EditRepoOption
+ // in:body
+ TransferRepoOption api.TransferRepoOption
+ // in:body
+ CreateForkOption api.CreateForkOption
+ // in:body
+ GenerateRepoOption api.GenerateRepoOption
+
+ // in:body
+ CreateStatusOption api.CreateStatusOption
+
+ // in:body
+ CreateTeamOption api.CreateTeamOption
+ // in:body
+ EditTeamOption api.EditTeamOption
+
+ // in:body
+ AddTimeOption api.AddTimeOption
+
+ // in:body
+ CreateUserOption api.CreateUserOption
+
+ // in:body
+ EditUserOption api.EditUserOption
+
+ // in:body
+ EditAttachmentOptions api.EditAttachmentOptions
+
+ // in:body
+ ChangeFilesOptions api.ChangeFilesOptions
+
+ // in:body
+ CreateFileOptions api.CreateFileOptions
+
+ // in:body
+ UpdateFileOptions api.UpdateFileOptions
+
+ // in:body
+ DeleteFileOptions api.DeleteFileOptions
+
+ // in:body
+ CommitDateOptions api.CommitDateOptions
+
+ // in:body
+ RepoTopicOptions api.RepoTopicOptions
+
+ // in:body
+ EditReactionOption api.EditReactionOption
+
+ // in:body
+ CreateBranchRepoOption api.CreateBranchRepoOption
+
+ // in:body
+ CreateBranchProtectionOption api.CreateBranchProtectionOption
+
+ // in:body
+ EditBranchProtectionOption api.EditBranchProtectionOption
+
+ // in:body
+ CreateOAuth2ApplicationOptions api.CreateOAuth2ApplicationOptions
+
+ // in:body
+ CreatePullReviewOptions api.CreatePullReviewOptions
+
+ // in:body
+ CreatePullReviewComment api.CreatePullReviewComment
+
+ // in:body
+ CreatePullReviewCommentOptions api.CreatePullReviewCommentOptions
+
+ // in:body
+ SubmitPullReviewOptions api.SubmitPullReviewOptions
+
+ // in:body
+ DismissPullReviewOptions api.DismissPullReviewOptions
+
+ // in:body
+ MigrateRepoOptions api.MigrateRepoOptions
+
+ // in:body
+ PullReviewRequestOptions api.PullReviewRequestOptions
+
+ // in:body
+ CreateTagOption api.CreateTagOption
+
+ // in:body
+ CreateTagProtectionOption api.CreateTagProtectionOption
+
+ // in:body
+ EditTagProtectionOption api.EditTagProtectionOption
+
+ // in:body
+ CreateAccessTokenOption api.CreateAccessTokenOption
+
+ // in:body
+ UserSettingsOptions api.UserSettingsOptions
+
+ // in:body
+ CreateWikiPageOptions api.CreateWikiPageOptions
+
+ // in:body
+ CreatePushMirrorOption api.CreatePushMirrorOption
+
+ // in:body
+ UpdateUserAvatarOptions api.UpdateUserAvatarOption
+
+ // in:body
+ UpdateRepoAvatarOptions api.UpdateRepoAvatarOption
+
+ // in:body
+ CreateOrUpdateSecretOption api.CreateOrUpdateSecretOption
+
+ // in:body
+ CreateVariableOption api.CreateVariableOption
+
+ // in:body
+ UpdateVariableOption api.UpdateVariableOption
+
+ // in:body
+ DispatchWorkflowOption api.DispatchWorkflowOption
+
+ // in:body
+ CreateQuotaGroupOptions api.CreateQuotaGroupOptions
+
+ // in:body
+ CreateQuotaRuleOptions api.CreateQuotaRuleOptions
+
+ // in:body
+ EditQuotaRuleOptions api.EditQuotaRuleOptions
+
+ // in:body
+ SetUserQuotaGroupsOptions api.SetUserQuotaGroupsOptions
+}
diff --git a/routers/api/v1/swagger/org.go b/routers/api/v1/swagger/org.go
new file mode 100644
index 0000000..0105446
--- /dev/null
+++ b/routers/api/v1/swagger/org.go
@@ -0,0 +1,43 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// Organization
+// swagger:response Organization
+type swaggerResponseOrganization struct {
+ // in:body
+ Body api.Organization `json:"body"`
+}
+
+// OrganizationList
+// swagger:response OrganizationList
+type swaggerResponseOrganizationList struct {
+ // in:body
+ Body []api.Organization `json:"body"`
+}
+
+// Team
+// swagger:response Team
+type swaggerResponseTeam struct {
+ // in:body
+ Body api.Team `json:"body"`
+}
+
+// TeamList
+// swagger:response TeamList
+type swaggerResponseTeamList struct {
+ // in:body
+ Body []api.Team `json:"body"`
+}
+
+// OrganizationPermissions
+// swagger:response OrganizationPermissions
+type swaggerResponseOrganizationPermissions struct {
+ // in:body
+ Body api.OrganizationPermissions `json:"body"`
+}
diff --git a/routers/api/v1/swagger/package.go b/routers/api/v1/swagger/package.go
new file mode 100644
index 0000000..eada12d
--- /dev/null
+++ b/routers/api/v1/swagger/package.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// Package
+// swagger:response Package
+type swaggerResponsePackage struct {
+ // in:body
+ Body api.Package `json:"body"`
+}
+
+// PackageList
+// swagger:response PackageList
+type swaggerResponsePackageList struct {
+ // in:body
+ Body []api.Package `json:"body"`
+}
+
+// PackageFileList
+// swagger:response PackageFileList
+type swaggerResponsePackageFileList struct {
+ // in:body
+ Body []api.PackageFile `json:"body"`
+}
diff --git a/routers/api/v1/swagger/quota.go b/routers/api/v1/swagger/quota.go
new file mode 100644
index 0000000..35e633c
--- /dev/null
+++ b/routers/api/v1/swagger/quota.go
@@ -0,0 +1,64 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// QuotaInfo
+// swagger:response QuotaInfo
+type swaggerResponseQuotaInfo struct {
+ // in:body
+ Body api.QuotaInfo `json:"body"`
+}
+
+// QuotaRuleInfoList
+// swagger:response QuotaRuleInfoList
+type swaggerResponseQuotaRuleInfoList struct {
+ // in:body
+ Body []api.QuotaRuleInfo `json:"body"`
+}
+
+// QuotaRuleInfo
+// swagger:response QuotaRuleInfo
+type swaggerResponseQuotaRuleInfo struct {
+ // in:body
+ Body api.QuotaRuleInfo `json:"body"`
+}
+
+// QuotaUsedAttachmentList
+// swagger:response QuotaUsedAttachmentList
+type swaggerQuotaUsedAttachmentList struct {
+ // in:body
+ Body api.QuotaUsedAttachmentList `json:"body"`
+}
+
+// QuotaUsedPackageList
+// swagger:response QuotaUsedPackageList
+type swaggerQuotaUsedPackageList struct {
+ // in:body
+ Body api.QuotaUsedPackageList `json:"body"`
+}
+
+// QuotaUsedArtifactList
+// swagger:response QuotaUsedArtifactList
+type swaggerQuotaUsedArtifactList struct {
+ // in:body
+ Body api.QuotaUsedArtifactList `json:"body"`
+}
+
+// QuotaGroup
+// swagger:response QuotaGroup
+type swaggerResponseQuotaGroup struct {
+ // in:body
+ Body api.QuotaGroup `json:"body"`
+}
+
+// QuotaGroupList
+// swagger:response QuotaGroupList
+type swaggerResponseQuotaGroupList struct {
+ // in:body
+ Body api.QuotaGroupList `json:"body"`
+}
diff --git a/routers/api/v1/swagger/repo.go b/routers/api/v1/swagger/repo.go
new file mode 100644
index 0000000..ca214b4
--- /dev/null
+++ b/routers/api/v1/swagger/repo.go
@@ -0,0 +1,450 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// Repository
+// swagger:response Repository
+type swaggerResponseRepository struct {
+ // in:body
+ Body api.Repository `json:"body"`
+}
+
+// RepositoryList
+// swagger:response RepositoryList
+type swaggerResponseRepositoryList struct {
+ // in:body
+ Body []api.Repository `json:"body"`
+}
+
+// Branch
+// swagger:response Branch
+type swaggerResponseBranch struct {
+ // in:body
+ Body api.Branch `json:"body"`
+}
+
+// BranchList
+// swagger:response BranchList
+type swaggerResponseBranchList struct {
+ // in:body
+ Body []api.Branch `json:"body"`
+}
+
+// BranchProtection
+// swagger:response BranchProtection
+type swaggerResponseBranchProtection struct {
+ // in:body
+ Body api.BranchProtection `json:"body"`
+}
+
+// BranchProtectionList
+// swagger:response BranchProtectionList
+type swaggerResponseBranchProtectionList struct {
+ // in:body
+ Body []api.BranchProtection `json:"body"`
+}
+
+// TagList
+// swagger:response TagList
+type swaggerResponseTagList struct {
+ // in:body
+ Body []api.Tag `json:"body"`
+}
+
+// Tag
+// swagger:response Tag
+type swaggerResponseTag struct {
+ // in:body
+ Body api.Tag `json:"body"`
+}
+
+// AnnotatedTag
+// swagger:response AnnotatedTag
+type swaggerResponseAnnotatedTag struct {
+ // in:body
+ Body api.AnnotatedTag `json:"body"`
+}
+
+// TagProtectionList
+// swagger:response TagProtectionList
+type swaggerResponseTagProtectionList struct {
+ // in:body
+ Body []api.TagProtection `json:"body"`
+}
+
+// TagProtection
+// swagger:response TagProtection
+type swaggerResponseTagProtection struct {
+ // in:body
+ Body api.TagProtection `json:"body"`
+}
+
+// Reference
+// swagger:response Reference
+type swaggerResponseReference struct {
+ // in:body
+ Body api.Reference `json:"body"`
+}
+
+// ReferenceList
+// swagger:response ReferenceList
+type swaggerResponseReferenceList struct {
+ // in:body
+ Body []api.Reference `json:"body"`
+}
+
+// Hook
+// swagger:response Hook
+type swaggerResponseHook struct {
+ // in:body
+ Body api.Hook `json:"body"`
+}
+
+// HookList
+// swagger:response HookList
+type swaggerResponseHookList struct {
+ // in:body
+ Body []api.Hook `json:"body"`
+}
+
+// GitHook
+// swagger:response GitHook
+type swaggerResponseGitHook struct {
+ // in:body
+ Body api.GitHook `json:"body"`
+}
+
+// GitHookList
+// swagger:response GitHookList
+type swaggerResponseGitHookList struct {
+ // in:body
+ Body []api.GitHook `json:"body"`
+}
+
+// Release
+// swagger:response Release
+type swaggerResponseRelease struct {
+ // in:body
+ Body api.Release `json:"body"`
+}
+
+// ReleaseList
+// swagger:response ReleaseList
+type swaggerResponseReleaseList struct {
+ // in:body
+ Body []api.Release `json:"body"`
+}
+
+// PullRequest
+// swagger:response PullRequest
+type swaggerResponsePullRequest struct {
+ // in:body
+ Body api.PullRequest `json:"body"`
+}
+
+// PullRequestList
+// swagger:response PullRequestList
+type swaggerResponsePullRequestList struct {
+ // in:body
+ Body []api.PullRequest `json:"body"`
+}
+
+// PullReview
+// swagger:response PullReview
+type swaggerResponsePullReview struct {
+ // in:body
+ Body api.PullReview `json:"body"`
+}
+
+// PullReviewList
+// swagger:response PullReviewList
+type swaggerResponsePullReviewList struct {
+ // in:body
+ Body []api.PullReview `json:"body"`
+}
+
+// PullComment
+// swagger:response PullReviewComment
+type swaggerPullReviewComment struct {
+ // in:body
+ Body api.PullReviewComment `json:"body"`
+}
+
+// PullCommentList
+// swagger:response PullReviewCommentList
+type swaggerResponsePullReviewCommentList struct {
+ // in:body
+ Body []api.PullReviewComment `json:"body"`
+}
+
+// CommitStatus
+// swagger:response CommitStatus
+type swaggerResponseStatus struct {
+ // in:body
+ Body api.CommitStatus `json:"body"`
+}
+
+// CommitStatusList
+// swagger:response CommitStatusList
+type swaggerResponseCommitStatusList struct {
+ // in:body
+ Body []api.CommitStatus `json:"body"`
+}
+
+// WatchInfo
+// swagger:response WatchInfo
+type swaggerResponseWatchInfo struct {
+ // in:body
+ Body api.WatchInfo `json:"body"`
+}
+
+// SearchResults
+// swagger:response SearchResults
+type swaggerResponseSearchResults struct {
+ // in:body
+ Body api.SearchResults `json:"body"`
+}
+
+// AttachmentList
+// swagger:response AttachmentList
+type swaggerResponseAttachmentList struct {
+ // in: body
+ Body []api.Attachment `json:"body"`
+}
+
+// Attachment
+// swagger:response Attachment
+type swaggerResponseAttachment struct {
+ // in: body
+ Body api.Attachment `json:"body"`
+}
+
+// GitTreeResponse
+// swagger:response GitTreeResponse
+type swaggerGitTreeResponse struct {
+ // in: body
+ Body api.GitTreeResponse `json:"body"`
+}
+
+// GitBlobResponse
+// swagger:response GitBlobResponse
+type swaggerGitBlobResponse struct {
+ // in: body
+ Body api.GitBlobResponse `json:"body"`
+}
+
+// Commit
+// swagger:response Commit
+type swaggerCommit struct {
+ // in: body
+ Body api.Commit `json:"body"`
+}
+
+// CommitList
+// swagger:response CommitList
+type swaggerCommitList struct {
+ // The current page
+ Page int `json:"X-Page"`
+
+ // Commits per page
+ PerPage int `json:"X-PerPage"`
+
+ // Total commit count
+ Total int `json:"X-Total"`
+
+ // Total number of pages
+ PageCount int `json:"X-PageCount"`
+
+ // True if there is another page
+ HasMore bool `json:"X-HasMore"`
+
+ // in: body
+ Body []api.Commit `json:"body"`
+}
+
+// ChangedFileList
+// swagger:response ChangedFileList
+type swaggerChangedFileList struct {
+ // The current page
+ Page int `json:"X-Page"`
+
+ // Changed files per page
+ PerPage int `json:"X-PerPage"`
+
+ // Total changed file count
+ Total int `json:"X-Total-Count"`
+
+ // Total number of pages
+ PageCount int `json:"X-PageCount"`
+
+ // True if there is another page
+ HasMore bool `json:"X-HasMore"`
+
+ // in: body
+ Body []api.ChangedFile `json:"body"`
+}
+
+// Note
+// swagger:response Note
+type swaggerNote struct {
+ // in: body
+ Body api.Note `json:"body"`
+}
+
+// EmptyRepository
+// swagger:response EmptyRepository
+type swaggerEmptyRepository struct {
+ // in: body
+ Body api.APIError `json:"body"`
+}
+
+// FileResponse
+// swagger:response FileResponse
+type swaggerFileResponse struct {
+ // in: body
+ Body api.FileResponse `json:"body"`
+}
+
+// FilesResponse
+// swagger:response FilesResponse
+type swaggerFilesResponse struct {
+ // in: body
+ Body api.FilesResponse `json:"body"`
+}
+
+// ContentsResponse
+// swagger:response ContentsResponse
+type swaggerContentsResponse struct {
+ // in: body
+ Body api.ContentsResponse `json:"body"`
+}
+
+// ContentsListResponse
+// swagger:response ContentsListResponse
+type swaggerContentsListResponse struct {
+ // in:body
+ Body []api.ContentsResponse `json:"body"`
+}
+
+// FileDeleteResponse
+// swagger:response FileDeleteResponse
+type swaggerFileDeleteResponse struct {
+ // in: body
+ Body api.FileDeleteResponse `json:"body"`
+}
+
+// TopicListResponse
+// swagger:response TopicListResponse
+type swaggerTopicListResponse struct {
+ // in: body
+ Body []api.TopicResponse `json:"body"`
+}
+
+// TopicNames
+// swagger:response TopicNames
+type swaggerTopicNames struct {
+ // in: body
+ Body api.TopicName `json:"body"`
+}
+
+// LanguageStatistics
+// swagger:response LanguageStatistics
+type swaggerLanguageStatistics struct {
+ // in: body
+ Body map[string]int64 `json:"body"`
+}
+
+// CombinedStatus
+// swagger:response CombinedStatus
+type swaggerCombinedStatus struct {
+ // in: body
+ Body api.CombinedStatus `json:"body"`
+}
+
+// WikiPageList
+// swagger:response WikiPageList
+type swaggerWikiPageList struct {
+ // in:body
+ Body []api.WikiPageMetaData `json:"body"`
+}
+
+// WikiPage
+// swagger:response WikiPage
+type swaggerWikiPage struct {
+ // in:body
+ Body api.WikiPage `json:"body"`
+}
+
+// WikiCommitList
+// swagger:response WikiCommitList
+type swaggerWikiCommitList struct {
+ // in:body
+ Body api.WikiCommitList `json:"body"`
+}
+
+// PushMirror
+// swagger:response PushMirror
+type swaggerPushMirror struct {
+ // in:body
+ Body api.PushMirror `json:"body"`
+}
+
+// PushMirrorList
+// swagger:response PushMirrorList
+type swaggerPushMirrorList struct {
+ // in:body
+ Body []api.PushMirror `json:"body"`
+}
+
+// RepoCollaboratorPermission
+// swagger:response RepoCollaboratorPermission
+type swaggerRepoCollaboratorPermission struct {
+ // in:body
+ Body api.RepoCollaboratorPermission `json:"body"`
+}
+
+// RepoIssueConfig
+// swagger:response RepoIssueConfig
+type swaggerRepoIssueConfig struct {
+ // in:body
+ Body api.IssueConfig `json:"body"`
+}
+
+// RepoIssueConfigValidation
+// swagger:response RepoIssueConfigValidation
+type swaggerRepoIssueConfigValidation struct {
+ // in:body
+ Body api.IssueConfigValidation `json:"body"`
+}
+
+// RepoNewIssuePinsAllowed
+// swagger:response RepoNewIssuePinsAllowed
+type swaggerRepoNewIssuePinsAllowed struct {
+ // in:body
+ Body api.NewIssuePinsAllowed `json:"body"`
+}
+
+// BlockedUserList
+// swagger:response BlockedUserList
+type swaggerBlockedUserList struct {
+ // in:body
+ Body []api.BlockedUser `json:"body"`
+}
+
+// TasksList
+// swagger:response TasksList
+type swaggerRepoTasksList struct {
+ // in:body
+ Body api.ActionTaskResponse `json:"body"`
+}
+
+// swagger:response Compare
+type swaggerCompare struct {
+ // in:body
+ Body api.Compare `json:"body"`
+}
diff --git a/routers/api/v1/swagger/settings.go b/routers/api/v1/swagger/settings.go
new file mode 100644
index 0000000..a946669
--- /dev/null
+++ b/routers/api/v1/swagger/settings.go
@@ -0,0 +1,34 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import api "code.gitea.io/gitea/modules/structs"
+
+// GeneralRepoSettings
+// swagger:response GeneralRepoSettings
+type swaggerResponseGeneralRepoSettings struct {
+ // in:body
+ Body api.GeneralRepoSettings `json:"body"`
+}
+
+// GeneralUISettings
+// swagger:response GeneralUISettings
+type swaggerResponseGeneralUISettings struct {
+ // in:body
+ Body api.GeneralUISettings `json:"body"`
+}
+
+// GeneralAPISettings
+// swagger:response GeneralAPISettings
+type swaggerResponseGeneralAPISettings struct {
+ // in:body
+ Body api.GeneralAPISettings `json:"body"`
+}
+
+// GeneralAttachmentSettings
+// swagger:response GeneralAttachmentSettings
+type swaggerResponseGeneralAttachmentSettings struct {
+ // in:body
+ Body api.GeneralAttachmentSettings `json:"body"`
+}
diff --git a/routers/api/v1/swagger/user.go b/routers/api/v1/swagger/user.go
new file mode 100644
index 0000000..37e2866
--- /dev/null
+++ b/routers/api/v1/swagger/user.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package swagger
+
+import (
+ activities_model "code.gitea.io/gitea/models/activities"
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// User
+// swagger:response User
+type swaggerResponseUser struct {
+ // in:body
+ Body api.User `json:"body"`
+}
+
+// UserList
+// swagger:response UserList
+type swaggerResponseUserList struct {
+ // in:body
+ Body []api.User `json:"body"`
+}
+
+// EmailList
+// swagger:response EmailList
+type swaggerResponseEmailList struct {
+ // in:body
+ Body []api.Email `json:"body"`
+}
+
+// swagger:model EditUserOption
+type swaggerModelEditUserOption struct {
+ // in:body
+ Options api.EditUserOption
+}
+
+// UserHeatmapData
+// swagger:response UserHeatmapData
+type swaggerResponseUserHeatmapData struct {
+ // in:body
+ Body []activities_model.UserHeatmapData `json:"body"`
+}
+
+// UserSettings
+// swagger:response UserSettings
+type swaggerResponseUserSettings struct {
+ // in:body
+ Body api.UserSettings `json:"body"`
+}
diff --git a/routers/api/v1/user/action.go b/routers/api/v1/user/action.go
new file mode 100644
index 0000000..ec5289f
--- /dev/null
+++ b/routers/api/v1/user/action.go
@@ -0,0 +1,381 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "errors"
+ "net/http"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ actions_service "code.gitea.io/gitea/services/actions"
+ "code.gitea.io/gitea/services/context"
+ secret_service "code.gitea.io/gitea/services/secrets"
+)
+
+// CreateOrUpdateSecret creates or updates one secret in the user scope
+func CreateOrUpdateSecret(ctx *context.APIContext) {
+ // swagger:operation PUT /user/actions/secrets/{secretname} user updateUserSecret
+ // ---
+ // summary: Create or Update a secret value in a user scope
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateOrUpdateSecretOption"
+ // responses:
+ // "201":
+ // description: response when creating a secret
+ // "204":
+ // description: response when updating a secret
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.CreateOrUpdateSecretOption)
+
+ _, created, err := secret_service.CreateOrUpdateSecret(ctx, ctx.Doer.ID, 0, ctx.Params("secretname"), opt.Data)
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateOrUpdateSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "CreateOrUpdateSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateOrUpdateSecret", err)
+ }
+ return
+ }
+
+ if created {
+ ctx.Status(http.StatusCreated)
+ } else {
+ ctx.Status(http.StatusNoContent)
+ }
+}
+
+// DeleteSecret deletes one secret in the user scope
+func DeleteSecret(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/actions/secrets/{secretname} user deleteUserSecret
+ // ---
+ // summary: Delete a secret in a user scope
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: secretname
+ // in: path
+ // description: name of the secret
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // description: delete one secret of the user
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := secret_service.DeleteSecretByName(ctx, ctx.Doer.ID, 0, ctx.Params("secretname"))
+ if err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteSecret", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteSecret", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteSecret", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateVariable creates a user-level variable
+func CreateVariable(ctx *context.APIContext) {
+ // swagger:operation POST /user/actions/variables/{variablename} user createUserVariable
+ // ---
+ // summary: Create a user-level variable
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateVariableOption"
+ // responses:
+ // "201":
+ // description: response when creating a variable
+ // "204":
+ // description: response when creating a variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.CreateVariableOption)
+
+ ownerID := ctx.Doer.ID
+ variableName := ctx.Params("variablename")
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ownerID,
+ Name: variableName,
+ })
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ return
+ }
+ if v != nil && v.ID > 0 {
+ ctx.Error(http.StatusConflict, "VariableNameAlreadyExists", util.NewAlreadyExistErrorf("variable name %s already exists", variableName))
+ return
+ }
+
+ if _, err := actions_service.CreateVariable(ctx, ownerID, 0, variableName, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "CreateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "CreateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UpdateVariable updates a user-level variable created by the current doer
+func UpdateVariable(ctx *context.APIContext) {
+ // swagger:operation PUT /user/actions/variables/{variablename} user updateUserVariable
+ // ---
+ // summary: Update a user-level variable which is created by current doer
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateVariableOption"
+ // responses:
+ // "201":
+ // description: response when updating a variable
+ // "204":
+ // description: response when updating a variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opt := web.GetForm(ctx).(*api.UpdateVariableOption)
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ctx.Doer.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ if opt.Name == "" {
+ opt.Name = ctx.Params("variablename")
+ }
+ if _, err := actions_service.UpdateVariable(ctx, v.ID, opt.Name, opt.Value); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "UpdateVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateVariable", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteVariable deletes a user-level variable created by the current doer
+func DeleteVariable(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/actions/variables/{variablename} user deleteUserVariable
+ // ---
+ // summary: Delete a user-level variable which is created by current doer
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "201":
+ // description: response when deleting a variable
+ // "204":
+ // description: response when deleting a variable
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := actions_service.DeleteVariableByName(ctx, ctx.Doer.ID, 0, ctx.Params("variablename")); err != nil {
+ if errors.Is(err, util.ErrInvalidArgument) {
+ ctx.Error(http.StatusBadRequest, "DeleteVariableByName", err)
+ } else if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "DeleteVariableByName", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteVariableByName", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetVariable gets a user-level variable created by the current doer
+func GetVariable(ctx *context.APIContext) {
+ // swagger:operation GET /user/actions/variables/{variablename} user getUserVariable
+ // ---
+ // summary: Get a user-level variable which is created by current doer
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: variablename
+ // in: path
+ // description: name of the variable
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActionVariable"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ v, err := actions_service.GetVariable(ctx, actions_model.FindVariablesOpts{
+ OwnerID: ctx.Doer.ID,
+ Name: ctx.Params("variablename"),
+ })
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ ctx.Error(http.StatusNotFound, "GetVariable", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetVariable", err)
+ }
+ return
+ }
+
+ variable := &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ Data: v.Data,
+ }
+
+ ctx.JSON(http.StatusOK, variable)
+}
+
+// ListVariables lists user-level variables
+func ListVariables(ctx *context.APIContext) {
+ // swagger:operation GET /user/actions/variables user getUserVariablesList
+ // ---
+ // summary: Get the user-level list of variables which is created by current doer
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/VariableList"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ vars, count, err := db.FindAndCount[actions_model.ActionVariable](ctx, &actions_model.FindVariablesOpts{
+ OwnerID: ctx.Doer.ID,
+ ListOptions: utils.GetListOptions(ctx),
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "FindVariables", err)
+ return
+ }
+
+ variables := make([]*api.ActionVariable, len(vars))
+ for i, v := range vars {
+ variables[i] = &api.ActionVariable{
+ OwnerID: v.OwnerID,
+ RepoID: v.RepoID,
+ Name: v.Name,
+ Data: v.Data,
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, variables)
+}
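
All of the secret and variable handlers above operate on the authenticated user (ctx.Doer) with a zero repository ID. A client sketch for the PUT /user/actions/secrets/{secretname} operation; the request body mirrors CreateOrUpdateSecretOption, whose JSON field name is assumed to be "data" since that struct's definition is not part of this diff:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request body for CreateOrUpdateSecretOption; the "data" field name is assumed.
	body, err := json.Marshal(map[string]string{"data": "super-secret-value"})
	if err != nil {
		panic(err)
	}

	endpoint := "http://localhost:3000/api/v1/user/actions/secrets/DEPLOY_KEY"
	req, err := http.NewRequest(http.MethodPut, endpoint, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN") // placeholder

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 201 means the secret was created, 204 means an existing one was updated,
	// mirroring the swagger annotations on CreateOrUpdateSecret above.
	fmt.Println("status:", resp.StatusCode)
}
```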
diff --git a/routers/api/v1/user/app.go b/routers/api/v1/user/app.go
new file mode 100644
index 0000000..c4fb2ea
--- /dev/null
+++ b/routers/api/v1/user/app.go
@@ -0,0 +1,434 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// ListAccessTokens lists all the access tokens
+func ListAccessTokens(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/tokens user userGetTokens
+ // ---
+ // summary: List the authenticated user's access tokens
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/AccessTokenList"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ opts := auth_model.ListAccessTokensOptions{UserID: ctx.ContextUser.ID, ListOptions: utils.GetListOptions(ctx)}
+
+ tokens, count, err := db.FindAndCount[auth_model.AccessToken](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiTokens := make([]*api.AccessToken, len(tokens))
+ for i := range tokens {
+ apiTokens[i] = &api.AccessToken{
+ ID: tokens[i].ID,
+ Name: tokens[i].Name,
+ TokenLastEight: tokens[i].TokenLastEight,
+ Scopes: tokens[i].Scope.StringSlice(),
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &apiTokens)
+}
+
+// CreateAccessToken creates an access token
+func CreateAccessToken(ctx *context.APIContext) {
+ // swagger:operation POST /users/{username}/tokens user userCreateToken
+ // ---
+ // summary: Create an access token
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // required: true
+ // type: string
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateAccessTokenOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/AccessToken"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.CreateAccessTokenOption)
+
+ t := &auth_model.AccessToken{
+ UID: ctx.ContextUser.ID,
+ Name: form.Name,
+ }
+
+ exist, err := auth_model.AccessTokenByNameExists(ctx, t)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ if exist {
+ ctx.Error(http.StatusBadRequest, "AccessTokenByNameExists", errors.New("access token name has been used already"))
+ return
+ }
+
+ scope, err := auth_model.AccessTokenScope(strings.Join(form.Scopes, ",")).Normalize()
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "AccessTokenScope.Normalize", fmt.Errorf("invalid access token scope provided: %w", err))
+ return
+ }
+ if scope == "" {
+ ctx.Error(http.StatusBadRequest, "AccessTokenScope", "access token must have a scope")
+ return
+ }
+ t.Scope = scope
+
+ if err := auth_model.NewAccessToken(ctx, t); err != nil {
+ ctx.Error(http.StatusInternalServerError, "NewAccessToken", err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, &api.AccessToken{
+ Name: t.Name,
+ Token: t.Token,
+ ID: t.ID,
+ TokenLastEight: t.TokenLastEight,
+ Scopes: t.Scope.StringSlice(),
+ })
+}
+
+// DeleteAccessToken deletes an access token
+func DeleteAccessToken(ctx *context.APIContext) {
+ // swagger:operation DELETE /users/{username}/tokens/{token} user userDeleteAccessToken
+ // ---
+ // summary: delete an access token
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: token
+ // in: path
+ // description: token to be deleted, identified by ID, or by name if it is not a valid ID
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/error"
+
+ token := ctx.Params(":id")
+ tokenID, _ := strconv.ParseInt(token, 0, 64)
+
+ if tokenID == 0 {
+ tokens, err := db.Find[auth_model.AccessToken](ctx, auth_model.ListAccessTokensOptions{
+ Name: token,
+ UserID: ctx.ContextUser.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListAccessTokens", err)
+ return
+ }
+
+ switch len(tokens) {
+ case 0:
+ ctx.NotFound()
+ return
+ case 1:
+ tokenID = tokens[0].ID
+ default:
+ ctx.Error(http.StatusUnprocessableEntity, "DeleteAccessTokenByID", fmt.Errorf("multiple matches for token name '%s'", token))
+ return
+ }
+ }
+ if tokenID == 0 {
+ ctx.Error(http.StatusInternalServerError, "Invalid TokenID", nil)
+ return
+ }
+
+ if err := auth_model.DeleteAccessTokenByID(ctx, tokenID, ctx.ContextUser.ID); err != nil {
+ if auth_model.IsErrAccessTokenNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteAccessTokenByID", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// CreateOauth2Application is the handler to create a new OAuth2 Application for the authenticated user
+func CreateOauth2Application(ctx *context.APIContext) {
+ // swagger:operation POST /user/applications/oauth2 user userCreateOAuth2Application
+ // ---
+ // summary: creates a new OAuth2 application
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateOAuth2ApplicationOptions"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/OAuth2Application"
+ // "400":
+ // "$ref": "#/responses/error"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ data := web.GetForm(ctx).(*api.CreateOAuth2ApplicationOptions)
+
+ app, err := auth_model.CreateOAuth2Application(ctx, auth_model.CreateOAuth2ApplicationOptions{
+ Name: data.Name,
+ UserID: ctx.Doer.ID,
+ RedirectURIs: data.RedirectURIs,
+ ConfidentialClient: data.ConfidentialClient,
+ })
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "", "error creating oauth2 application")
+ return
+ }
+ secret, err := app.GenerateClientSecret(ctx)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "", "error creating application secret")
+ return
+ }
+ app.ClientSecret = secret
+
+ ctx.JSON(http.StatusCreated, convert.ToOAuth2Application(app))
+}
+
+// ListOauth2Applications lists all the OAuth2 applications of the authenticated user
+func ListOauth2Applications(ctx *context.APIContext) {
+ // swagger:operation GET /user/applications/oauth2 user userGetOAuth2Applications
+ // ---
+ // summary: List the authenticated user's oauth2 applications
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OAuth2ApplicationList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ apps, total, err := db.FindAndCount[auth_model.OAuth2Application](ctx, auth_model.FindOAuth2ApplicationsOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ OwnerID: ctx.Doer.ID,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListOAuth2Applications", err)
+ return
+ }
+
+ apiApps := make([]*api.OAuth2Application, len(apps))
+ for i := range apps {
+ apiApps[i] = convert.ToOAuth2Application(apps[i])
+ apiApps[i].ClientSecret = "" // Hide secret on application list
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, &apiApps)
+}
+
+// DeleteOauth2Application deletes an OAuth2 application
+func DeleteOauth2Application(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/applications/oauth2/{id} user userDeleteOAuth2Application
+ // ---
+ // summary: delete an OAuth2 Application
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: ID of the application to be deleted
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ appID := ctx.ParamsInt64(":id")
+ if err := auth_model.DeleteOAuth2Application(ctx, appID, ctx.Doer.ID); err != nil {
+ if auth_model.IsErrOAuthApplicationNotFound(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteOauth2ApplicationByID", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// GetOauth2Application gets an OAuth2 application
+func GetOauth2Application(ctx *context.APIContext) {
+ // swagger:operation GET /user/applications/oauth2/{id} user userGetOAuth2Application
+ // ---
+ // summary: get an OAuth2 Application
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: Application ID to be found
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OAuth2Application"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ appID := ctx.ParamsInt64(":id")
+ app, err := auth_model.GetOAuth2ApplicationByID(ctx, appID)
+ if err != nil {
+ if auth_model.IsErrOauthClientIDInvalid(err) || auth_model.IsErrOAuthApplicationNotFound(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetOauth2ApplicationByID", err)
+ }
+ return
+ }
+ if app.UID != ctx.Doer.ID {
+ ctx.NotFound()
+ return
+ }
+
+ app.ClientSecret = ""
+
+ ctx.JSON(http.StatusOK, convert.ToOAuth2Application(app))
+}
+
+// UpdateOauth2Application updates an OAuth2 application
+func UpdateOauth2Application(ctx *context.APIContext) {
+ // swagger:operation PATCH /user/applications/oauth2/{id} user userUpdateOAuth2Application
+ // ---
+	// summary: update an OAuth2 application; this also regenerates the client secret
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+	// description: ID of the application to update
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateOAuth2ApplicationOptions"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/OAuth2Application"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ appID := ctx.ParamsInt64(":id")
+
+ data := web.GetForm(ctx).(*api.CreateOAuth2ApplicationOptions)
+
+ app, err := auth_model.UpdateOAuth2Application(ctx, auth_model.UpdateOAuth2ApplicationOptions{
+ Name: data.Name,
+ UserID: ctx.Doer.ID,
+ ID: appID,
+ RedirectURIs: data.RedirectURIs,
+ ConfidentialClient: data.ConfidentialClient,
+ })
+ if err != nil {
+ if auth_model.IsErrOauthClientIDInvalid(err) || auth_model.IsErrOAuthApplicationNotFound(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "UpdateOauth2ApplicationByID", err)
+ }
+ return
+ }
+ app.ClientSecret, err = app.GenerateClientSecret(ctx)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "", "error updating application secret")
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.ToOAuth2Application(app))
+}
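
For reference, a minimal Go client sketch for the OAuth2 application endpoints above. It assumes a Forgejo instance at https://forgejo.example.com and a personal access token sent as "Authorization: token ..."; the JSON field names follow the CreateOAuth2ApplicationOptions and OAuth2Application definitions, and all concrete values are placeholders. Note that the client secret is only returned by the create and update handlers; the list handler blanks it.

// Hedged sketch: create an OAuth2 application and keep the returned secret.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"name":                "my-app",                               // placeholder
		"redirect_uris":       []string{"https://example.com/callback"}, // placeholder
		"confidential_client": true,
	})
	req, _ := http.NewRequest(http.MethodPost, "https://forgejo.example.com/api/v1/user/applications/oauth2", bytes.NewReader(body))
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var app struct {
		ID           int64  `json:"id"`
		ClientID     string `json:"client_id"`
		ClientSecret string `json:"client_secret"` // only present in create/update responses
	}
	if err := json.NewDecoder(resp.Body).Decode(&app); err != nil {
		panic(err)
	}
	fmt.Println("client_id:", app.ClientID, "client_secret:", app.ClientSecret)
}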
diff --git a/routers/api/v1/user/avatar.go b/routers/api/v1/user/avatar.go
new file mode 100644
index 0000000..d3833a3
--- /dev/null
+++ b/routers/api/v1/user/avatar.go
@@ -0,0 +1,73 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "encoding/base64"
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+// UpdateAvatar updates the avatar of the authenticated user
+func UpdateAvatar(ctx *context.APIContext) {
+ // swagger:operation POST /user/avatar user userUpdateAvatar
+ // ---
+ // summary: Update Avatar
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UpdateUserAvatarOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ form := web.GetForm(ctx).(*api.UpdateUserAvatarOption)
+
+ content, err := base64.StdEncoding.DecodeString(form.Image)
+ if err != nil {
+ ctx.Error(http.StatusBadRequest, "DecodeImage", err)
+ return
+ }
+
+ err = user_service.UploadAvatar(ctx, ctx.Doer, content)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UploadAvatar", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// DeleteAvatar deletes the avatar of the authenticated user
+func DeleteAvatar(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/avatar user userDeleteAvatar
+ // ---
+ // summary: Delete Avatar
+ // produces:
+ // - application/json
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ err := user_service.DeleteAvatar(ctx, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "DeleteAvatar", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
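
A sketch of the corresponding client call: the avatar bytes are base64-encoded into the "image" field of the JSON body, matching the DecodeString call in UpdateAvatar above. Host, token, and file path are placeholders.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	raw, err := os.ReadFile("avatar.png") // placeholder path
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(map[string]string{
		"image": base64.StdEncoding.EncodeToString(raw),
	})
	req, _ := http.NewRequest(http.MethodPost, "https://forgejo.example.com/api/v1/user/avatar", bytes.NewReader(body))
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("update avatar:", resp.Status) // 204 No Content on success
}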
diff --git a/routers/api/v1/user/email.go b/routers/api/v1/user/email.go
new file mode 100644
index 0000000..af5d355
--- /dev/null
+++ b/routers/api/v1/user/email.go
@@ -0,0 +1,144 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+// ListEmails lists all of the authenticated user's email addresses
+// see https://github.com/gogits/go-gogs-client/wiki/Users-Emails#list-email-addresses-for-a-user
+func ListEmails(ctx *context.APIContext) {
+ // swagger:operation GET /user/emails user userListEmails
+ // ---
+ // summary: List the authenticated user's email addresses
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/EmailList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ emails, err := user_model.GetEmailAddresses(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetEmailAddresses", err)
+ return
+ }
+ apiEmails := make([]*api.Email, len(emails))
+ for i := range emails {
+ apiEmails[i] = convert.ToEmail(emails[i])
+ }
+ ctx.JSON(http.StatusOK, &apiEmails)
+}
+
+// AddEmail adds one or more email addresses
+func AddEmail(ctx *context.APIContext) {
+ // swagger:operation POST /user/emails user userAddEmail
+ // ---
+ // summary: Add email addresses
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateEmailOption"
+ // responses:
+ // '201':
+ // "$ref": "#/responses/EmailList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateEmailOption)
+ if len(form.Emails) == 0 {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Email list empty")
+ return
+ }
+
+ if err := user_service.AddEmailAddresses(ctx, ctx.Doer, form.Emails); err != nil {
+ if user_model.IsErrEmailAlreadyUsed(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Email address has been used: "+err.(user_model.ErrEmailAlreadyUsed).Email)
+ } else if user_model.IsErrEmailCharIsNotSupported(err) || user_model.IsErrEmailInvalid(err) {
+ email := ""
+ if typedError, ok := err.(user_model.ErrEmailInvalid); ok {
+ email = typedError.Email
+ }
+ if typedError, ok := err.(user_model.ErrEmailCharIsNotSupported); ok {
+ email = typedError.Email
+ }
+
+ errMsg := fmt.Sprintf("Email address %q invalid", email)
+ ctx.Error(http.StatusUnprocessableEntity, "", errMsg)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "AddEmailAddresses", err)
+ }
+ return
+ }
+
+ emails, err := user_model.GetEmailAddresses(ctx, ctx.Doer.ID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetEmailAddresses", err)
+ return
+ }
+
+ apiEmails := make([]*api.Email, 0, len(emails))
+ for _, email := range emails {
+ apiEmails = append(apiEmails, convert.ToEmail(email))
+ }
+ ctx.JSON(http.StatusCreated, apiEmails)
+}
+
+// DeleteEmail deletes one or more email addresses
+func DeleteEmail(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/emails user userDeleteEmail
+ // ---
+ // summary: Delete email addresses
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/DeleteEmailOption"
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ form := web.GetForm(ctx).(*api.DeleteEmailOption)
+ if len(form.Emails) == 0 {
+ ctx.Status(http.StatusNoContent)
+ return
+ }
+
+ if err := user_service.DeleteEmailAddresses(ctx, ctx.Doer, form.Emails); err != nil {
+ if user_model.IsErrEmailAddressNotExist(err) {
+ ctx.Error(http.StatusNotFound, "DeleteEmailAddresses", err)
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteEmailAddresses", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
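
Both email handlers above accept a JSON body with an "emails" array. A hedged client sketch, with host and token as placeholders:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// call sends a JSON body containing an email list to the given method/URL.
func call(method, url string, emails []string) error {
	body, _ := json.Marshal(map[string][]string{"emails": emails})
	req, err := http.NewRequest(method, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(method, url, "->", resp.Status)
	return nil
}

func main() {
	base := "https://forgejo.example.com/api/v1/user/emails"
	// Add one address (201 with the full list on success, 422 if invalid or already used).
	_ = call(http.MethodPost, base, []string{"work@example.com"})
	// Remove it again (204 on success, 404 if the address does not exist).
	_ = call(http.MethodDelete, base, []string{"work@example.com"})
}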
diff --git a/routers/api/v1/user/follower.go b/routers/api/v1/user/follower.go
new file mode 100644
index 0000000..784e232
--- /dev/null
+++ b/routers/api/v1/user/follower.go
@@ -0,0 +1,281 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "errors"
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+func responseAPIUsers(ctx *context.APIContext, users []*user_model.User) {
+ apiUsers := make([]*api.User, len(users))
+ for i := range users {
+ apiUsers[i] = convert.ToUser(ctx, users[i], ctx.Doer)
+ }
+ ctx.JSON(http.StatusOK, &apiUsers)
+}
+
+func listUserFollowers(ctx *context.APIContext, u *user_model.User) {
+ users, count, err := user_model.GetUserFollowers(ctx, u, ctx.Doer, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserFollowers", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ responseAPIUsers(ctx, users)
+}
+
+// ListMyFollowers lists the authenticated user's followers
+func ListMyFollowers(ctx *context.APIContext) {
+ // swagger:operation GET /user/followers user userCurrentListFollowers
+ // ---
+ // summary: List the authenticated user's followers
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listUserFollowers(ctx, ctx.Doer)
+}
+
+// ListFollowers lists the given user's followers
+func ListFollowers(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/followers user userListFollowers
+ // ---
+ // summary: List the given user's followers
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listUserFollowers(ctx, ctx.ContextUser)
+}
+
+func listUserFollowing(ctx *context.APIContext, u *user_model.User) {
+ users, count, err := user_model.GetUserFollowing(ctx, u, ctx.Doer, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserFollowing", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(count)
+ responseAPIUsers(ctx, users)
+}
+
+// ListMyFollowing lists the users that the authenticated user is following
+func ListMyFollowing(ctx *context.APIContext) {
+ // swagger:operation GET /user/following user userCurrentListFollowing
+ // ---
+ // summary: List the users that the authenticated user is following
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listUserFollowing(ctx, ctx.Doer)
+}
+
+// ListFollowing lists the users that the given user is following
+func ListFollowing(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/following user userListFollowing
+ // ---
+ // summary: List the users that the given user is following
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listUserFollowing(ctx, ctx.ContextUser)
+}
+
+func checkUserFollowing(ctx *context.APIContext, u *user_model.User, followID int64) {
+ if user_model.IsFollowing(ctx, u.ID, followID) {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+}
+
+// CheckMyFollowing checks whether the given user is followed by the authenticated user
+func CheckMyFollowing(ctx *context.APIContext) {
+ // swagger:operation GET /user/following/{username} user userCurrentCheckFollowing
+ // ---
+ // summary: Check whether a user is followed by the authenticated user
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of followed user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ checkUserFollowing(ctx, ctx.Doer, ctx.ContextUser.ID)
+}
+
+// CheckFollowing checks whether one user is following another user
+func CheckFollowing(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/following/{target} user userCheckFollowing
+ // ---
+ // summary: Check if one user is following another user
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of following user
+ // type: string
+ // required: true
+ // - name: target
+ // in: path
+ // description: username of followed user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ target := GetUserByParamsName(ctx, ":target")
+ if ctx.Written() {
+ return
+ }
+ checkUserFollowing(ctx, ctx.ContextUser, target.ID)
+}
+
+// Follow makes the authenticated user follow the given user
+func Follow(ctx *context.APIContext) {
+ // swagger:operation PUT /user/following/{username} user userCurrentPutFollow
+ // ---
+ // summary: Follow a user
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to follow
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := user_model.FollowUser(ctx, ctx.Doer.ID, ctx.ContextUser.ID); err != nil {
+ if errors.Is(err, user_model.ErrBlockedByUser) {
+ ctx.Error(http.StatusForbidden, "BlockedByUser", err)
+ return
+ }
+ ctx.Error(http.StatusInternalServerError, "FollowUser", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// Unfollow makes the authenticated user unfollow the given user
+func Unfollow(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/following/{username} user userCurrentDeleteFollow
+ // ---
+ // summary: Unfollow a user
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to unfollow
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if err := user_model.UnfollowUser(ctx, ctx.Doer.ID, ctx.ContextUser.ID); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UnfollowUser", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
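
The follow-check endpoints above answer with 204 when the relation exists and 404 when it does not, so a client can map the status code to a boolean. A small sketch; host and token are placeholders:

package main

import (
	"fmt"
	"net/http"
)

// following reports whether the authenticated user follows the given user by
// interpreting the 204/404 contract of GET /user/following/{username}.
func following(username string) (bool, error) {
	req, err := http.NewRequest(http.MethodGet, "https://forgejo.example.com/api/v1/user/following/"+username, nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusNoContent:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("unexpected status %s", resp.Status)
	}
}

func main() {
	ok, err := following("some-user") // placeholder username
	if err != nil {
		panic(err)
	}
	fmt.Println("following:", ok)
}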
diff --git a/routers/api/v1/user/gpg_key.go b/routers/api/v1/user/gpg_key.go
new file mode 100644
index 0000000..2fe4eb8
--- /dev/null
+++ b/routers/api/v1/user/gpg_key.go
@@ -0,0 +1,333 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+func listGPGKeys(ctx *context.APIContext, uid int64, listOptions db.ListOptions) {
+ keys, total, err := db.FindAndCount[asymkey_model.GPGKey](ctx, asymkey_model.FindGPGKeyOptions{
+ ListOptions: listOptions,
+ OwnerID: uid,
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListGPGKeys", err)
+ return
+ }
+
+ if err := asymkey_model.GPGKeyList(keys).LoadSubKeys(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListGPGKeys", err)
+ return
+ }
+
+ apiKeys := make([]*api.GPGKey, len(keys))
+ for i := range keys {
+ apiKeys[i] = convert.ToGPGKey(keys[i])
+ }
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, &apiKeys)
+}
+
+// ListGPGKeys lists the GPG keys of the given user
+func ListGPGKeys(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/gpg_keys user userListGPGKeys
+ // ---
+ // summary: List the given user's GPG keys
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GPGKeyList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listGPGKeys(ctx, ctx.ContextUser.ID, utils.GetListOptions(ctx))
+}
+
+// ListMyGPGKeys lists the GPG keys of the authenticated user
+func ListMyGPGKeys(ctx *context.APIContext) {
+ // swagger:operation GET /user/gpg_keys user userCurrentListGPGKeys
+ // ---
+ // summary: List the authenticated user's GPG keys
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GPGKeyList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listGPGKeys(ctx, ctx.Doer.ID, utils.GetListOptions(ctx))
+}
+
+// GetGPGKey gets a GPG key by its ID
+func GetGPGKey(ctx *context.APIContext) {
+ // swagger:operation GET /user/gpg_keys/{id} user userCurrentGetGPGKey
+ // ---
+ // summary: Get a GPG key
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of key to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/GPGKey"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ key, err := asymkey_model.GetGPGKeyForUserByID(ctx, ctx.Doer.ID, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if asymkey_model.IsErrGPGKeyNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetGPGKeyByID", err)
+ }
+ return
+ }
+ if err := key.LoadSubKeys(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadSubKeys", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToGPGKey(key))
+}
+
+// CreateUserGPGKey creates a new GPG key for the user with the given ID.
+func CreateUserGPGKey(ctx *context.APIContext, form api.CreateGPGKeyOption, uid int64) {
+ if user_model.IsFeatureDisabledWithLoginType(ctx.Doer, setting.UserFeatureManageGPGKeys) {
+ ctx.NotFound("Not Found", fmt.Errorf("gpg keys setting is not allowed to be visited"))
+ return
+ }
+
+ token := asymkey_model.VerificationToken(ctx.Doer, 1)
+ lastToken := asymkey_model.VerificationToken(ctx.Doer, 0)
+
+ keys, err := asymkey_model.AddGPGKey(ctx, uid, form.ArmoredKey, token, form.Signature)
+ if err != nil && asymkey_model.IsErrGPGInvalidTokenSignature(err) {
+ keys, err = asymkey_model.AddGPGKey(ctx, uid, form.ArmoredKey, lastToken, form.Signature)
+ }
+ if err != nil {
+ HandleAddGPGKeyError(ctx, err, token)
+ return
+ }
+ ctx.JSON(http.StatusCreated, convert.ToGPGKey(keys[0]))
+}
+
+// GetVerificationToken returns the current token to be signed for this user
+func GetVerificationToken(ctx *context.APIContext) {
+ // swagger:operation GET /user/gpg_key_token user getVerificationToken
+ // ---
+ // summary: Get a Token to verify
+ // produces:
+ // - text/plain
+	// responses:
+ // "200":
+ // "$ref": "#/responses/string"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ token := asymkey_model.VerificationToken(ctx.Doer, 1)
+ ctx.PlainText(http.StatusOK, token)
+}
+
+// VerifyUserGPGKey verifies a GPG key belonging to the authenticated user.
+func VerifyUserGPGKey(ctx *context.APIContext) {
+ // swagger:operation POST /user/gpg_key_verify user userVerifyGPGKey
+ // ---
+ // summary: Verify a GPG key
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // responses:
+ // "201":
+ // "$ref": "#/responses/GPGKey"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.VerifyGPGKeyOption)
+ token := asymkey_model.VerificationToken(ctx.Doer, 1)
+ lastToken := asymkey_model.VerificationToken(ctx.Doer, 0)
+
+ form.KeyID = strings.TrimLeft(form.KeyID, "0")
+ if form.KeyID == "" {
+ ctx.NotFound()
+ return
+ }
+
+ _, err := asymkey_model.VerifyGPGKey(ctx, ctx.Doer.ID, form.KeyID, token, form.Signature)
+ if err != nil && asymkey_model.IsErrGPGInvalidTokenSignature(err) {
+ _, err = asymkey_model.VerifyGPGKey(ctx, ctx.Doer.ID, form.KeyID, lastToken, form.Signature)
+ }
+
+ if err != nil {
+ if asymkey_model.IsErrGPGInvalidTokenSignature(err) {
+ ctx.Error(http.StatusUnprocessableEntity, "GPGInvalidSignature", fmt.Sprintf("The provided GPG key, signature and token do not match or token is out of date. Provide a valid signature for the token: %s", token))
+ return
+ }
+		ctx.Error(http.StatusInternalServerError, "VerifyUserGPGKey", err)
+		return
+	}
+
+ keys, err := db.Find[asymkey_model.GPGKey](ctx, asymkey_model.FindGPGKeyOptions{
+ KeyID: form.KeyID,
+ IncludeSubKeys: true,
+ })
+ if err != nil {
+ if asymkey_model.IsErrGPGKeyNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetGPGKeysByKeyID", err)
+ }
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToGPGKey(keys[0]))
+}
+
+// swagger:parameters userCurrentPostGPGKey
+type swaggerUserCurrentPostGPGKey struct {
+ // in:body
+ Form api.CreateGPGKeyOption
+}
+
+// CreateGPGKey creates a GPG key belonging to the authenticated user
+func CreateGPGKey(ctx *context.APIContext) {
+ // swagger:operation POST /user/gpg_keys user userCurrentPostGPGKey
+ // ---
+ // summary: Create a GPG key
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // responses:
+ // "201":
+ // "$ref": "#/responses/GPGKey"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateGPGKeyOption)
+ CreateUserGPGKey(ctx, *form, ctx.Doer.ID)
+}
+
+// DeleteGPGKey removes a GPG key belonging to the authenticated user
+func DeleteGPGKey(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/gpg_keys/{id} user userCurrentDeleteGPGKey
+ // ---
+ // summary: Remove a GPG key
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of key to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if user_model.IsFeatureDisabledWithLoginType(ctx.Doer, setting.UserFeatureManageGPGKeys) {
+ ctx.NotFound("Not Found", fmt.Errorf("gpg keys setting is not allowed to be visited"))
+ return
+ }
+
+ if err := asymkey_model.DeleteGPGKey(ctx, ctx.Doer, ctx.ParamsInt64(":id")); err != nil {
+ if asymkey_model.IsErrGPGKeyAccessDenied(err) {
+ ctx.Error(http.StatusForbidden, "", "You do not have access to this key")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteGPGKey", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// HandleAddGPGKeyError handles errors that occur while adding a GPG key
+func HandleAddGPGKeyError(ctx *context.APIContext, err error, token string) {
+ switch {
+ case asymkey_model.IsErrGPGKeyAccessDenied(err):
+ ctx.Error(http.StatusUnprocessableEntity, "GPGKeyAccessDenied", "You do not have access to this GPG key")
+ case asymkey_model.IsErrGPGKeyIDAlreadyUsed(err):
+ ctx.Error(http.StatusUnprocessableEntity, "GPGKeyIDAlreadyUsed", "A key with the same id already exists")
+ case asymkey_model.IsErrGPGKeyParsing(err):
+ ctx.Error(http.StatusUnprocessableEntity, "GPGKeyParsing", err)
+ case asymkey_model.IsErrGPGNoEmailFound(err):
+ ctx.Error(http.StatusNotFound, "GPGNoEmailFound", fmt.Sprintf("None of the emails attached to the GPG key could be found. It may still be added if you provide a valid signature for the token: %s", token))
+ case asymkey_model.IsErrGPGInvalidTokenSignature(err):
+ ctx.Error(http.StatusUnprocessableEntity, "GPGInvalidSignature", fmt.Sprintf("The provided GPG key, signature and token do not match or token is out of date. Provide a valid signature for the token: %s", token))
+ default:
+ ctx.Error(http.StatusInternalServerError, "AddGPGKey", err)
+ }
+}
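
A sketch of the verification-token flow used by the handlers above: fetch the current token from /user/gpg_key_token, then upload an armored public key, optionally together with an armored signature made over that token. Field names follow the CreateGPGKeyOption definition; host, token, and file names are placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	const base = "https://forgejo.example.com/api/v1"
	const auth = "token YOUR_ACCESS_TOKEN"

	// 1. Fetch the token that a signature may be made over.
	req, _ := http.NewRequest(http.MethodGet, base+"/user/gpg_key_token", nil)
	req.Header.Set("Authorization", auth)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	tokenBytes, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println("verification token:", string(tokenBytes))

	// 2. Upload an armored public key (e.g. exported with `gpg --armor --export`).
	armored, err := os.ReadFile("pubkey.asc") // placeholder path
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(map[string]string{"armored_public_key": string(armored)})
	req, _ = http.NewRequest(http.MethodPost, base+"/user/gpg_keys", bytes.NewReader(body))
	req.Header.Set("Authorization", auth)
	req.Header.Set("Content-Type", "application/json")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create key:", resp.Status) // 201 Created, or 404/422 per HandleAddGPGKeyError
}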
diff --git a/routers/api/v1/user/helper.go b/routers/api/v1/user/helper.go
new file mode 100644
index 0000000..8b5c64e
--- /dev/null
+++ b/routers/api/v1/user/helper.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/services/context"
+)
+
+// GetUserByParamsName gets the user whose name is given in the specified URL parameter
+func GetUserByParamsName(ctx *context.APIContext, name string) *user_model.User {
+ username := ctx.Params(name)
+ user, err := user_model.GetUserByName(ctx, username)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ if redirectUserID, err2 := user_model.LookupUserRedirect(ctx, username); err2 == nil {
+ context.RedirectToUser(ctx.Base, username, redirectUserID)
+ } else {
+ ctx.NotFound("GetUserByName", err)
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
+ }
+ return nil
+ }
+ return user
+}
+
+// GetUserByParams returns user whose name is presented in URL (":username").
+func GetUserByParams(ctx *context.APIContext) *user_model.User {
+ return GetUserByParamsName(ctx, ":username")
+}
diff --git a/routers/api/v1/user/hook.go b/routers/api/v1/user/hook.go
new file mode 100644
index 0000000..47b6498
--- /dev/null
+++ b/routers/api/v1/user/hook.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "net/http"
+
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+// ListHooks lists the authenticated user's webhooks
+func ListHooks(ctx *context.APIContext) {
+ // swagger:operation GET /user/hooks user userListHooks
+ // ---
+ // summary: List the authenticated user's webhooks
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/HookList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ utils.ListOwnerHooks(
+ ctx,
+ ctx.Doer,
+ )
+}
+
+// GetHook gets the authenticated user's webhook by ID
+func GetHook(ctx *context.APIContext) {
+ // swagger:operation GET /user/hooks/{id} user userGetHook
+ // ---
+ // summary: Get a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ hook, err := utils.GetOwnerHook(ctx, ctx.Doer.ID, ctx.ParamsInt64("id"))
+ if err != nil {
+ return
+ }
+
+ if !ctx.Doer.IsAdmin && hook.OwnerID != ctx.Doer.ID {
+ ctx.NotFound()
+ return
+ }
+
+ apiHook, err := webhook_service.ToHook(ctx.Doer.HomeLink(), hook)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ ctx.JSON(http.StatusOK, apiHook)
+}
+
+// CreateHook creates a webhook for the authenticated user
+func CreateHook(ctx *context.APIContext) {
+ // swagger:operation POST /user/hooks user userCreateHook
+ // ---
+ // summary: Create a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // required: true
+ // schema:
+ // "$ref": "#/definitions/CreateHookOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/Hook"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ utils.AddOwnerHook(
+ ctx,
+ ctx.Doer,
+ web.GetForm(ctx).(*api.CreateHookOption),
+ )
+}
+
+// EditHook modifies a webhook of the authenticated user
+func EditHook(ctx *context.APIContext) {
+ // swagger:operation PATCH /user/hooks/{id} user userEditHook
+ // ---
+ // summary: Update a hook
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to update
+ // type: integer
+ // format: int64
+ // required: true
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/EditHookOption"
+ // responses:
+ // "200":
+ // "$ref": "#/responses/Hook"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ utils.EditOwnerHook(
+ ctx,
+ ctx.Doer,
+ web.GetForm(ctx).(*api.EditHookOption),
+ ctx.ParamsInt64("id"),
+ )
+}
+
+// DeleteHook deletes a webhook of the authenticated user
+func DeleteHook(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/hooks/{id} user userDeleteHook
+ // ---
+ // summary: Delete a hook
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of the hook to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ utils.DeleteOwnerHook(
+ ctx,
+ ctx.Doer,
+ ctx.ParamsInt64("id"),
+ )
+}
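
A hedged sketch of creating a user-level webhook through the endpoint above. The body shape follows the CreateHookOption definition used elsewhere in the API; the hook type, target URL, and credentials are placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	payload := map[string]any{
		"type": "gitea", // webhook payload format; placeholder choice
		"config": map[string]string{
			"url":          "https://example.com/hook",
			"content_type": "json",
		},
		"events": []string{"push"},
		"active": true,
	}
	body, _ := json.Marshal(payload)
	req, _ := http.NewRequest(http.MethodPost, "https://forgejo.example.com/api/v1/user/hooks", bytes.NewReader(body))
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create hook:", resp.Status) // 201 Created on success
}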
diff --git a/routers/api/v1/user/key.go b/routers/api/v1/user/key.go
new file mode 100644
index 0000000..1b4ba0a
--- /dev/null
+++ b/routers/api/v1/user/key.go
@@ -0,0 +1,317 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ std_ctx "context"
+ "fmt"
+ "net/http"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/routers/api/v1/repo"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ asymkey_service "code.gitea.io/gitea/services/asymkey"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// appendPrivateInformation appends the owner and key type information to api.PublicKey
+func appendPrivateInformation(ctx std_ctx.Context, apiKey *api.PublicKey, key *asymkey_model.PublicKey, defaultUser *user_model.User) (*api.PublicKey, error) {
+ if key.Type == asymkey_model.KeyTypeDeploy {
+ apiKey.KeyType = "deploy"
+ } else if key.Type == asymkey_model.KeyTypeUser {
+ apiKey.KeyType = "user"
+
+ if defaultUser.ID == key.OwnerID {
+ apiKey.Owner = convert.ToUser(ctx, defaultUser, defaultUser)
+ } else {
+ user, err := user_model.GetUserByID(ctx, key.OwnerID)
+ if err != nil {
+ return apiKey, err
+ }
+ apiKey.Owner = convert.ToUser(ctx, user, user)
+ }
+ } else {
+ apiKey.KeyType = "unknown"
+ }
+ apiKey.ReadOnly = key.Mode == perm.AccessModeRead
+ return apiKey, nil
+}
+
+func composePublicKeysAPILink() string {
+ return setting.AppURL + "api/v1/user/keys/"
+}
+
+func listPublicKeys(ctx *context.APIContext, user *user_model.User) {
+ var keys []*asymkey_model.PublicKey
+ var err error
+ var count int
+
+ fingerprint := ctx.FormString("fingerprint")
+ username := ctx.Params("username")
+
+ if fingerprint != "" {
+ var userID int64 // Unrestricted
+ // Querying not just listing
+ if username != "" {
+ // Restrict to provided uid
+ userID = user.ID
+ }
+ keys, err = db.Find[asymkey_model.PublicKey](ctx, asymkey_model.FindPublicKeyOptions{
+ OwnerID: userID,
+ Fingerprint: fingerprint,
+ })
+ count = len(keys)
+ } else {
+ var total int64
+ // Use ListPublicKeys
+ keys, total, err = db.FindAndCount[asymkey_model.PublicKey](ctx, asymkey_model.FindPublicKeyOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ OwnerID: user.ID,
+ NotKeytype: asymkey_model.KeyTypePrincipal,
+ })
+ count = int(total)
+ }
+
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ListPublicKeys", err)
+ return
+ }
+
+ apiLink := composePublicKeysAPILink()
+ apiKeys := make([]*api.PublicKey, len(keys))
+ for i := range keys {
+ apiKeys[i] = convert.ToPublicKey(apiLink, keys[i])
+ if ctx.Doer.IsAdmin || ctx.Doer.ID == keys[i].OwnerID {
+ apiKeys[i], _ = appendPrivateInformation(ctx, apiKeys[i], keys[i], user)
+ }
+ }
+
+ ctx.SetTotalCountHeader(int64(count))
+ ctx.JSON(http.StatusOK, &apiKeys)
+}
+
+// ListMyPublicKeys lists all of the authenticated user's public keys
+func ListMyPublicKeys(ctx *context.APIContext) {
+ // swagger:operation GET /user/keys user userCurrentListKeys
+ // ---
+ // summary: List the authenticated user's public keys
+ // parameters:
+ // - name: fingerprint
+ // in: query
+ // description: fingerprint of the key
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PublicKeyList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ listPublicKeys(ctx, ctx.Doer)
+}
+
+// ListPublicKeys lists the given user's public keys
+func ListPublicKeys(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/keys user userListKeys
+ // ---
+ // summary: List the given user's public keys
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: fingerprint
+ // in: query
+ // description: fingerprint of the key
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PublicKeyList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listPublicKeys(ctx, ctx.ContextUser)
+}
+
+// GetPublicKey gets a public key
+func GetPublicKey(ctx *context.APIContext) {
+ // swagger:operation GET /user/keys/{id} user userCurrentGetKey
+ // ---
+ // summary: Get a public key
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of key to get
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/PublicKey"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ key, err := asymkey_model.GetPublicKeyByID(ctx, ctx.ParamsInt64(":id"))
+ if err != nil {
+ if asymkey_model.IsErrKeyNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetPublicKeyByID", err)
+ }
+ return
+ }
+
+ apiLink := composePublicKeysAPILink()
+ apiKey := convert.ToPublicKey(apiLink, key)
+ if ctx.Doer.IsAdmin || ctx.Doer.ID == key.OwnerID {
+ apiKey, _ = appendPrivateInformation(ctx, apiKey, key, ctx.Doer)
+ }
+ ctx.JSON(http.StatusOK, apiKey)
+}
+
+// CreateUserPublicKey creates a new public key for the user with the given ID.
+func CreateUserPublicKey(ctx *context.APIContext, form api.CreateKeyOption, uid int64) {
+ if user_model.IsFeatureDisabledWithLoginType(ctx.Doer, setting.UserFeatureManageSSHKeys) {
+ ctx.NotFound("Not Found", fmt.Errorf("ssh keys setting is not allowed to be visited"))
+ return
+ }
+
+ content, err := asymkey_model.CheckPublicKeyString(form.Key)
+ if err != nil {
+ repo.HandleCheckKeyStringError(ctx, err)
+ return
+ }
+
+ key, err := asymkey_model.AddPublicKey(ctx, uid, form.Title, content, 0)
+ if err != nil {
+ repo.HandleAddKeyError(ctx, err)
+ return
+ }
+ apiLink := composePublicKeysAPILink()
+ apiKey := convert.ToPublicKey(apiLink, key)
+ if ctx.Doer.IsAdmin || ctx.Doer.ID == key.OwnerID {
+ apiKey, _ = appendPrivateInformation(ctx, apiKey, key, ctx.Doer)
+ }
+ ctx.JSON(http.StatusCreated, apiKey)
+}
+
+// CreatePublicKey creates a public key for the authenticated user
+func CreatePublicKey(ctx *context.APIContext) {
+ // swagger:operation POST /user/keys user userCurrentPostKey
+ // ---
+ // summary: Create a public key
+ // consumes:
+ // - application/json
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/CreateKeyOption"
+ // responses:
+ // "201":
+ // "$ref": "#/responses/PublicKey"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ form := web.GetForm(ctx).(*api.CreateKeyOption)
+ CreateUserPublicKey(ctx, *form, ctx.Doer.ID)
+}
+
+// DeletePublicKey deletes a public key of the authenticated user
+func DeletePublicKey(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/keys/{id} user userCurrentDeleteKey
+ // ---
+ // summary: Delete a public key
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: id
+ // in: path
+ // description: id of key to delete
+ // type: integer
+ // format: int64
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if user_model.IsFeatureDisabledWithLoginType(ctx.Doer, setting.UserFeatureManageSSHKeys) {
+ ctx.NotFound("Not Found", fmt.Errorf("ssh keys setting is not allowed to be visited"))
+ return
+ }
+
+ id := ctx.ParamsInt64(":id")
+ externallyManaged, err := asymkey_model.PublicKeyIsExternallyManaged(ctx, id)
+ if err != nil {
+ if asymkey_model.IsErrKeyNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "PublicKeyIsExternallyManaged", err)
+ }
+ return
+ }
+
+ if externallyManaged {
+ ctx.Error(http.StatusForbidden, "", "SSH Key is externally managed for this user")
+ return
+ }
+
+ if err := asymkey_service.DeletePublicKey(ctx, ctx.Doer, id); err != nil {
+ if asymkey_model.IsErrKeyAccessDenied(err) {
+ ctx.Error(http.StatusForbidden, "", "You do not have access to this key")
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeletePublicKey", err)
+ }
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
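
A minimal sketch of adding an SSH public key via the create endpoint above, assuming a key file on disk and the usual token authentication; title, path, host, and token are placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		panic(err)
	}
	pub, err := os.ReadFile(filepath.Join(home, ".ssh", "id_ed25519.pub")) // placeholder path
	if err != nil {
		panic(err)
	}
	body, _ := json.Marshal(map[string]string{
		"title": "laptop", // placeholder title
		"key":   string(pub),
	})
	req, _ := http.NewRequest(http.MethodPost, "https://forgejo.example.com/api/v1/user/keys", bytes.NewReader(body))
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("create key:", resp.Status) // 201 Created, or 422 for an invalid/duplicate key
}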
diff --git a/routers/api/v1/user/quota.go b/routers/api/v1/user/quota.go
new file mode 100644
index 0000000..ab2881b
--- /dev/null
+++ b/routers/api/v1/user/quota.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/services/context"
+)
+
+// GetQuota returns the quota information for the authenticated user
+func GetQuota(ctx *context.APIContext) {
+ // swagger:operation GET /user/quota user userGetQuota
+ // ---
+ // summary: Get quota information for the authenticated user
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaInfo"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ shared.GetQuota(ctx, ctx.Doer.ID)
+}
+
+// CheckQuota returns whether the authenticated user is over the subject quota
+func CheckQuota(ctx *context.APIContext) {
+ // swagger:operation GET /user/quota/check user userCheckQuota
+ // ---
+ // summary: Check if the authenticated user is over quota for a given subject
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/boolean"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ shared.CheckQuota(ctx, ctx.Doer.ID)
+}
+
+// ListQuotaAttachments lists attachments affecting the authenticated user's quota
+func ListQuotaAttachments(ctx *context.APIContext) {
+ // swagger:operation GET /user/quota/attachments user userListQuotaAttachments
+ // ---
+ // summary: List the attachments affecting the authenticated user's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedAttachmentList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ shared.ListQuotaAttachments(ctx, ctx.Doer.ID)
+}
+
+// ListQuotaPackages lists packages affecting the authenticated user's quota
+func ListQuotaPackages(ctx *context.APIContext) {
+ // swagger:operation GET /user/quota/packages user userListQuotaPackages
+ // ---
+ // summary: List the packages affecting the authenticated user's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedPackageList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ shared.ListQuotaPackages(ctx, ctx.Doer.ID)
+}
+
+// ListQuotaArtifacts lists artifacts affecting the authenticated user's quota
+func ListQuotaArtifacts(ctx *context.APIContext) {
+ // swagger:operation GET /user/quota/artifacts user userListQuotaArtifacts
+ // ---
+ // summary: List the artifacts affecting the authenticated user's quota
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/QuotaUsedArtifactList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ shared.ListQuotaArtifacts(ctx, ctx.Doer.ID)
+}
diff --git a/routers/api/v1/user/repo.go b/routers/api/v1/user/repo.go
new file mode 100644
index 0000000..f2e11e4
--- /dev/null
+++ b/routers/api/v1/user/repo.go
@@ -0,0 +1,190 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "net/http"
+
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// listUserRepos - List the repositories owned by the given user.
+func listUserRepos(ctx *context.APIContext, u *user_model.User, private bool) {
+ opts := utils.GetListOptions(ctx)
+
+ repos, count, err := repo_model.GetUserRepositories(ctx, &repo_model.SearchRepoOptions{
+ Actor: u,
+ Private: private,
+ ListOptions: opts,
+ OrderBy: "id ASC",
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepositories", err)
+ return
+ }
+
+ if err := repos.LoadAttributes(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "RepositoryList.LoadAttributes", err)
+ return
+ }
+
+ apiRepos := make([]*api.Repository, 0, len(repos))
+ for i := range repos {
+ permission, err := access_model.GetUserRepoPermission(ctx, repos[i], ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+ return
+ }
+ if ctx.IsSigned && ctx.Doer.IsAdmin || permission.HasAccess() {
+ apiRepos = append(apiRepos, convert.ToRepo(ctx, repos[i], permission))
+ }
+ }
+
+ ctx.SetLinkHeader(int(count), opts.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &apiRepos)
+}
+
+// ListUserRepos - lists the repos owned by the given user.
+func ListUserRepos(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/repos user userListRepos
+ // ---
+ // summary: List the repos owned by the given user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ private := ctx.IsSigned
+ listUserRepos(ctx, ctx.ContextUser, private)
+}
+
+// ListMyRepos - lists the repositories you own or have access to.
+func ListMyRepos(ctx *context.APIContext) {
+ // swagger:operation GET /user/repos user userCurrentListRepos
+ // ---
+ // summary: List the repos that the authenticated user owns
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // - name: order_by
+ // in: query
+ // description: order the repositories by name (default), id, or size
+ // type: string
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ opts := &repo_model.SearchRepoOptions{
+ ListOptions: utils.GetListOptions(ctx),
+ Actor: ctx.Doer,
+ OwnerID: ctx.Doer.ID,
+ Private: ctx.IsSigned,
+ IncludeDescription: true,
+ }
+ orderBy := ctx.FormTrim("order_by")
+ switch orderBy {
+ case "name":
+ opts.OrderBy = "name ASC"
+ case "size":
+ opts.OrderBy = "size DESC"
+ case "id":
+ opts.OrderBy = "id ASC"
+ case "":
+ default:
+ ctx.Error(http.StatusUnprocessableEntity, "", "invalid order_by")
+ return
+ }
+
+ var err error
+ repos, count, err := repo_model.SearchRepository(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SearchRepository", err)
+ return
+ }
+
+ results := make([]*api.Repository, len(repos))
+ for i, repo := range repos {
+ if err = repo.LoadOwner(ctx); err != nil {
+ ctx.Error(http.StatusInternalServerError, "LoadOwner", err)
+ return
+ }
+ permission, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)
+ if err != nil {
+			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err)
+			return
+		}
+ results[i] = convert.ToRepo(ctx, repo, permission)
+ }
+
+ ctx.SetLinkHeader(int(count), opts.ListOptions.PageSize)
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, &results)
+}
+
+// ListOrgRepos - lists the repositories of an organization.
+func ListOrgRepos(ctx *context.APIContext) {
+ // swagger:operation GET /orgs/{org}/repos organization orgListRepos
+ // ---
+ // summary: List an organization's repos
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: org
+ // in: path
+ // description: name of the organization
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ listUserRepos(ctx, ctx.Org.Organization.AsUser(), ctx.IsSigned)
+}
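
ListMyRepos above accepts an order_by query parameter (name, id, or size) and reports the total via the X-Total-Count header set by SetTotalCountHeader. A client sketch, with host and token as placeholders; the decoded fields are a subset of the Repository response:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://forgejo.example.com/api/v1/user/repos?order_by=size&limit=10&page=1", nil)
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var repos []struct {
		FullName string `json:"full_name"`
		Size     int64  `json:"size"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&repos); err != nil {
		panic(err)
	}
	// The handler sets pagination metadata in headers rather than the body.
	fmt.Println("total repositories:", resp.Header.Get("X-Total-Count"))
	for _, r := range repos {
		fmt.Println(r.FullName, r.Size)
	}
}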
diff --git a/routers/api/v1/user/runners.go b/routers/api/v1/user/runners.go
new file mode 100644
index 0000000..dc4c187
--- /dev/null
+++ b/routers/api/v1/user/runners.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "code.gitea.io/gitea/routers/api/v1/shared"
+ "code.gitea.io/gitea/services/context"
+)
+
+// https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-a-registration-token-for-an-organization
+
+// GetRegistrationToken returns the token to register user runners
+func GetRegistrationToken(ctx *context.APIContext) {
+ // swagger:operation GET /user/actions/runners/registration-token user userGetRunnerRegistrationToken
+ // ---
+	// summary: Get a user's actions runner registration token
+ // produces:
+ // - application/json
+	// responses:
+ // "200":
+ // "$ref": "#/responses/RegistrationToken"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ shared.GetRegistrationToken(ctx, ctx.Doer.ID, 0)
+}
diff --git a/routers/api/v1/user/settings.go b/routers/api/v1/user/settings.go
new file mode 100644
index 0000000..173f06e
--- /dev/null
+++ b/routers/api/v1/user/settings.go
@@ -0,0 +1,75 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/optional"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/web"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+// GetUserSettings returns user settings
+func GetUserSettings(ctx *context.APIContext) {
+ // swagger:operation GET /user/settings user getUserSettings
+ // ---
+ // summary: Get user settings
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserSettings"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ ctx.JSON(http.StatusOK, convert.User2UserSettings(ctx.Doer))
+}
+
+// UpdateUserSettings updates and returns the user settings
+func UpdateUserSettings(ctx *context.APIContext) {
+ // swagger:operation PATCH /user/settings user updateUserSettings
+ // ---
+ // summary: Update user settings
+ // parameters:
+ // - name: body
+ // in: body
+ // schema:
+ // "$ref": "#/definitions/UserSettingsOptions"
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserSettings"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ form := web.GetForm(ctx).(*api.UserSettingsOptions)
+
+ opts := &user_service.UpdateOptions{
+ FullName: optional.FromPtr(form.FullName),
+ Description: optional.FromPtr(form.Description),
+ Pronouns: optional.FromPtr(form.Pronouns),
+ Website: optional.FromPtr(form.Website),
+ Location: optional.FromPtr(form.Location),
+ Language: optional.FromPtr(form.Language),
+ Theme: optional.FromPtr(form.Theme),
+ DiffViewStyle: optional.FromPtr(form.DiffViewStyle),
+ KeepEmailPrivate: optional.FromPtr(form.HideEmail),
+ KeepActivityPrivate: optional.FromPtr(form.HideActivity),
+ EnableRepoUnitHints: optional.FromPtr(form.EnableRepoUnitHints),
+ }
+ if err := user_service.UpdateUser(ctx, ctx.Doer, opts); err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.JSON(http.StatusOK, convert.User2UserSettings(ctx.Doer))
+}
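
Because UpdateUserSettings maps absent fields to "no change" through optional.FromPtr, a client can PATCH only the settings it wants to modify. A sketch with placeholder values (the theme name is illustrative, not a guaranteed option, and field names follow the UserSettingsOptions definition):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Only the fields present in the body are changed; omitted fields keep
	// their current values because the handler maps nil pointers to "no update".
	body, _ := json.Marshal(map[string]any{
		"theme":      "forgejo-dark", // placeholder theme name
		"hide_email": true,
	})
	req, _ := http.NewRequest(http.MethodPatch, "https://forgejo.example.com/api/v1/user/settings", bytes.NewReader(body))
	req.Header.Set("Authorization", "token YOUR_ACCESS_TOKEN")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("update settings:", resp.Status) // 200 with the updated settings on success
}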
diff --git a/routers/api/v1/user/star.go b/routers/api/v1/user/star.go
new file mode 100644
index 0000000..be84b13
--- /dev/null
+++ b/routers/api/v1/user/star.go
@@ -0,0 +1,213 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// Copyright 2024 The Forgejo Authors.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ std_context "context"
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+ "code.gitea.io/gitea/services/repository"
+)
+
+// getStarredRepos returns the repos that the given user has
+// starred
+func getStarredRepos(ctx std_context.Context, user *user_model.User, private bool, listOptions db.ListOptions) ([]*api.Repository, error) {
+ starredRepos, err := repo_model.GetStarredRepos(ctx, user.ID, private, listOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ repos := make([]*api.Repository, len(starredRepos))
+ for i, starred := range starredRepos {
+ permission, err := access_model.GetUserRepoPermission(ctx, starred, user)
+ if err != nil {
+ return nil, err
+ }
+ repos[i] = convert.ToRepo(ctx, starred, permission)
+ }
+ return repos, nil
+}
+
+// GetStarredRepos returns the repos that the given user has starred
+func GetStarredRepos(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/starred user userListStarred
+ // ---
+ // summary: The repos that the given user has starred
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ private := ctx.ContextUser.ID == ctx.Doer.ID
+ repos, err := getStarredRepos(ctx, ctx.ContextUser, private, utils.GetListOptions(ctx))
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "getStarredRepos", err)
+ return
+ }
+
+ ctx.SetTotalCountHeader(int64(ctx.ContextUser.NumStars))
+ ctx.JSON(http.StatusOK, &repos)
+}
+
+// GetMyStarredRepos returns the repos that the authenticated user has starred
+func GetMyStarredRepos(ctx *context.APIContext) {
+ // swagger:operation GET /user/starred user userCurrentListStarred
+ // ---
+ // summary: The repos that the authenticated user has starred
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ repos, err := getStarredRepos(ctx, ctx.Doer, true, utils.GetListOptions(ctx))
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "getStarredRepos", err)
+		return
+	}
+
+ ctx.SetTotalCountHeader(int64(ctx.Doer.NumStars))
+ ctx.JSON(http.StatusOK, &repos)
+}
+
+// IsStarring returns whether the authenticated user is starring the repo
+func IsStarring(ctx *context.APIContext) {
+ // swagger:operation GET /user/starred/{owner}/{repo} user userCurrentCheckStarring
+ // ---
+	// summary: Whether the authenticated user is starring the repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if repo_model.IsStaring(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID) {
+ ctx.Status(http.StatusNoContent)
+ } else {
+ ctx.NotFound()
+ }
+}
+
+// Star the repo specified in the APIContext, as the authenticated user
+func Star(ctx *context.APIContext) {
+ // swagger:operation PUT /user/starred/{owner}/{repo} user userCurrentPutStar
+ // ---
+ // summary: Star the given repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to star
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to star
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := repository.StarRepoAndSendLikeActivities(ctx, *ctx.Doer, ctx.Repo.Repository.ID, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "StarRepo", err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// Unstar the repo specified in the APIContext, as the authenticated user
+func Unstar(ctx *context.APIContext) {
+ // swagger:operation DELETE /user/starred/{owner}/{repo} user userCurrentDeleteStar
+ // ---
+ // summary: Unstar the given repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo to unstar
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo to unstar
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := repository.StarRepoAndSendLikeActivities(ctx, *ctx.Doer, ctx.Repo.Repository.ID, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "StarRepo", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
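Taken together, the handlers above implement the starring endpoints. The following standalone sketch (not part of the patch) shows how a client might exercise them; the instance URL, repository, and token are placeholder assumptions.

package main

import (
	"fmt"
	"net/http"
)

// Placeholder values; replace with a real instance, repository and token.
const (
	base  = "https://forgejo.example.com/api/v1"
	token = "token <personal-access-token>"
)

func call(method, url string) int {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	return resp.StatusCode
}

func main() {
	// PUT /user/starred/{owner}/{repo} -> 204 on success (Star above).
	fmt.Println(call(http.MethodPut, base+"/user/starred/alice/demo"))
	// GET /user/starred/{owner}/{repo} -> 204 if starred, 404 otherwise (IsStarring above).
	fmt.Println(call(http.MethodGet, base+"/user/starred/alice/demo"))
	// DELETE /user/starred/{owner}/{repo} -> 204 on success (Unstar above).
	fmt.Println(call(http.MethodDelete, base+"/user/starred/alice/demo"))
}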
diff --git a/routers/api/v1/user/user.go b/routers/api/v1/user/user.go
new file mode 100644
index 0000000..6c8cde7
--- /dev/null
+++ b/routers/api/v1/user/user.go
@@ -0,0 +1,322 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+
+ activities_model "code.gitea.io/gitea/models/activities"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// Search searches for users
+func Search(ctx *context.APIContext) {
+ // swagger:operation GET /users/search user userSearch
+ // ---
+ // summary: Search for users
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: q
+ // in: query
+ // description: keyword
+ // type: string
+ // - name: uid
+ // in: query
+ // description: ID of the user to search for
+ // type: integer
+ // format: int64
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // description: "SearchResults of a successful search"
+ // schema:
+ // type: object
+ // properties:
+ // ok:
+ // type: boolean
+ // data:
+ // type: array
+ // items:
+ // "$ref": "#/definitions/User"
+
+ listOptions := utils.GetListOptions(ctx)
+
+ uid := ctx.FormInt64("uid")
+ var users []*user_model.User
+ var maxResults int64
+ var err error
+
+ switch uid {
+ case user_model.GhostUserID:
+ maxResults = 1
+ users = []*user_model.User{user_model.NewGhostUser()}
+ case user_model.ActionsUserID:
+ maxResults = 1
+ users = []*user_model.User{user_model.NewActionsUser()}
+ default:
+ var visible []structs.VisibleType
+ if ctx.PublicOnly {
+ visible = []structs.VisibleType{structs.VisibleTypePublic}
+ }
+ users, maxResults, err = user_model.SearchUsers(ctx, &user_model.SearchUserOptions{
+ Actor: ctx.Doer,
+ Keyword: ctx.FormTrim("q"),
+ UID: uid,
+ Type: user_model.UserTypeIndividual,
+ Visible: visible,
+ ListOptions: listOptions,
+ })
+ if err != nil {
+ ctx.JSON(http.StatusInternalServerError, map[string]any{
+ "ok": false,
+ "error": err.Error(),
+ })
+ return
+ }
+ }
+
+ ctx.SetLinkHeader(int(maxResults), listOptions.PageSize)
+ ctx.SetTotalCountHeader(maxResults)
+
+ ctx.JSON(http.StatusOK, map[string]any{
+ "ok": true,
+ "data": convert.ToUsers(ctx, ctx.Doer, users),
+ })
+}
+
+// GetInfo gets a user's information
+func GetInfo(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username} user userGet
+ // ---
+ // summary: Get a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/User"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ if !user_model.IsUserVisibleToViewer(ctx, ctx.ContextUser, ctx.Doer) {
+ // fake ErrUserNotExist error message to not leak information about existence
+ ctx.NotFound("GetUserByName", user_model.ErrUserNotExist{Name: ctx.Params(":username")})
+ return
+ }
+ ctx.JSON(http.StatusOK, convert.ToUser(ctx, ctx.ContextUser, ctx.Doer))
+}
+
+// GetAuthenticatedUser gets the authenticated user's information
+func GetAuthenticatedUser(ctx *context.APIContext) {
+ // swagger:operation GET /user user userGetCurrent
+ // ---
+ // summary: Get the authenticated user
+ // produces:
+ // - application/json
+ // responses:
+ // "200":
+ // "$ref": "#/responses/User"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ ctx.JSON(http.StatusOK, convert.ToUser(ctx, ctx.Doer, ctx.Doer))
+}
+
+// GetUserHeatmapData is the handler to get a user's heatmap
+func GetUserHeatmapData(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/heatmap user userGetHeatmapData
+ // ---
+ // summary: Get a user's heatmap
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user to get
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/UserHeatmapData"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ heatmap, err := activities_model.GetUserHeatmapDataByUser(ctx, ctx.ContextUser, ctx.Doer)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetUserHeatmapDataByUser", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, heatmap)
+}
+
+// ListUserActivityFeeds lists a user's activity feeds
+func ListUserActivityFeeds(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/activities/feeds user userListActivityFeeds
+ // ---
+ // summary: List a user's activity feeds
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of user
+ // type: string
+ // required: true
+ // - name: only-performed-by
+ // in: query
+ // description: if true, only show actions performed by the requested user
+ // type: boolean
+ // - name: date
+ // in: query
+ // description: the date of the activities to be found
+ // type: string
+ // format: date
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/ActivityFeedsList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ includePrivate := ctx.IsSigned && (ctx.Doer.IsAdmin || ctx.Doer.ID == ctx.ContextUser.ID)
+ listOptions := utils.GetListOptions(ctx)
+
+ opts := activities_model.GetFeedsOptions{
+ RequestedUser: ctx.ContextUser,
+ Actor: ctx.Doer,
+ IncludePrivate: includePrivate,
+ OnlyPerformedBy: ctx.FormBool("only-performed-by"),
+ Date: ctx.FormString("date"),
+ ListOptions: listOptions,
+ }
+
+ feeds, count, err := activities_model.GetFeeds(ctx, opts)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetFeeds", err)
+ return
+ }
+ ctx.SetTotalCountHeader(count)
+
+ ctx.JSON(http.StatusOK, convert.ToActivities(ctx, feeds, ctx.Doer))
+}
+
+// ListBlockedUsers lists the authenticated user's blocked users.
+func ListBlockedUsers(ctx *context.APIContext) {
+ // swagger:operation GET /user/list_blocked user userListBlockedUsers
+ // ---
+ // summary: List the authenticated user's blocked users
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/BlockedUserList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ utils.ListUserBlockedUsers(ctx, ctx.Doer)
+}
+
+// BlockUser blocks a user from the doer.
+func BlockUser(ctx *context.APIContext) {
+ // swagger:operation PUT /user/block/{username} user userBlockUser
+ // ---
+ // summary: Blocks a user from the doer.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization not a user", ctx.ContextUser.Name))
+ return
+ }
+
+ utils.BlockUser(ctx, ctx.Doer, ctx.ContextUser)
+}
+
+// UnblockUser unblocks a user from the doer.
+func UnblockUser(ctx *context.APIContext) {
+ // swagger:operation PUT /user/unblock/{username} user userUnblockUser
+ // ---
+ // summary: Unblocks a user from the doer.
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // in: path
+ // description: username of the user
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+ // "404":
+ // "$ref": "#/responses/notFound"
+ // "422":
+ // "$ref": "#/responses/validationError"
+
+ if ctx.ContextUser.IsOrganization() {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Errorf("%s is an organization not a user", ctx.ContextUser.Name))
+ return
+ }
+
+ utils.UnblockUser(ctx, ctx.Doer, ctx.ContextUser)
+}
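The /users/search handler above returns an ad-hoc envelope ({"ok": ..., "data": [...]}) rather than a bare user array. A minimal consumer sketch, assuming a reachable instance URL and modelling only a couple of User fields:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// searchResponse mirrors the inline schema documented on the Search handler:
// an "ok" flag plus a "data" array of users. Only two User fields are modelled here.
type searchResponse struct {
	OK   bool `json:"ok"`
	Data []struct {
		ID       int64  `json:"id"`
		UserName string `json:"username"`
	} `json:"data"`
}

func main() {
	resp, err := http.Get("https://forgejo.example.com/api/v1/users/search?q=alice&limit=10")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result searchResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	for _, u := range result.Data {
		fmt.Printf("#%d %s\n", u.ID, u.UserName)
	}
}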
diff --git a/routers/api/v1/user/watch.go b/routers/api/v1/user/watch.go
new file mode 100644
index 0000000..dc27a38
--- /dev/null
+++ b/routers/api/v1/user/watch.go
@@ -0,0 +1,215 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package user
+
+import (
+ std_context "context"
+ "net/http"
+
+ "code.gitea.io/gitea/models/db"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/routers/api/v1/utils"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// getWatchedRepos returns the repos that the given user is watching
+func getWatchedRepos(ctx std_context.Context, user *user_model.User, private bool, listOptions db.ListOptions) ([]*api.Repository, int64, error) {
+ watchedRepos, total, err := repo_model.GetWatchedRepos(ctx, user.ID, private, listOptions)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ repos := make([]*api.Repository, len(watchedRepos))
+ for i, watched := range watchedRepos {
+ permission, err := access_model.GetUserRepoPermission(ctx, watched, user)
+ if err != nil {
+ return nil, 0, err
+ }
+ repos[i] = convert.ToRepo(ctx, watched, permission)
+ }
+ return repos, total, nil
+}
+
+// GetWatchedRepos returns the repos that the user specified in ctx is watching
+func GetWatchedRepos(ctx *context.APIContext) {
+ // swagger:operation GET /users/{username}/subscriptions user userListSubscriptions
+ // ---
+ // summary: List the repositories watched by a user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: username
+ // type: string
+ // in: path
+ // description: username of the user
+ // required: true
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+	private := ctx.IsSigned && ctx.ContextUser.ID == ctx.Doer.ID
+ repos, total, err := getWatchedRepos(ctx, ctx.ContextUser, private, utils.GetListOptions(ctx))
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "getWatchedRepos", err)
+		return
+	}
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, &repos)
+}
+
+// GetMyWatchedRepos returns the repos that the authenticated user is watching
+func GetMyWatchedRepos(ctx *context.APIContext) {
+ // swagger:operation GET /user/subscriptions user userCurrentListSubscriptions
+ // ---
+ // summary: List repositories watched by the authenticated user
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/RepositoryList"
+ // "401":
+ // "$ref": "#/responses/unauthorized"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ repos, total, err := getWatchedRepos(ctx, ctx.Doer, true, utils.GetListOptions(ctx))
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "getWatchedRepos", err)
+		return
+	}
+
+ ctx.SetTotalCountHeader(total)
+ ctx.JSON(http.StatusOK, &repos)
+}
+
+// IsWatching returns whether the authenticated user is watching the repo
+// specified in ctx
+func IsWatching(ctx *context.APIContext) {
+ // swagger:operation GET /repos/{owner}/{repo}/subscription repository userCurrentCheckSubscription
+ // ---
+ // summary: Check if the current user is watching a repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WatchInfo"
+ // "404":
+	//     description: User is not watching this repo or the repo does not exist
+
+ if repo_model.IsWatching(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID) {
+ ctx.JSON(http.StatusOK, api.WatchInfo{
+ Subscribed: true,
+ Ignored: false,
+ Reason: nil,
+ CreatedAt: ctx.Repo.Repository.CreatedUnix.AsTime(),
+ URL: subscriptionURL(ctx.Repo.Repository),
+ RepositoryURL: ctx.Repo.Repository.APIURL(),
+ })
+ } else {
+ ctx.NotFound()
+ }
+}
+
+// Watch the repo specified in ctx, as the authenticated user
+func Watch(ctx *context.APIContext) {
+ // swagger:operation PUT /repos/{owner}/{repo}/subscription repository userCurrentPutSubscription
+ // ---
+ // summary: Watch a repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "200":
+ // "$ref": "#/responses/WatchInfo"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := repo_model.WatchRepo(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID, true)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "WatchRepo", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, api.WatchInfo{
+ Subscribed: true,
+ Ignored: false,
+ Reason: nil,
+ CreatedAt: ctx.Repo.Repository.CreatedUnix.AsTime(),
+ URL: subscriptionURL(ctx.Repo.Repository),
+ RepositoryURL: ctx.Repo.Repository.APIURL(),
+ })
+}
+
+// Unwatch the repo specified in ctx, as the authenticated user
+func Unwatch(ctx *context.APIContext) {
+ // swagger:operation DELETE /repos/{owner}/{repo}/subscription repository userCurrentDeleteSubscription
+ // ---
+ // summary: Unwatch a repo
+ // parameters:
+ // - name: owner
+ // in: path
+ // description: owner of the repo
+ // type: string
+ // required: true
+ // - name: repo
+ // in: path
+ // description: name of the repo
+ // type: string
+ // required: true
+ // responses:
+ // "204":
+ // "$ref": "#/responses/empty"
+ // "404":
+ // "$ref": "#/responses/notFound"
+
+ err := repo_model.WatchRepo(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID, false)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "UnwatchRepo", err)
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
+
+// subscriptionURL returns the URL of the subscription API endpoint of a repo
+func subscriptionURL(repo *repo_model.Repository) string {
+ return repo.APIURL() + "/subscription"
+}
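As documented on IsWatching above, a 200 response carries a WatchInfo body while 404 means the user is not watching the repo (or it does not exist). A short client sketch under those assumptions; the instance URL and token are placeholders, and only part of WatchInfo is modelled:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"https://forgejo.example.com/api/v1/repos/alice/demo/subscription", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "token <personal-access-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		// Only the fields needed here are modelled; WatchInfo has more.
		var info struct {
			Subscribed    bool   `json:"subscribed"`
			RepositoryURL string `json:"repository_url"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
			panic(err)
		}
		fmt.Println("watching:", info.Subscribed, info.RepositoryURL)
	case http.StatusNotFound:
		fmt.Println("not watching (or repo does not exist)")
	default:
		fmt.Println("unexpected status:", resp.StatusCode)
	}
}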
diff --git a/routers/api/v1/utils/block.go b/routers/api/v1/utils/block.go
new file mode 100644
index 0000000..34fad96
--- /dev/null
+++ b/routers/api/v1/utils/block.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package utils
+
+import (
+ "net/http"
+
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/services/context"
+ user_service "code.gitea.io/gitea/services/user"
+)
+
+// ListUserBlockedUsers lists the blocked users of the provided doer.
+func ListUserBlockedUsers(ctx *context.APIContext, doer *user_model.User) {
+ count, err := user_model.CountBlockedUsers(ctx, doer.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ blockedUsers, err := user_model.ListBlockedUsers(ctx, doer.ID, GetListOptions(ctx))
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiBlockedUsers := make([]*api.BlockedUser, len(blockedUsers))
+ for i, blockedUser := range blockedUsers {
+ apiBlockedUsers[i] = &api.BlockedUser{
+ BlockID: blockedUser.ID,
+ Created: blockedUser.CreatedUnix.AsTime(),
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiBlockedUsers)
+}
+
+// BlockUser blocks the blockUser from the doer.
+func BlockUser(ctx *context.APIContext, doer, blockUser *user_model.User) {
+ err := user_service.BlockUser(ctx, doer.ID, blockUser.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
+
+// UnblockUser unblocks the blockUser from the doer.
+func UnblockUser(ctx *context.APIContext, doer, blockUser *user_model.User) {
+ err := user_model.UnblockUser(ctx, doer.ID, blockUser.ID)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ ctx.Status(http.StatusNoContent)
+}
diff --git a/routers/api/v1/utils/git.go b/routers/api/v1/utils/git.go
new file mode 100644
index 0000000..4e25137
--- /dev/null
+++ b/routers/api/v1/utils/git.go
@@ -0,0 +1,99 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package utils
+
+import (
+ gocontext "context"
+ "fmt"
+ "net/http"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/services/context"
+)
+
+// ResolveRefOrSha resolves a ref to its SHA if it exists
+func ResolveRefOrSha(ctx *context.APIContext, ref string) string {
+ if len(ref) == 0 {
+ ctx.Error(http.StatusBadRequest, "ref not given", nil)
+ return ""
+ }
+
+ sha := ref
+ // Search branches and tags
+ for _, refType := range []string{"heads", "tags"} {
+ refSHA, lastMethodName, err := searchRefCommitByType(ctx, refType, ref)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, lastMethodName, err)
+ return ""
+ }
+ if refSHA != "" {
+ sha = refSHA
+ break
+ }
+ }
+
+ sha = MustConvertToSHA1(ctx, ctx.Repo, sha)
+
+ if ctx.Repo.GitRepo != nil {
+ err := ctx.Repo.GitRepo.AddLastCommitCache(ctx.Repo.Repository.GetCommitsCountCacheKey(ref, ref != sha), ctx.Repo.Repository.FullName(), sha)
+ if err != nil {
+ log.Error("Unable to get commits count for %s in %s. Error: %v", sha, ctx.Repo.Repository.FullName(), err)
+ }
+ }
+
+ return sha
+}
+
+// GetGitRefs returns git references based on the given filter
+func GetGitRefs(ctx *context.APIContext, filter string) ([]*git.Reference, string, error) {
+ if ctx.Repo.GitRepo == nil {
+ return nil, "", fmt.Errorf("no open git repo found in context")
+ }
+ if len(filter) > 0 {
+ filter = "refs/" + filter
+ }
+ refs, err := ctx.Repo.GitRepo.GetRefsFiltered(filter)
+ return refs, "GetRefsFiltered", err
+}
+
+func searchRefCommitByType(ctx *context.APIContext, refType, filter string) (string, string, error) {
+ refs, lastMethodName, err := GetGitRefs(ctx, refType+"/"+filter) // Search by type
+ if err != nil {
+ return "", lastMethodName, err
+ }
+ if len(refs) > 0 {
+ return refs[0].Object.String(), "", nil // Return found SHA
+ }
+ return "", "", nil
+}
+
+// ConvertToObjectID returns a full-length object ID from a potential ID string
+func ConvertToObjectID(ctx gocontext.Context, repo *context.Repository, commitID string) (git.ObjectID, error) {
+ objectFormat := repo.GetObjectFormat()
+ if len(commitID) == objectFormat.FullLength() && objectFormat.IsValid(commitID) {
+ sha, err := git.NewIDFromString(commitID)
+ if err == nil {
+ return sha, nil
+ }
+ }
+
+ gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, repo.Repository)
+ if err != nil {
+ return objectFormat.EmptyObjectID(), fmt.Errorf("RepositoryFromContextOrOpen: %w", err)
+ }
+ defer closer.Close()
+
+ return gitRepo.ConvertToGitID(commitID)
+}
+
+// MustConvertToSHA1 returns a full-length SHA1 string from a potential ID string, or returns the original input if it cannot be converted to SHA1
+func MustConvertToSHA1(ctx gocontext.Context, repo *context.Repository, commitID string) string {
+ sha, err := ConvertToObjectID(ctx, repo, commitID)
+ if err != nil {
+ return commitID
+ }
+ return sha.String()
+}
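ResolveRefOrSha searches branch refs before tag refs and otherwise falls back to treating the input as a commit ID. A standalone, simplified illustration of that ordering follows; the ref table is made-up example data, not the real git lookup:

package main

import "fmt"

// resolve mimics the lookup order used by ResolveRefOrSha: "refs/heads/"
// (branches) is tried before "refs/tags/", and the input is returned
// unchanged when neither matches (it is then assumed to be a commit ID).
func resolve(refs map[string]string, ref string) string {
	for _, prefix := range []string{"refs/heads/", "refs/tags/"} {
		if sha, ok := refs[prefix+ref]; ok {
			return sha
		}
	}
	return ref
}

func main() {
	refs := map[string]string{
		"refs/heads/main": "1111111111111111111111111111111111111111",
		"refs/tags/v1.0":  "2222222222222222222222222222222222222222",
	}
	fmt.Println(resolve(refs, "main")) // resolved via refs/heads/
	fmt.Println(resolve(refs, "v1.0")) // resolved via refs/tags/
	fmt.Println(resolve(refs, "3333333333333333333333333333333333333333")) // passed through as a commit ID
}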
diff --git a/routers/api/v1/utils/hook.go b/routers/api/v1/utils/hook.go
new file mode 100644
index 0000000..f1abd49
--- /dev/null
+++ b/routers/api/v1/utils/hook.go
@@ -0,0 +1,419 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package utils
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/models/webhook"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+ "code.gitea.io/gitea/services/context"
+ webhook_service "code.gitea.io/gitea/services/webhook"
+)
+
+// ListOwnerHooks lists the webhooks of the provided owner
+func ListOwnerHooks(ctx *context.APIContext, owner *user_model.User) {
+ opts := &webhook.ListWebhookOptions{
+ ListOptions: GetListOptions(ctx),
+ OwnerID: owner.ID,
+ }
+
+ hooks, count, err := db.FindAndCount[webhook.Webhook](ctx, opts)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+
+ apiHooks := make([]*api.Hook, len(hooks))
+ for i, hook := range hooks {
+ apiHooks[i], err = webhook_service.ToHook(owner.HomeLink(), hook)
+ if err != nil {
+ ctx.InternalServerError(err)
+ return
+ }
+ }
+
+ ctx.SetTotalCountHeader(count)
+ ctx.JSON(http.StatusOK, apiHooks)
+}
+
+// GetOwnerHook gets a user or organization webhook. Errors are written to ctx.
+func GetOwnerHook(ctx *context.APIContext, ownerID, hookID int64) (*webhook.Webhook, error) {
+ w, err := webhook.GetWebhookByOwnerID(ctx, ownerID, hookID)
+ if err != nil {
+ if webhook.IsErrWebhookNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetWebhookByOwnerID", err)
+ }
+ return nil, err
+ }
+ return w, nil
+}
+
+// GetRepoHook gets a repo's webhook. If there is an error, it writes to `ctx`
+// accordingly and returns the error
+func GetRepoHook(ctx *context.APIContext, repoID, hookID int64) (*webhook.Webhook, error) {
+ w, err := webhook.GetWebhookByRepoID(ctx, repoID, hookID)
+ if err != nil {
+ if webhook.IsErrWebhookNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "GetWebhookByID", err)
+ }
+ return nil, err
+ }
+ return w, nil
+}
+
+// checkCreateHookOption checks if a CreateHookOption form is valid. If invalid,
+// it writes the appropriate error to `ctx`. Returns whether the form is valid
+func checkCreateHookOption(ctx *context.APIContext, form *api.CreateHookOption) bool {
+ if !webhook_service.IsValidHookTaskType(form.Type) {
+ ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("Invalid hook type: %s", form.Type))
+ return false
+ }
+ for _, name := range []string{"url", "content_type"} {
+ if _, ok := form.Config[name]; !ok {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Missing config option: "+name)
+ return false
+ }
+ }
+ if !webhook.IsValidHookContentType(form.Config["content_type"]) {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid content type")
+ return false
+ }
+ return true
+}
+
+// AddSystemHook adds a system hook
+func AddSystemHook(ctx *context.APIContext, form *api.CreateHookOption) {
+ hook, ok := addHook(ctx, form, 0, 0)
+ if ok {
+ h, err := webhook_service.ToHook(setting.AppSubURL+"/admin", hook)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToHook", err)
+ return
+ }
+ ctx.JSON(http.StatusCreated, h)
+ }
+}
+
+// AddOwnerHook adds a hook to a user or organization
+func AddOwnerHook(ctx *context.APIContext, owner *user_model.User, form *api.CreateHookOption) {
+ hook, ok := addHook(ctx, form, owner.ID, 0)
+ if !ok {
+ return
+ }
+ apiHook, ok := toAPIHook(ctx, owner.HomeLink(), hook)
+ if !ok {
+ return
+ }
+ ctx.JSON(http.StatusCreated, apiHook)
+}
+
+// AddRepoHook adds a hook to a repo. Writes to `ctx` accordingly
+func AddRepoHook(ctx *context.APIContext, form *api.CreateHookOption) {
+ repo := ctx.Repo
+ hook, ok := addHook(ctx, form, 0, repo.Repository.ID)
+ if !ok {
+ return
+ }
+ apiHook, ok := toAPIHook(ctx, repo.RepoLink, hook)
+ if !ok {
+ return
+ }
+ ctx.JSON(http.StatusCreated, apiHook)
+}
+
+// toAPIHook converts the hook to its API representation.
+// If there is an error, it writes to `ctx` accordingly. Returns (hook, ok)
+func toAPIHook(ctx *context.APIContext, repoLink string, hook *webhook.Webhook) (*api.Hook, bool) {
+ apiHook, err := webhook_service.ToHook(repoLink, hook)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "ToHook", err)
+ return nil, false
+ }
+ return apiHook, true
+}
+
+func issuesHook(events []string, event string) bool {
+ return util.SliceContainsString(events, event, true) || util.SliceContainsString(events, string(webhook_module.HookEventIssues), true)
+}
+
+func pullHook(events []string, event string) bool {
+ return util.SliceContainsString(events, event, true) || util.SliceContainsString(events, string(webhook_module.HookEventPullRequest), true)
+}
+
+// addHook adds the hook specified by `form`, `ownerID` and `repoID`. If there is
+// an error, it writes to `ctx` accordingly. Returns (webhook, ok)
+func addHook(ctx *context.APIContext, form *api.CreateHookOption, ownerID, repoID int64) (*webhook.Webhook, bool) {
+ var isSystemWebhook bool
+ if !checkCreateHookOption(ctx, form) {
+ return nil, false
+ }
+
+ if len(form.Events) == 0 {
+ form.Events = []string{"push"}
+ }
+ if form.Config["is_system_webhook"] != "" {
+ sw, err := strconv.ParseBool(form.Config["is_system_webhook"])
+ if err != nil {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid is_system_webhook value")
+ return nil, false
+ }
+ isSystemWebhook = sw
+ }
+ w := &webhook.Webhook{
+ OwnerID: ownerID,
+ RepoID: repoID,
+ URL: form.Config["url"],
+ ContentType: webhook.ToHookContentType(form.Config["content_type"]),
+ Secret: form.Config["secret"],
+ HTTPMethod: "POST",
+ IsSystemWebhook: isSystemWebhook,
+ HookEvent: &webhook_module.HookEvent{
+ ChooseEvents: true,
+ HookEvents: webhook_module.HookEvents{
+ Create: util.SliceContainsString(form.Events, string(webhook_module.HookEventCreate), true),
+ Delete: util.SliceContainsString(form.Events, string(webhook_module.HookEventDelete), true),
+ Fork: util.SliceContainsString(form.Events, string(webhook_module.HookEventFork), true),
+ Issues: issuesHook(form.Events, "issues_only"),
+ IssueAssign: issuesHook(form.Events, string(webhook_module.HookEventIssueAssign)),
+ IssueLabel: issuesHook(form.Events, string(webhook_module.HookEventIssueLabel)),
+ IssueMilestone: issuesHook(form.Events, string(webhook_module.HookEventIssueMilestone)),
+ IssueComment: issuesHook(form.Events, string(webhook_module.HookEventIssueComment)),
+ Push: util.SliceContainsString(form.Events, string(webhook_module.HookEventPush), true),
+ PullRequest: pullHook(form.Events, "pull_request_only"),
+ PullRequestAssign: pullHook(form.Events, string(webhook_module.HookEventPullRequestAssign)),
+ PullRequestLabel: pullHook(form.Events, string(webhook_module.HookEventPullRequestLabel)),
+ PullRequestMilestone: pullHook(form.Events, string(webhook_module.HookEventPullRequestMilestone)),
+ PullRequestComment: pullHook(form.Events, string(webhook_module.HookEventPullRequestComment)),
+ PullRequestReview: pullHook(form.Events, "pull_request_review"),
+ PullRequestReviewRequest: pullHook(form.Events, string(webhook_module.HookEventPullRequestReviewRequest)),
+ PullRequestSync: pullHook(form.Events, string(webhook_module.HookEventPullRequestSync)),
+ Wiki: util.SliceContainsString(form.Events, string(webhook_module.HookEventWiki), true),
+ Repository: util.SliceContainsString(form.Events, string(webhook_module.HookEventRepository), true),
+ Release: util.SliceContainsString(form.Events, string(webhook_module.HookEventRelease), true),
+ },
+ BranchFilter: form.BranchFilter,
+ },
+ IsActive: form.Active,
+ Type: form.Type,
+ }
+ err := w.SetHeaderAuthorization(form.AuthorizationHeader)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SetHeaderAuthorization", err)
+ return nil, false
+ }
+ if w.Type == webhook_module.SLACK {
+ channel, ok := form.Config["channel"]
+ if !ok {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Missing config option: channel")
+ return nil, false
+ }
+ channel = strings.TrimSpace(channel)
+
+ if !webhook_service.IsValidSlackChannel(channel) {
+ ctx.Error(http.StatusBadRequest, "", "Invalid slack channel name")
+ return nil, false
+ }
+
+ meta, err := json.Marshal(&webhook_service.SlackMeta{
+ Channel: channel,
+ Username: form.Config["username"],
+ IconURL: form.Config["icon_url"],
+ Color: form.Config["color"],
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "slack: JSON marshal failed", err)
+ return nil, false
+ }
+ w.Meta = string(meta)
+ }
+
+ if err := w.UpdateEvent(); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateEvent", err)
+ return nil, false
+ } else if err := webhook.CreateWebhook(ctx, w); err != nil {
+ ctx.Error(http.StatusInternalServerError, "CreateWebhook", err)
+ return nil, false
+ }
+ return w, true
+}
+
+// EditSystemHook edits the system webhook identified by hookID according to `form`. Writes to `ctx` accordingly
+func EditSystemHook(ctx *context.APIContext, form *api.EditHookOption, hookID int64) {
+ hook, err := webhook.GetSystemOrDefaultWebhook(ctx, hookID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetSystemOrDefaultWebhook", err)
+ return
+ }
+	if !editHook(ctx, form, hook) {
+		// editHook has already written the error to ctx
+		return
+	}
+ updated, err := webhook.GetSystemOrDefaultWebhook(ctx, hookID)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "GetSystemOrDefaultWebhook", err)
+ return
+ }
+ h, err := webhook_service.ToHook(setting.AppURL+"/admin", updated)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "convert.ToHook", err)
+ return
+ }
+ ctx.JSON(http.StatusOK, h)
+}
+
+// EditOwnerHook updates a webhook of a user or organization
+func EditOwnerHook(ctx *context.APIContext, owner *user_model.User, form *api.EditHookOption, hookID int64) {
+ hook, err := GetOwnerHook(ctx, owner.ID, hookID)
+ if err != nil {
+ return
+ }
+ if !editHook(ctx, form, hook) {
+ return
+ }
+ updated, err := GetOwnerHook(ctx, owner.ID, hookID)
+ if err != nil {
+ return
+ }
+ apiHook, ok := toAPIHook(ctx, owner.HomeLink(), updated)
+ if !ok {
+ return
+ }
+ ctx.JSON(http.StatusOK, apiHook)
+}
+
+// EditRepoHook edits the repo webhook identified by hookID according to `form`. Writes to `ctx` accordingly
+func EditRepoHook(ctx *context.APIContext, form *api.EditHookOption, hookID int64) {
+ repo := ctx.Repo
+ hook, err := GetRepoHook(ctx, repo.Repository.ID, hookID)
+ if err != nil {
+ return
+ }
+ if !editHook(ctx, form, hook) {
+ return
+ }
+ updated, err := GetRepoHook(ctx, repo.Repository.ID, hookID)
+ if err != nil {
+ return
+ }
+ apiHook, ok := toAPIHook(ctx, repo.RepoLink, updated)
+ if !ok {
+ return
+ }
+ ctx.JSON(http.StatusOK, apiHook)
+}
+
+// editHook edits the webhook `w` according to `form`. If an error occurs, it writes
+// to `ctx` accordingly. Returns whether the edit was successful
+func editHook(ctx *context.APIContext, form *api.EditHookOption, w *webhook.Webhook) bool {
+ if form.Config != nil {
+ if url, ok := form.Config["url"]; ok {
+ w.URL = url
+ }
+ if ct, ok := form.Config["content_type"]; ok {
+ if !webhook.IsValidHookContentType(ct) {
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid content type")
+ return false
+ }
+ w.ContentType = webhook.ToHookContentType(ct)
+ }
+
+ if w.Type == webhook_module.SLACK {
+ if channel, ok := form.Config["channel"]; ok {
+ meta, err := json.Marshal(&webhook_service.SlackMeta{
+ Channel: channel,
+ Username: form.Config["username"],
+ IconURL: form.Config["icon_url"],
+ Color: form.Config["color"],
+ })
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "slack: JSON marshal failed", err)
+ return false
+ }
+ w.Meta = string(meta)
+ }
+ }
+ }
+
+ // Update events
+ if len(form.Events) == 0 {
+ form.Events = []string{"push"}
+ }
+ w.PushOnly = false
+ w.SendEverything = false
+ w.ChooseEvents = true
+ w.Create = util.SliceContainsString(form.Events, string(webhook_module.HookEventCreate), true)
+ w.Push = util.SliceContainsString(form.Events, string(webhook_module.HookEventPush), true)
+ w.Delete = util.SliceContainsString(form.Events, string(webhook_module.HookEventDelete), true)
+ w.Fork = util.SliceContainsString(form.Events, string(webhook_module.HookEventFork), true)
+ w.Repository = util.SliceContainsString(form.Events, string(webhook_module.HookEventRepository), true)
+ w.Wiki = util.SliceContainsString(form.Events, string(webhook_module.HookEventWiki), true)
+ w.Release = util.SliceContainsString(form.Events, string(webhook_module.HookEventRelease), true)
+ w.BranchFilter = form.BranchFilter
+
+ err := w.SetHeaderAuthorization(form.AuthorizationHeader)
+ if err != nil {
+ ctx.Error(http.StatusInternalServerError, "SetHeaderAuthorization", err)
+ return false
+ }
+
+ // Issues
+ w.Issues = issuesHook(form.Events, "issues_only")
+ w.IssueAssign = issuesHook(form.Events, string(webhook_module.HookEventIssueAssign))
+ w.IssueLabel = issuesHook(form.Events, string(webhook_module.HookEventIssueLabel))
+ w.IssueMilestone = issuesHook(form.Events, string(webhook_module.HookEventIssueMilestone))
+ w.IssueComment = issuesHook(form.Events, string(webhook_module.HookEventIssueComment))
+
+ // Pull requests
+ w.PullRequest = pullHook(form.Events, "pull_request_only")
+ w.PullRequestAssign = pullHook(form.Events, string(webhook_module.HookEventPullRequestAssign))
+ w.PullRequestLabel = pullHook(form.Events, string(webhook_module.HookEventPullRequestLabel))
+ w.PullRequestMilestone = pullHook(form.Events, string(webhook_module.HookEventPullRequestMilestone))
+ w.PullRequestComment = pullHook(form.Events, string(webhook_module.HookEventPullRequestComment))
+ w.PullRequestReview = pullHook(form.Events, "pull_request_review")
+ w.PullRequestReviewRequest = pullHook(form.Events, string(webhook_module.HookEventPullRequestReviewRequest))
+ w.PullRequestSync = pullHook(form.Events, string(webhook_module.HookEventPullRequestSync))
+
+ if err := w.UpdateEvent(); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateEvent", err)
+ return false
+ }
+
+ if form.Active != nil {
+ w.IsActive = *form.Active
+ }
+
+ if err := webhook.UpdateWebhook(ctx, w); err != nil {
+ ctx.Error(http.StatusInternalServerError, "UpdateWebhook", err)
+ return false
+ }
+ return true
+}
+
+// DeleteOwnerHook deletes a webhook of the given user or organization.
+func DeleteOwnerHook(ctx *context.APIContext, owner *user_model.User, hookID int64) {
+ if err := webhook.DeleteWebhookByOwnerID(ctx, owner.ID, hookID); err != nil {
+ if webhook.IsErrWebhookNotExist(err) {
+ ctx.NotFound()
+ } else {
+ ctx.Error(http.StatusInternalServerError, "DeleteWebhookByOwnerID", err)
+ }
+ return
+ }
+ ctx.Status(http.StatusNoContent)
+}
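checkCreateHookOption and addHook above define what a valid hook payload must contain: a known hook type, config entries for url and content_type (plus channel for Slack hooks), and an optional events list that defaults to ["push"]. The sketch below builds such a payload client-side; the field names follow api.CreateHookOption, the local struct is an illustration rather than that type itself, and the target endpoint path is an assumption not shown in this file.

package main

import (
	"encoding/json"
	"fmt"
)

// createHook mirrors the shape accepted by the handlers that feed addHook.
// It is a local illustration, not the api.CreateHookOption type itself.
type createHook struct {
	Type         string            `json:"type"`
	Config       map[string]string `json:"config"`
	Events       []string          `json:"events"`
	BranchFilter string            `json:"branch_filter"`
	Active       bool              `json:"active"`
}

func main() {
	payload := createHook{
		Type: "gitea", // must pass webhook_service.IsValidHookTaskType
		Config: map[string]string{
			"url":          "https://ci.example.com/hook", // required by checkCreateHookOption
			"content_type": "json",                        // required and validated
			"secret":       "s3cret",
		},
		// If omitted, addHook falls back to ["push"].
		Events:       []string{"push", "pull_request"},
		BranchFilter: "main",
		Active:       true,
	}

	body, err := json.MarshalIndent(payload, "", "  ")
	if err != nil {
		panic(err)
	}
	// POST this, e.g. to /api/v1/repos/{owner}/{repo}/hooks (path assumed).
	fmt.Println(string(body))
}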
diff --git a/routers/api/v1/utils/page.go b/routers/api/v1/utils/page.go
new file mode 100644
index 0000000..024ba7b
--- /dev/null
+++ b/routers/api/v1/utils/page.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package utils
+
+import (
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/services/context"
+ "code.gitea.io/gitea/services/convert"
+)
+
+// GetListOptions returns list options using the page and limit parameters
+func GetListOptions(ctx *context.APIContext) db.ListOptions {
+ return db.ListOptions{
+ Page: ctx.FormInt("page"),
+ PageSize: convert.ToCorrectPageSize(ctx.FormInt("limit")),
+ }
+}