summaryrefslogtreecommitdiffstats
path: root/services/packages/container
diff options
context:
space:
mode:
authorDaniel Baumann <daniel@debian.org>2024-10-18 20:33:49 +0200
committerDaniel Baumann <daniel@debian.org>2024-10-18 20:33:49 +0200
commitdd136858f1ea40ad3c94191d647487fa4f31926c (patch)
tree58fec94a7b2a12510c9664b21793f1ed560c6518 /services/packages/container
parentInitial commit. (diff)
downloadforgejo-debian.tar.xz
forgejo-debian.zip
Adding upstream version 9.0.0.HEADupstream/9.0.0upstreamdebian
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'services/packages/container')
-rw-r--r--services/packages/container/blob_uploader.go133
-rw-r--r--services/packages/container/cleanup.go111
-rw-r--r--services/packages/container/cleanup_sha256.go158
-rw-r--r--services/packages/container/common.go35
4 files changed, 437 insertions, 0 deletions
diff --git a/services/packages/container/blob_uploader.go b/services/packages/container/blob_uploader.go
new file mode 100644
index 0000000..bae2e2d
--- /dev/null
+++ b/services/packages/container/blob_uploader.go
@@ -0,0 +1,133 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "errors"
+ "io"
+ "os"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+var (
+	// errWriteAfterRead occurs if Write is called after a read operation
+	errWriteAfterRead = errors.New("write is unsupported after a read operation")
+	// errOffsetMissmatch occurs if the file offset is different than the model
+	// NOTE(review): identifier misspells "mismatch"; left as-is because it is
+	// referenced outside this declaration.
+	errOffsetMissmatch = errors.New("offset mismatch between file and model")
+)
+
+// BlobUploader handles chunked blob uploads
+type BlobUploader struct {
+	*packages_model.PackageBlobUpload // persisted upload state (ID, BytesReceived, HashStateBytes)
+	*packages_module.MultiHasher      // running digests over all appended bytes
+	file    *os.File                  // backing file holding the uploaded chunks
+	reading bool                      // set once Read is called; later writes are rejected
+}
+
+// buildFilePath returns the absolute path of the chunk file for the given upload id.
+func buildFilePath(id string) string {
+	return util.FilePathJoinAbs(setting.Packages.ChunkedUploadPath, id)
+}
+
+// NewBlobUploader creates a new blob uploader for the given id
+//
+// It loads the persisted upload model, restores the hash state saved by a
+// previous Append (if any) and opens (or creates) the backing chunk file.
+func NewBlobUploader(ctx context.Context, id string) (*BlobUploader, error) {
+	upload, err := packages_model.GetBlobUploadByID(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+
+	hasher := packages_module.NewMultiHasher()
+	if len(upload.HashStateBytes) != 0 {
+		// resume hashing from the state persisted with the last chunk
+		if err := hasher.UnmarshalBinary(upload.HashStateBytes); err != nil {
+			return nil, err
+		}
+	}
+
+	file, err := os.OpenFile(buildFilePath(upload.ID), os.O_RDWR|os.O_CREATE, 0o666)
+	if err != nil {
+		return nil, err
+	}
+
+	return &BlobUploader{
+		PackageBlobUpload: upload,
+		MultiHasher:       hasher,
+		file:              file,
+		reading:           false,
+	}, nil
+}
+
+// Close implements io.Closer and closes the backing chunk file.
+// It does not delete the file or the model; see RemoveBlobUploadByID.
+func (u *BlobUploader) Close() error {
+	return u.file.Close()
+}
+
+// Append appends a chunk of data and updates the model
+//
+// The chunk is written to the end of the backing file and simultaneously fed
+// into the running hasher; the received byte count and the serialized hash
+// state are then persisted so the upload can be resumed later.
+func (u *BlobUploader) Append(ctx context.Context, r io.Reader) error {
+	// once Read was called the file offset no longer points at the end,
+	// so further writes are rejected
+	if u.reading {
+		return errWriteAfterRead
+	}
+
+	offset, err := u.file.Seek(0, io.SeekEnd)
+	if err != nil {
+		return err
+	}
+	// the file size must match the bytes recorded in the model,
+	// otherwise file and model have diverged
+	if offset != u.BytesReceived {
+		return errOffsetMissmatch
+	}
+
+	// write to the file and the hasher in one pass
+	n, err := io.Copy(io.MultiWriter(u.file, u.MultiHasher), r)
+	if err != nil {
+		return err
+	}
+
+	// fast path if nothing was written
+	if n == 0 {
+		return nil
+	}
+
+	u.BytesReceived += n
+
+	// persist the hash state so a later NewBlobUploader can resume hashing
+	u.HashStateBytes, err = u.MultiHasher.MarshalBinary()
+	if err != nil {
+		return err
+	}
+
+	return packages_model.UpdateBlobUpload(ctx, u.PackageBlobUpload)
+}
+
+// Size returns the total number of bytes received so far.
+func (u *BlobUploader) Size() int64 {
+	return u.BytesReceived
+}
+
+// Read implements io.Reader
+//
+// The first call rewinds the backing file to the start and flips the
+// uploader into read mode, after which Append is rejected.
+func (u *BlobUploader) Read(p []byte) (int, error) {
+	if !u.reading {
+		if _, err := u.file.Seek(0, io.SeekStart); err != nil {
+			return 0, err
+		}
+		u.reading = true
+	}
+
+	return u.file.Read(p)
+}
+
+// RemoveBlobUploadByID deletes the data and the model of a blob upload
+func RemoveBlobUploadByID(ctx context.Context, id string) error {
+	if err := packages_model.DeleteBlobUploadByID(ctx, id); err != nil {
+		return err
+	}
+
+	// the chunk file may never have been created (no chunk was ever
+	// appended), so a missing file is not an error
+	err := os.Remove(buildFilePath(id))
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	return nil
+}
diff --git a/services/packages/container/cleanup.go b/services/packages/container/cleanup.go
new file mode 100644
index 0000000..b5563c6
--- /dev/null
+++ b/services/packages/container/cleanup.go
@@ -0,0 +1,111 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_model "code.gitea.io/gitea/models/packages/container"
+ "code.gitea.io/gitea/modules/optional"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+// Cleanup removes expired container data
+//
+// The cleaners run in order and the first error aborts the remaining ones.
+func Cleanup(ctx context.Context, olderThan time.Duration) error {
+	cleaners := []func(context.Context, time.Duration) error{
+		cleanupExpiredBlobUploads,
+		CleanupSHA256,
+		cleanupExpiredUploadedBlobs,
+	}
+	for _, clean := range cleaners {
+		if err := clean(ctx, olderThan); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// cleanupExpiredBlobUploads removes expired blob uploads
+func cleanupExpiredBlobUploads(ctx context.Context, olderThan time.Duration) error {
+	uploads, err := packages_model.FindExpiredBlobUploads(ctx, olderThan)
+	if err != nil {
+		return err
+	}
+
+	// delete the model and the chunk file of every expired upload
+	for _, upload := range uploads {
+		if err := RemoveBlobUploadByID(ctx, upload.ID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// cleanupExpiredUploadedBlobs removes expired uploaded blobs not referenced by a manifest
+func cleanupExpiredUploadedBlobs(ctx context.Context, olderThan time.Duration) error {
+	// delete the expired blob files first
+	pfs, err := container_model.SearchExpiredUploadedBlobs(ctx, olderThan)
+	if err != nil {
+		return err
+	}
+
+	for _, pf := range pfs {
+		if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+			return err
+		}
+	}
+
+	// then remove the internal "upload" versions that no longer own any file
+	pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+		Type: packages_model.TypeContainer,
+		Version: packages_model.SearchValue{
+			ExactMatch: true,
+			Value:      container_model.UploadVersion,
+		},
+		IsInternal: optional.Some(true),
+		HasFiles:   optional.Some(false),
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, pv := range pvs {
+		// drop the version properties before the version itself
+		if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypeVersion, pv.ID); err != nil {
+			return err
+		}
+
+		if err := packages_model.DeleteVersionByID(ctx, pv.ID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ShouldBeSkipped reports whether a cleanup rule must keep the given package
+// version: the "latest" tag is always kept, and a digest (untagged) version
+// is kept while another manifest of the same package still references it.
+// The rule itself (pcr) is currently not consulted by this check.
+func ShouldBeSkipped(ctx context.Context, pcr *packages_model.PackageCleanupRule, p *packages_model.Package, pv *packages_model.PackageVersion) (bool, error) {
+	// Always skip the "latest" tag
+	if pv.LowerVersion == "latest" {
+		return true, nil
+	}
+
+	// Check if the version is a digest (or untagged)
+	if digest.Digest(pv.LowerVersion).Validate() == nil {
+		// Check if there is another manifest referencing this version
+		has, err := packages_model.ExistVersion(ctx, &packages_model.PackageSearchOptions{
+			PackageID: p.ID,
+			Properties: map[string]string{
+				container_module.PropertyManifestReference: pv.LowerVersion,
+			},
+		})
+		if err != nil {
+			return false, err
+		}
+
+		// Skip it if the version is referenced
+		if has {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
diff --git a/services/packages/container/cleanup_sha256.go b/services/packages/container/cleanup_sha256.go
new file mode 100644
index 0000000..16afc74
--- /dev/null
+++ b/services/packages/container/cleanup_sha256.go
@@ -0,0 +1,158 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package container
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+var (
+	// SHA256BatchSize is the number of package versions deleted per batch.
+	SHA256BatchSize = 500
+	// SHA256Log describes the task and is embedded in the start/finish lines.
+	SHA256Log = "cleanup dangling images with a sha256:* version"
+	// SHA256LogStart is logged when the cleanup begins.
+	SHA256LogStart = "Start to " + SHA256Log
+	// SHA256LogFinish is logged when the cleanup ends.
+	SHA256LogFinish = "Finished to " + SHA256Log
+)
+
+// CleanupSHA256 deletes dangling sha256:* container versions, logging the
+// start and the end of the operation around the actual work.
+func CleanupSHA256(ctx context.Context, olderThan time.Duration) error {
+	log.Info(SHA256LogStart)
+	defer log.Info(SHA256LogFinish)
+	return cleanupSHA256(ctx, olderThan)
+}
+
+// cleanupSHA256 deletes, inside a single transaction, all container package
+// versions whose version is a sha256:* digest that no index manifest
+// references and that are older than olderThan.
+func cleanupSHA256(outerCtx context.Context, olderThan time.Duration) error {
+	ctx, committer, err := db.TxContext(outerCtx)
+	if err != nil {
+		return err
+	}
+	// rolls the transaction back unless Commit succeeds below
+	defer committer.Close()
+
+	foundAtLeastOneSHA256 := false
+	type packageVersion struct {
+		id      int64
+		created timeutil.TimeStamp
+	}
+	shaToPackageVersion := make(map[string]packageVersion, 100)
+	knownSHA := make(map[string]any, 100)
+
+	// compute before making the inventory to not race against ongoing
+	// image creations
+	old := timeutil.TimeStamp(time.Now().Add(-olderThan).Unix())
+
+	log.Debug("Look for all package_version.version that start with sha256:")
+
+	// Iterate over all container versions in ascending order and store
+	// in shaToPackageVersion all versions with a sha256: prefix. If an index
+	// manifest is found, the sha256: digest it references are removed
+	// from shaToPackageVersion. If the sha256: digest found in an index
+	// manifest is not already in shaToPackageVersion, it is stored in
+	// knownSHA to be dealt with later.
+	//
+	// Although it is theoretically possible that a sha256: is uploaded
+	// after the index manifest that references it, this is not the
+	// normal order of operations. First the sha256: version is uploaded
+	// and then the index manifest. When the iteration completes,
+	// knownSHA will therefore be empty most of the time and
+	// shaToPackageVersion will only contain unreferenced sha256: versions.
+	if err := db.GetEngine(ctx).
+		Select("`package_version`.`id`, `package_version`.`created_unix`, `package_version`.`lower_version`, `package_version`.`metadata_json`").
+		Join("INNER", "`package`", "`package`.`id` = `package_version`.`package_id`").
+		Where("`package`.`type` = ?", packages.TypeContainer).
+		OrderBy("`package_version`.`id` ASC").
+		Iterate(new(packages.PackageVersion), func(_ int, bean any) error {
+			v := bean.(*packages.PackageVersion)
+			if strings.HasPrefix(v.LowerVersion, "sha256:") {
+				shaToPackageVersion[v.LowerVersion] = packageVersion{id: v.ID, created: v.CreatedUnix}
+				foundAtLeastOneSHA256 = true
+			} else if strings.Contains(v.MetadataJSON, `"manifests":[{`) {
+				// cheap substring probe before paying for a full JSON decode
+				var metadata container_module.Metadata
+				if err := json.Unmarshal([]byte(v.MetadataJSON), &metadata); err != nil {
+					log.Error("package_version.id = %d package_version.metadata_json %s is not a JSON string containing valid metadata. It was ignored but it is an inconsistency in the database that should be looked at. %v", v.ID, v.MetadataJSON, err)
+					return nil
+				}
+				for _, manifest := range metadata.Manifests {
+					if _, ok := shaToPackageVersion[manifest.Digest]; ok {
+						delete(shaToPackageVersion, manifest.Digest)
+					} else {
+						knownSHA[manifest.Digest] = true
+					}
+				}
+			}
+			return nil
+		}); err != nil {
+		return err
+	}
+
+	// drop digests referenced by an index manifest seen before the digest itself
+	for sha := range knownSHA {
+		delete(shaToPackageVersion, sha)
+	}
+
+	if len(shaToPackageVersion) == 0 {
+		if foundAtLeastOneSHA256 {
+			log.Debug("All container images with a version matching sha256:* are referenced by an index manifest")
+		} else {
+			log.Debug("There are no container images with a version matching sha256:*")
+		}
+		log.Info("Nothing to cleanup")
+		return nil
+	}
+
+	found := len(shaToPackageVersion)
+
+	log.Warn("%d container image(s) with a version matching sha256:* are not referenced by an index manifest", found)
+
+	log.Debug("Deleting unreferenced image versions from `package_version`, `package_file` and `package_property` (%d at a time)", SHA256BatchSize)
+
+	// only versions created before the cutoff are candidates for deletion
+	packageVersionIDs := make([]int64, 0, SHA256BatchSize)
+	tooYoung := 0
+	for _, p := range shaToPackageVersion {
+		if p.created < old {
+			packageVersionIDs = append(packageVersionIDs, p.id)
+		} else {
+			tooYoung++
+		}
+	}
+
+	if tooYoung > 0 {
+		log.Warn("%d out of %d container image(s) are not deleted because they were created less than %v ago", tooYoung, found, olderThan)
+	}
+
+	// delete in batches of SHA256BatchSize: files and their properties first,
+	// then the versions and their properties
+	for len(packageVersionIDs) > 0 {
+		upper := min(len(packageVersionIDs), SHA256BatchSize)
+		versionIDs := packageVersionIDs[0:upper]
+
+		var packageFileIDs []int64
+		if err := db.GetEngine(ctx).Select("id").Table("package_file").In("version_id", versionIDs).Find(&packageFileIDs); err != nil {
+			return err
+		}
+		log.Info("Removing %d entries from `package_file` and `package_property`", len(packageFileIDs))
+		if _, err := db.GetEngine(ctx).In("id", packageFileIDs).Delete(&packages.PackageFile{}); err != nil {
+			return err
+		}
+		if _, err := db.GetEngine(ctx).In("ref_id", packageFileIDs).And("ref_type = ?", packages.PropertyTypeFile).Delete(&packages.PackageProperty{}); err != nil {
+			return err
+		}
+
+		log.Info("Removing %d entries from `package_version` and `package_property`", upper)
+		if _, err := db.GetEngine(ctx).In("id", versionIDs).Delete(&packages.PackageVersion{}); err != nil {
+			return err
+		}
+		if _, err := db.GetEngine(ctx).In("ref_id", versionIDs).And("ref_type = ?", packages.PropertyTypeVersion).Delete(&packages.PackageProperty{}); err != nil {
+			return err
+		}
+
+		packageVersionIDs = packageVersionIDs[upper:]
+	}
+
+	return committer.Commit()
+}
diff --git a/services/packages/container/common.go b/services/packages/container/common.go
new file mode 100644
index 0000000..5a14ed5
--- /dev/null
+++ b/services/packages/container/common.go
@@ -0,0 +1,35 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ user_model "code.gitea.io/gitea/models/user"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+)
+
+// UpdateRepositoryNames updates the repository name property for all packages of the specific owner
+//
+// For every container package of the owner the repository property is
+// recreated as "<new owner (lowercased)>/<package name>".
+func UpdateRepositoryNames(ctx context.Context, owner *user_model.User, newOwnerName string) error {
+	containerPackages, err := packages_model.GetPackagesByType(ctx, owner.ID, packages_model.TypeContainer)
+	if err != nil {
+		return err
+	}
+
+	ownerPrefix := strings.ToLower(newOwnerName)
+
+	for _, pkg := range containerPackages {
+		// replace the property: delete the old value, insert the new one
+		if err := packages_model.DeletePropertyByName(ctx, packages_model.PropertyTypePackage, pkg.ID, container_module.PropertyRepository); err != nil {
+			return err
+		}
+
+		if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypePackage, pkg.ID, container_module.PropertyRepository, ownerPrefix+"/"+pkg.LowerName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}