author    Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer Daniel Baumann <daniel@debian.org>  2024-12-12 23:57:56 +0100
commit    e68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree      97775d6c13b0f416af55314eb6a89ef792474615 /services/packages
parent    Initial commit. (diff)
download  forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.tar.xz
          forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.zip
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat:
-rw-r--r--  services/packages/alpine/repository.go            337
-rw-r--r--  services/packages/arch/repository.go               368
-rw-r--r--  services/packages/auth.go                           75
-rw-r--r--  services/packages/cargo/index.go                   315
-rw-r--r--  services/packages/cleanup/cleanup.go               198
-rw-r--r--  services/packages/cleanup/cleanup_sha256_test.go   116
-rw-r--r--  services/packages/cleanup/main_test.go              14
-rw-r--r--  services/packages/container/blob_uploader.go       133
-rw-r--r--  services/packages/container/cleanup.go             111
-rw-r--r--  services/packages/container/cleanup_sha256.go      158
-rw-r--r--  services/packages/container/common.go               35
-rw-r--r--  services/packages/debian/repository.go             413
-rw-r--r--  services/packages/packages.go                      665
-rw-r--r--  services/packages/rpm/repository.go                674

14 files changed, 3612 insertions(+), 0 deletions(-)
diff --git a/services/packages/alpine/repository.go b/services/packages/alpine/repository.go
new file mode 100644
index 0000000..92f475b
--- /dev/null
+++ b/services/packages/alpine/repository.go
@@ -0,0 +1,337 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alpine
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ alpine_model "code.gitea.io/gitea/models/packages/alpine"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ alpine_module "code.gitea.io/gitea/modules/packages/alpine"
+ "code.gitea.io/gitea/modules/util"
+ packages_service "code.gitea.io/gitea/services/packages"
+)
+
+const (
+ IndexFilename = "APKINDEX"
+ IndexArchiveFilename = IndexFilename + ".tar.gz"
+)
+
+// GetOrCreateRepositoryVersion gets or creates the internal repository package
+// The Alpine registry needs multiple index files which are stored in this package.
+func GetOrCreateRepositoryVersion(ctx context.Context, ownerID int64) (*packages_model.PackageVersion, error) {
+ return packages_service.GetOrCreateInternalPackageVersion(ctx, ownerID, packages_model.TypeAlpine, alpine_module.RepositoryPackage, alpine_module.RepositoryVersion)
+}
+
+// GetOrCreateKeyPair gets or creates the RSA keys used to sign repository files
+func GetOrCreateKeyPair(ctx context.Context, ownerID int64) (string, string, error) {
+ priv, err := user_model.GetSetting(ctx, ownerID, alpine_module.SettingKeyPrivate)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ pub, err := user_model.GetSetting(ctx, ownerID, alpine_module.SettingKeyPublic)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ if priv == "" || pub == "" {
+ priv, pub, err = util.GenerateKeyPair(4096)
+ if err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, alpine_module.SettingKeyPrivate, priv); err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, alpine_module.SettingKeyPublic, pub); err != nil {
+ return "", "", err
+ }
+ }
+
+ return priv, pub, nil
+}
+
+// BuildAllRepositoryFiles (re)builds all repository files for every available distribution, component and architecture
+func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ // 1. Delete all existing repository files
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ // 2. (Re)Build repository files for existing packages
+ branches, err := alpine_model.GetBranches(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ for _, branch := range branches {
+ repositories, err := alpine_model.GetRepositories(ctx, ownerID, branch)
+ if err != nil {
+ return err
+ }
+ for _, repository := range repositories {
+ architectures, err := alpine_model.GetArchitectures(ctx, ownerID, repository)
+ if err != nil {
+ return err
+ }
+ for _, architecture := range architectures {
+ if err := buildPackagesIndex(ctx, ownerID, pv, branch, repository, architecture); err != nil {
+ return fmt.Errorf("failed to build repository files [%s/%s/%s]: %w", branch, repository, architecture, err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// BuildSpecificRepositoryFiles builds the index files for a specific branch, repository and architecture
+func BuildSpecificRepositoryFiles(ctx context.Context, ownerID int64, branch, repository, architecture string) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ return buildPackagesIndex(ctx, ownerID, pv, branch, repository, architecture)
+}
+
+type packageData struct {
+ Package *packages_model.Package
+ Version *packages_model.PackageVersion
+ Blob *packages_model.PackageBlob
+ VersionMetadata *alpine_module.VersionMetadata
+ FileMetadata *alpine_module.FileMetadata
+}
+
+type packageCache = map[*packages_model.PackageFile]*packageData
+
+// https://wiki.alpinelinux.org/wiki/Apk_spec#APKINDEX_Format
+func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *packages_model.PackageVersion, branch, repository, architecture string) error {
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ OwnerID: ownerID,
+ PackageType: packages_model.TypeAlpine,
+ Query: "%.apk",
+ Properties: map[string]string{
+ alpine_module.PropertyBranch: branch,
+ alpine_module.PropertyRepository: repository,
+ alpine_module.PropertyArchitecture: architecture,
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ // Delete the package indices if there are no packages
+ if len(pfs) == 0 {
+ pf, err := packages_model.GetFileForVersionByName(ctx, repoVersion.ID, IndexArchiveFilename, fmt.Sprintf("%s|%s|%s", branch, repository, architecture))
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return err
+ } else if pf == nil {
+ return nil
+ }
+
+ return packages_service.DeletePackageFile(ctx, pf)
+ }
+
+ // Cache data needed for all repository files
+ cache := make(packageCache)
+ for _, pf := range pfs {
+ pv, err := packages_model.GetVersionByID(ctx, pf.VersionID)
+ if err != nil {
+ return err
+ }
+ p, err := packages_model.GetPackageByID(ctx, pv.PackageID)
+ if err != nil {
+ return err
+ }
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ return err
+ }
+ pps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, pf.ID, alpine_module.PropertyMetadata)
+ if err != nil {
+ return err
+ }
+
+ pd := &packageData{
+ Package: p,
+ Version: pv,
+ Blob: pb,
+ }
+
+ if err := json.Unmarshal([]byte(pv.MetadataJSON), &pd.VersionMetadata); err != nil {
+ return err
+ }
+ if len(pps) > 0 {
+ if err := json.Unmarshal([]byte(pps[0].Value), &pd.FileMetadata); err != nil {
+ return err
+ }
+ }
+
+ cache[pf] = pd
+ }
+
+ var buf bytes.Buffer
+ for _, pf := range pfs {
+ pd := cache[pf]
+
+ fmt.Fprintf(&buf, "C:%s\n", pd.FileMetadata.Checksum)
+ fmt.Fprintf(&buf, "P:%s\n", pd.Package.Name)
+ fmt.Fprintf(&buf, "V:%s\n", pd.Version.Version)
+ fmt.Fprintf(&buf, "A:%s\n", pd.FileMetadata.Architecture)
+ if pd.VersionMetadata.Description != "" {
+ fmt.Fprintf(&buf, "T:%s\n", pd.VersionMetadata.Description)
+ }
+ if pd.VersionMetadata.ProjectURL != "" {
+ fmt.Fprintf(&buf, "U:%s\n", pd.VersionMetadata.ProjectURL)
+ }
+ if pd.VersionMetadata.License != "" {
+ fmt.Fprintf(&buf, "L:%s\n", pd.VersionMetadata.License)
+ }
+ fmt.Fprintf(&buf, "S:%d\n", pd.Blob.Size)
+ fmt.Fprintf(&buf, "I:%d\n", pd.FileMetadata.Size)
+ fmt.Fprintf(&buf, "o:%s\n", pd.FileMetadata.Origin)
+ fmt.Fprintf(&buf, "m:%s\n", pd.VersionMetadata.Maintainer)
+ fmt.Fprintf(&buf, "t:%d\n", pd.FileMetadata.BuildDate)
+ if pd.FileMetadata.CommitHash != "" {
+ fmt.Fprintf(&buf, "c:%s\n", pd.FileMetadata.CommitHash)
+ }
+ if len(pd.FileMetadata.Dependencies) > 0 {
+ fmt.Fprintf(&buf, "D:%s\n", strings.Join(pd.FileMetadata.Dependencies, " "))
+ }
+ if len(pd.FileMetadata.Provides) > 0 {
+ fmt.Fprintf(&buf, "p:%s\n", strings.Join(pd.FileMetadata.Provides, " "))
+ }
+ if pd.FileMetadata.InstallIf != "" {
+ fmt.Fprintf(&buf, "i:%s\n", pd.FileMetadata.InstallIf)
+ }
+ if pd.FileMetadata.ProviderPriority > 0 {
+ fmt.Fprintf(&buf, "k:%d\n", pd.FileMetadata.ProviderPriority)
+ }
+ fmt.Fprint(&buf, "\n")
+ }
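+ // Illustrative APKINDEX record (hypothetical values) as produced by the
+ // loop above; records are separated by a blank line:
+ //
+ // C:Q1bNtEWYSywmXR8c3PBWkY9+N/Pbo=
+ // P:mypkg
+ // V:1.0.0-r0
+ // A:x86_64
+ // T:An example package
+ // S:12345
+ // I:45056
+ // o:mypkg
+ // m:Jane Doe <jane@example.org>
+ // t:1700000000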
+
+ unsignedIndexContent, err := packages_module.NewHashedBuffer()
+ if err != nil {
+ return err
+ }
+ defer unsignedIndexContent.Close()
+
+ h := sha1.New()
+
+ if err := writeGzipStream(io.MultiWriter(unsignedIndexContent, h), IndexFilename, buf.Bytes(), true); err != nil {
+ return err
+ }
+
+ priv, _, err := GetOrCreateKeyPair(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ privPem, _ := pem.Decode([]byte(priv))
+ if privPem == nil {
+ return fmt.Errorf("failed to decode private key pem")
+ }
+
+ privKey, err := x509.ParsePKCS1PrivateKey(privPem.Bytes)
+ if err != nil {
+ return err
+ }
+
+ sign, err := rsa.SignPKCS1v15(rand.Reader, privKey, crypto.SHA1, h.Sum(nil))
+ if err != nil {
+ return err
+ }
+
+ owner, err := user_model.GetUserByID(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ fingerprint, err := util.CreatePublicKeyFingerprint(&privKey.PublicKey)
+ if err != nil {
+ return err
+ }
+
+ signedIndexContent, err := packages_module.NewHashedBuffer()
+ if err != nil {
+ return err
+ }
+ defer signedIndexContent.Close()
+
+ if err := writeGzipStream(
+ signedIndexContent,
+ fmt.Sprintf(".SIGN.RSA.%s@%s.rsa.pub", owner.LowerName, hex.EncodeToString(fingerprint)),
+ sign,
+ false,
+ ); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(signedIndexContent, unsignedIndexContent); err != nil {
+ return err
+ }
+
+ _, err = packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ repoVersion,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: IndexArchiveFilename,
+ CompositeKey: fmt.Sprintf("%s|%s|%s", branch, repository, architecture),
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: signedIndexContent,
+ IsLead: false,
+ OverwriteExisting: true,
+ },
+ )
+ return err
+}
+
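+// writeGzipStream writes a gzip-compressed tar stream containing a single
+// file. When addTarEnd is false, tw.Close is skipped so the tar
+// end-of-archive marker is omitted; the resulting stream can then be
+// concatenated with another tar stream, which is how the signature segment
+// and the index segment are combined into a single APKINDEX.tar.gz above.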
+func writeGzipStream(w io.Writer, filename string, content []byte, addTarEnd bool) error {
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+
+ tw := tar.NewWriter(zw)
+ if addTarEnd {
+ defer tw.Close()
+ }
+ hdr := &tar.Header{
+ Name: filename,
+ Mode: 0o600,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := tw.Write(content); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/services/packages/arch/repository.go b/services/packages/arch/repository.go
new file mode 100644
index 0000000..763a0a2
--- /dev/null
+++ b/services/packages/arch/repository.go
@@ -0,0 +1,368 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package arch
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ user_model "code.gitea.io/gitea/models/user"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ arch_module "code.gitea.io/gitea/modules/packages/arch"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/sync"
+ "code.gitea.io/gitea/modules/util"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+var locker = sync.NewExclusivePool()
+
+func GetOrCreateRepositoryVersion(ctx context.Context, ownerID int64) (*packages_model.PackageVersion, error) {
+ return packages_service.GetOrCreateInternalPackageVersion(ctx, ownerID, packages_model.TypeArch, arch_module.RepositoryPackage, arch_module.RepositoryVersion)
+}
+
+func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ // rebuild the db file of every architecture that already has one
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ for _, pf := range pfs {
+ if strings.HasSuffix(pf.Name, ".db") {
+ arch := strings.TrimSuffix(pf.Name, ".db")
+ if err := BuildPacmanDB(ctx, ownerID, pf.CompositeKey, arch); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func BuildCustomRepositoryFiles(ctx context.Context, ownerID int64, disco string) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ // rebuild the db files of the given group
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ for _, pf := range pfs {
+ if strings.HasSuffix(pf.Name, ".db") && pf.CompositeKey == disco {
+ arch := strings.TrimSuffix(strings.TrimPrefix(pf.Name, fmt.Sprintf("%s-", pf.CompositeKey)), ".db")
+ if err := BuildPacmanDB(ctx, ownerID, pf.CompositeKey, arch); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// NewFileSign creates a detached PGP signature for input using the owner's
+// private key, generating the key pair on first use.
+func NewFileSign(ctx context.Context, ownerID int64, input io.Reader) (*packages_module.HashedBuffer, error) {
+ priv, _, err := GetOrCreateKeyPair(ctx, ownerID)
+ if err != nil {
+ return nil, err
+ }
+ block, err := armor.Decode(strings.NewReader(priv))
+ if err != nil {
+ return nil, err
+ }
+ e, err := openpgp.ReadEntity(packet.NewReader(block.Body))
+ if err != nil {
+ return nil, err
+ }
+ pkgSig, err := packages_module.NewHashedBuffer()
+ if err != nil {
+ return nil, err
+ }
+ defer pkgSig.Close()
+ if err := openpgp.DetachSign(pkgSig, e, input, nil); err != nil {
+ return nil, err
+ }
+ return pkgSig, nil
+}
+
+// BuildPacmanDB (re)builds the pacman database file for the given group and architecture and signs it
+func BuildPacmanDB(ctx context.Context, ownerID int64, group, arch string) error {
+ key := fmt.Sprintf("pkg_%d_arch_db_%s", ownerID, group)
+ locker.CheckIn(key)
+ defer locker.CheckOut(key)
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ // remove the old db file for this group and architecture
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ for _, pf := range pfs {
+ if pf.CompositeKey == group && pf.Name == fmt.Sprintf("%s.db", arch) {
+ // remove group and arch
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+ }
+
+ db, err := createDB(ctx, ownerID, group, arch)
+ if errors.Is(err, io.EOF) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ defer db.Close()
+ // sign the database file
+ _, err = db.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ sig, err := NewFileSign(ctx, ownerID, db)
+ if err != nil {
+ return err
+ }
+ defer sig.Close()
+ _, err = db.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ for name, data := range map[string]*packages_module.HashedBuffer{
+ fmt.Sprintf("%s.db", arch): db,
+ fmt.Sprintf("%s.db.sig", arch): sig,
+ } {
+ _, err = packages_service.AddFileToPackageVersionInternal(ctx, pv, &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: name,
+ CompositeKey: group,
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: data,
+ IsLead: false,
+ OverwriteExisting: true,
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func createDB(ctx context.Context, ownerID int64, group, arch string) (*packages_module.HashedBuffer, error) {
+ pkgs, err := packages_model.GetPackagesByType(ctx, ownerID, packages_model.TypeArch)
+ if err != nil {
+ return nil, err
+ }
+ if len(pkgs) == 0 {
+ return nil, io.EOF
+ }
+ db, err := packages_module.NewHashedBuffer()
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+ gw := gzip.NewWriter(db)
+ defer gw.Close()
+ tw := tar.NewWriter(gw)
+ defer tw.Close()
+ count := 0
+ for _, pkg := range pkgs {
+ versions, err := packages_model.GetVersionsByPackageName(
+ ctx, ownerID, packages_model.TypeArch, pkg.Name,
+ )
+ if err != nil {
+ return nil, err
+ }
+ sort.Slice(versions, func(i, j int) bool {
+ return versions[i].CreatedUnix > versions[j].CreatedUnix
+ })
+
+ for _, ver := range versions {
+ files, err := packages_model.GetFilesByVersionID(ctx, ver.ID)
+ if err != nil {
+ return nil, err
+ }
+ var pf *packages_model.PackageFile
+ for _, file := range files {
+ ext := filepath.Ext(file.Name)
+ if file.CompositeKey == group && ext != "" && ext != ".db" && ext != ".sig" {
+ if pf == nil && strings.HasSuffix(file.Name, fmt.Sprintf("any.pkg.tar%s", ext)) {
+ pf = file
+ }
+ if strings.HasSuffix(file.Name, fmt.Sprintf("%s.pkg.tar%s", arch, ext)) {
+ pf = file
+ break
+ }
+ }
+ }
+ if pf == nil {
+ // no matching package file exists
+ continue
+ }
+ pps, err := packages_model.GetPropertiesByName(
+ ctx, packages_model.PropertyTypeFile, pf.ID, arch_module.PropertyDescription,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if len(pps) >= 1 {
+ meta := []byte(pps[0].Value)
+ header := &tar.Header{
+ Name: pkg.Name + "-" + ver.Version + "/desc",
+ Size: int64(len(meta)),
+ Mode: int64(os.ModePerm),
+ }
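+ // e.g. (hypothetical) "mypkg-1.0.0-1/desc"; the db is a gzip-compressed
+ // tar archive with one such desc entry per package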
+ if err = tw.WriteHeader(header); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write(meta); err != nil {
+ return nil, err
+ }
+ count++
+ break
+ }
+ }
+ }
+ if count == 0 {
+ return nil, io.EOF
+ }
+ return db, nil
+}
+
+// GetPackageFile returns a stream of the package file matching the given
+// group and filename and updates the download counter.
+func GetPackageFile(ctx context.Context, group, file string, ownerID int64) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ fileSplit := strings.Split(file, "-")
+ if len(fileSplit) <= 3 {
+ return nil, nil, nil, errors.New("invalid file format, need <name>-<version>-<release>-<arch>.pkg.<archive>")
+ }
+ var (
+ pkgName = strings.Join(fileSplit[0:len(fileSplit)-3], "-")
+ pkgVer = fileSplit[len(fileSplit)-3] + "-" + fileSplit[len(fileSplit)-2]
+ )
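+ // e.g. (hypothetical) "mypkg-1.0.0-1-x86_64.pkg.tar.zst" yields
+ // pkgName "mypkg" and pkgVer "1.0.0-1"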
+ version, err := packages_model.GetVersionByNameAndVersion(ctx, ownerID, packages_model.TypeArch, pkgName, pkgVer)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ pkgFile, err := packages_model.GetFileForVersionByName(ctx, version.ID, file, group)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return packages_service.GetPackageFileStream(ctx, pkgFile)
+}
+
+func GetPackageDBFile(ctx context.Context, ownerID int64, group, arch string, sigFile bool) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ fileName := fmt.Sprintf("%s.db", arch)
+ if sigFile {
+ fileName = fmt.Sprintf("%s.db.sig", arch)
+ }
+ file, err := packages_model.GetFileForVersionByName(ctx, pv.ID, fileName, group)
+ // fall back to the "any" architecture db
+ if errors.Is(err, util.ErrNotExist) && arch != "any" {
+ fileName = "any.db"
+ if sigFile {
+ fileName = "any.db.sig"
+ }
+ file, err = packages_model.GetFileForVersionByName(ctx, pv.ID, fileName, group)
+ }
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return packages_service.GetPackageFileStream(ctx, file)
+}
+
+// GetOrCreateKeyPair gets or creates the PGP keys used to sign repository metadata files
+func GetOrCreateKeyPair(ctx context.Context, ownerID int64) (string, string, error) {
+ priv, err := user_model.GetSetting(ctx, ownerID, arch_module.SettingKeyPrivate)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ pub, err := user_model.GetSetting(ctx, ownerID, arch_module.SettingKeyPublic)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ if priv == "" || pub == "" {
+ user, err := user_model.GetUserByID(ctx, ownerID)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ priv, pub, err = generateKeypair(user.Name)
+ if err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, arch_module.SettingKeyPrivate, priv); err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, arch_module.SettingKeyPublic, pub); err != nil {
+ return "", "", err
+ }
+ }
+
+ return priv, pub, nil
+}
+
+func generateKeypair(owner string) (string, string, error) {
+ e, err := openpgp.NewEntity(
+ owner,
+ "Arch Package signature only",
+ fmt.Sprintf("%s@noreply.%s", owner, setting.Packages.RegistryHost), &packet.Config{
+ RSABits: 4096,
+ })
+ if err != nil {
+ return "", "", err
+ }
+
+ var priv strings.Builder
+ var pub strings.Builder
+
+ w, err := armor.Encode(&priv, openpgp.PrivateKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.SerializePrivate(w, nil); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ w, err = armor.Encode(&pub, openpgp.PublicKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.Serialize(w); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ return priv.String(), pub.String(), nil
+}
diff --git a/services/packages/auth.go b/services/packages/auth.go
new file mode 100644
index 0000000..c5bf5af
--- /dev/null
+++ b/services/packages/auth.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ auth_model "code.gitea.io/gitea/models/auth"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/golang-jwt/jwt/v5"
+)
+
+type packageClaims struct {
+ jwt.RegisteredClaims
+ UserID int64
+ Scope auth_model.AccessTokenScope
+}
+
+func CreateAuthorizationToken(u *user_model.User, scope auth_model.AccessTokenScope) (string, error) {
+ now := time.Now()
+
+ claims := packageClaims{
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: jwt.NewNumericDate(now.Add(24 * time.Hour)),
+ NotBefore: jwt.NewNumericDate(now),
+ },
+ UserID: u.ID,
+ Scope: scope,
+ }
+ token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+
+ tokenString, err := token.SignedString(setting.GetGeneralTokenSigningSecret())
+ if err != nil {
+ return "", err
+ }
+
+ return tokenString, nil
+}
+
+func ParseAuthorizationToken(req *http.Request) (int64, auth_model.AccessTokenScope, error) {
+ h := req.Header.Get("Authorization")
+ if h == "" {
+ return 0, "", nil
+ }
+
+ parts := strings.SplitN(h, " ", 2)
+ if len(parts) != 2 {
+ log.Error("split token failed: %s", h)
+ return 0, "", fmt.Errorf("split token failed")
+ }
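+ // the value is expected to look like "Bearer <token>"; only the part
+ // after the first space is parsed, the scheme word itself is not checked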
+
+ token, err := jwt.ParseWithClaims(parts[1], &packageClaims{}, func(t *jwt.Token) (any, error) {
+ if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+ return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+ }
+ return setting.GetGeneralTokenSigningSecret(), nil
+ })
+ if err != nil {
+ return 0, "", err
+ }
+
+ c, ok := token.Claims.(*packageClaims)
+ if !token.Valid || !ok {
+ return 0, "", fmt.Errorf("invalid token claim")
+ }
+
+ return c.UserID, c.Scope, nil
+}
diff --git a/services/packages/cargo/index.go b/services/packages/cargo/index.go
new file mode 100644
index 0000000..59823cd
--- /dev/null
+++ b/services/packages/cargo/index.go
@@ -0,0 +1,315 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package cargo
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "strconv"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/json"
+ cargo_module "code.gitea.io/gitea/modules/packages/cargo"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+ repo_service "code.gitea.io/gitea/services/repository"
+ files_service "code.gitea.io/gitea/services/repository/files"
+)
+
+const (
+ IndexRepositoryName = "_cargo-index"
+ ConfigFileName = "config.json"
+)
+
+// https://doc.rust-lang.org/cargo/reference/registries.html#index-format
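+//
+// For example: BuildPackagePath("a") -> "1/a", BuildPackagePath("ab") -> "2/ab",
+// BuildPackagePath("abc") -> "3/a/abc", BuildPackagePath("serde") -> "se/rd/serde".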
+
+func BuildPackagePath(name string) string {
+ switch len(name) {
+ case 0:
+ panic("Cargo package name can not be empty")
+ case 1:
+ return path.Join("1", name)
+ case 2:
+ return path.Join("2", name)
+ case 3:
+ return path.Join("3", string(name[0]), name)
+ default:
+ return path.Join(name[0:2], name[2:4], name)
+ }
+}
+
+func InitializeIndexRepository(ctx context.Context, doer, owner *user_model.User) error {
+ repo, err := getOrCreateIndexRepository(ctx, doer, owner)
+ if err != nil {
+ return err
+ }
+
+ if err := createOrUpdateConfigFile(ctx, repo, doer, owner); err != nil {
+ return fmt.Errorf("createOrUpdateConfigFile: %w", err)
+ }
+
+ return nil
+}
+
+func RebuildIndex(ctx context.Context, doer, owner *user_model.User) error {
+ repo, err := repo_model.GetRepositoryByOwnerAndName(ctx, owner.Name, IndexRepositoryName)
+ if err != nil {
+ return fmt.Errorf("GetRepositoryByOwnerAndName: %w", err)
+ }
+
+ ps, err := packages_model.GetPackagesByType(ctx, owner.ID, packages_model.TypeCargo)
+ if err != nil {
+ return fmt.Errorf("GetPackagesByType: %w", err)
+ }
+
+ return alterRepositoryContent(
+ ctx,
+ doer,
+ repo,
+ "Rebuild Cargo Index",
+ func(t *files_service.TemporaryUploadRepository) error {
+ // Remove all existing content but the Cargo config
+ files, err := t.LsFiles()
+ if err != nil {
+ return err
+ }
+ for i, file := range files {
+ if file == ConfigFileName {
+ files[i] = files[len(files)-1]
+ files = files[:len(files)-1]
+ break
+ }
+ }
+ if err := t.RemoveFilesFromIndex(files...); err != nil {
+ return err
+ }
+
+ // Add all packages
+ for _, p := range ps {
+ if err := addOrUpdatePackageIndex(ctx, t, p); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ },
+ )
+}
+
+func UpdatePackageIndexIfExists(ctx context.Context, doer, owner *user_model.User, packageID int64) error {
+ // We do not want to force the creation of the repo here:
+ // the cargo http index does not rely on the repo itself,
+ // so if the repo does not exist, we just do nothing.
+ repo, err := repo_model.GetRepositoryByOwnerAndName(ctx, owner.Name, IndexRepositoryName)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ return nil
+ }
+ return fmt.Errorf("GetRepositoryByOwnerAndName: %w", err)
+ }
+
+ p, err := packages_model.GetPackageByID(ctx, packageID)
+ if err != nil {
+ return fmt.Errorf("GetPackageByID[%d]: %w", packageID, err)
+ }
+
+ return alterRepositoryContent(
+ ctx,
+ doer,
+ repo,
+ "Update "+p.Name,
+ func(t *files_service.TemporaryUploadRepository) error {
+ return addOrUpdatePackageIndex(ctx, t, p)
+ },
+ )
+}
+
+type IndexVersionEntry struct {
+ Name string `json:"name"`
+ Version string `json:"vers"`
+ Dependencies []*cargo_module.Dependency `json:"deps"`
+ FileChecksum string `json:"cksum"`
+ Features map[string][]string `json:"features"`
+ Yanked bool `json:"yanked"`
+ Links string `json:"links,omitempty"`
+}
+
+func BuildPackageIndex(ctx context.Context, p *packages_model.Package) (*bytes.Buffer, error) {
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ PackageID: p.ID,
+ Sort: packages_model.SortVersionAsc,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("SearchVersions[%s]: %w", p.Name, err)
+ }
+ if len(pvs) == 0 {
+ return nil, nil
+ }
+
+ pds, err := packages_model.GetPackageDescriptors(ctx, pvs)
+ if err != nil {
+ return nil, fmt.Errorf("GetPackageDescriptors[%s]: %w", p.Name, err)
+ }
+
+ var b bytes.Buffer
+ for _, pd := range pds {
+ metadata := pd.Metadata.(*cargo_module.Metadata)
+
+ dependencies := metadata.Dependencies
+ if dependencies == nil {
+ dependencies = make([]*cargo_module.Dependency, 0)
+ }
+
+ features := metadata.Features
+ if features == nil {
+ features = make(map[string][]string)
+ }
+
+ yanked, _ := strconv.ParseBool(pd.VersionProperties.GetByName(cargo_module.PropertyYanked))
+ entry, err := json.Marshal(&IndexVersionEntry{
+ Name: pd.Package.Name,
+ Version: pd.Version.Version,
+ Dependencies: dependencies,
+ FileChecksum: pd.Files[0].Blob.HashSHA256,
+ Features: features,
+ Yanked: yanked,
+ Links: metadata.Links,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ b.Write(entry)
+ b.WriteString("\n")
+ }
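+ // Each line of b is now one JSON object, e.g. (hypothetical):
+ // {"name":"mycrate","vers":"0.1.0","deps":[],"cksum":"<sha256>","features":{},"yanked":false}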
+
+ return &b, nil
+}
+
+func addOrUpdatePackageIndex(ctx context.Context, t *files_service.TemporaryUploadRepository, p *packages_model.Package) error {
+ b, err := BuildPackageIndex(ctx, p)
+ if err != nil {
+ return err
+ }
+ if b == nil {
+ return nil
+ }
+
+ return writeObjectToIndex(t, BuildPackagePath(p.LowerName), b)
+}
+
+func getOrCreateIndexRepository(ctx context.Context, doer, owner *user_model.User) (*repo_model.Repository, error) {
+ repo, err := repo_model.GetRepositoryByOwnerAndName(ctx, owner.Name, IndexRepositoryName)
+ if err != nil {
+ if errors.Is(err, util.ErrNotExist) {
+ repo, err = repo_service.CreateRepositoryDirectly(ctx, doer, owner, repo_service.CreateRepoOptions{
+ Name: IndexRepositoryName,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("CreateRepository: %w", err)
+ }
+ } else {
+ return nil, fmt.Errorf("GetRepositoryByOwnerAndName: %w", err)
+ }
+ }
+
+ return repo, nil
+}
+
+type Config struct {
+ DownloadURL string `json:"dl"`
+ APIURL string `json:"api"`
+ AuthRequired bool `json:"auth-required"`
+}
+
+func BuildConfig(owner *user_model.User, isPrivate bool) *Config {
+ return &Config{
+ DownloadURL: setting.AppURL + "api/packages/" + owner.Name + "/cargo/api/v1/crates",
+ APIURL: setting.AppURL + "api/packages/" + owner.Name + "/cargo",
+ AuthRequired: isPrivate,
+ }
+}
+
+func createOrUpdateConfigFile(ctx context.Context, repo *repo_model.Repository, doer, owner *user_model.User) error {
+ return alterRepositoryContent(
+ ctx,
+ doer,
+ repo,
+ "Initialize Cargo Config",
+ func(t *files_service.TemporaryUploadRepository) error {
+ var b bytes.Buffer
+ err := json.NewEncoder(&b).Encode(BuildConfig(owner, setting.Service.RequireSignInView || owner.Visibility != structs.VisibleTypePublic || repo.IsPrivate))
+ if err != nil {
+ return err
+ }
+
+ return writeObjectToIndex(t, ConfigFileName, &b)
+ },
+ )
+}
+
+// This is a shorter version of CreateOrUpdateRepoFile which allows performing multiple actions on a git repository
+func alterRepositoryContent(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, commitMessage string, fn func(*files_service.TemporaryUploadRepository) error) error {
+ t, err := files_service.NewTemporaryUploadRepository(ctx, repo)
+ if err != nil {
+ return err
+ }
+ defer t.Close()
+
+ var lastCommitID string
+ if err := t.Clone(repo.DefaultBranch, true); err != nil {
+ if !git.IsErrBranchNotExist(err) || !repo.IsEmpty {
+ return err
+ }
+ if err := t.Init(repo.ObjectFormatName); err != nil {
+ return err
+ }
+ } else {
+ if err := t.SetDefaultIndex(); err != nil {
+ return err
+ }
+
+ commit, err := t.GetBranchCommit(repo.DefaultBranch)
+ if err != nil {
+ return err
+ }
+
+ lastCommitID = commit.ID.String()
+ }
+
+ if err := fn(t); err != nil {
+ return err
+ }
+
+ treeHash, err := t.WriteTree()
+ if err != nil {
+ return err
+ }
+
+ now := time.Now()
+ commitHash, err := t.CommitTreeWithDate(lastCommitID, doer, doer, treeHash, commitMessage, false, now, now)
+ if err != nil {
+ return err
+ }
+
+ return t.Push(doer, commitHash, repo.DefaultBranch)
+}
+
+func writeObjectToIndex(t *files_service.TemporaryUploadRepository, path string, r io.Reader) error {
+ hash, err := t.HashObject(r)
+ if err != nil {
+ return err
+ }
+
+ return t.AddObjectToIndex("100644", hash, path)
+}
diff --git a/services/packages/cleanup/cleanup.go b/services/packages/cleanup/cleanup.go
new file mode 100644
index 0000000..ab419a9
--- /dev/null
+++ b/services/packages/cleanup/cleanup.go
@@ -0,0 +1,198 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ packages_service "code.gitea.io/gitea/services/packages"
+ alpine_service "code.gitea.io/gitea/services/packages/alpine"
+ arch_service "code.gitea.io/gitea/services/packages/arch"
+ cargo_service "code.gitea.io/gitea/services/packages/cargo"
+ container_service "code.gitea.io/gitea/services/packages/container"
+ debian_service "code.gitea.io/gitea/services/packages/debian"
+ rpm_service "code.gitea.io/gitea/services/packages/rpm"
+)
+
+// CleanupTask executes the enabled cleanup rules and removes expired package data
+func CleanupTask(ctx context.Context, olderThan time.Duration) error {
+ if err := ExecuteCleanupRules(ctx); err != nil {
+ return err
+ }
+
+ return CleanupExpiredData(ctx, olderThan)
+}
+
+func ExecuteCleanupRules(outerCtx context.Context) error {
+ ctx, committer, err := db.TxContext(outerCtx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ err = packages_model.IterateEnabledCleanupRules(ctx, func(ctx context.Context, pcr *packages_model.PackageCleanupRule) error {
+ select {
+ case <-outerCtx.Done():
+ return db.ErrCancelledf("While processing package cleanup rules")
+ default:
+ }
+
+ if err := pcr.CompiledPattern(); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: CompilePattern failed: %w", pcr.ID, err)
+ }
+
+ olderThan := time.Now().AddDate(0, 0, -pcr.RemoveDays)
+
+ packages, err := packages_model.GetPackagesByType(ctx, pcr.OwnerID, pcr.Type)
+ if err != nil {
+ return fmt.Errorf("CleanupRule [%d]: GetPackagesByType failed: %w", pcr.ID, err)
+ }
+
+ anyVersionDeleted := false
+ for _, p := range packages {
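+ // Versions are sorted newest first; the pcr.KeepCount most recent are
+ // skipped and up to 200 of the remaining versions are considered.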
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ PackageID: p.ID,
+ IsInternal: optional.Some(false),
+ Sort: packages_model.SortCreatedDesc,
+ Paginator: db.NewAbsoluteListOptions(pcr.KeepCount, 200),
+ })
+ if err != nil {
+ return fmt.Errorf("CleanupRule [%d]: SearchVersions failed: %w", pcr.ID, err)
+ }
+ versionDeleted := false
+ for _, pv := range pvs {
+ if pcr.Type == packages_model.TypeContainer {
+ if skip, err := container_service.ShouldBeSkipped(ctx, pcr, p, pv); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: container.ShouldBeSkipped failed: %w", pcr.ID, err)
+ } else if skip {
+ log.Debug("Rule[%d]: keep '%s/%s' (container)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+ }
+
+ toMatch := pv.LowerVersion
+ if pcr.MatchFullName {
+ toMatch = p.LowerName + "/" + pv.LowerVersion
+ }
+
+ if pcr.KeepPatternMatcher != nil && pcr.KeepPatternMatcher.MatchString(toMatch) {
+ log.Debug("Rule[%d]: keep '%s/%s' (keep pattern)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+ if pv.CreatedUnix.AsLocalTime().After(olderThan) {
+ log.Debug("Rule[%d]: keep '%s/%s' (remove days)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+ if pcr.RemovePatternMatcher != nil && !pcr.RemovePatternMatcher.MatchString(toMatch) {
+ log.Debug("Rule[%d]: keep '%s/%s' (remove pattern)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+
+ log.Debug("Rule[%d]: remove '%s/%s'", pcr.ID, p.Name, pv.Version)
+
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: DeletePackageVersionAndReferences failed: %w", pcr.ID, err)
+ }
+
+ versionDeleted = true
+ anyVersionDeleted = true
+ }
+
+ if versionDeleted {
+ if pcr.Type == packages_model.TypeCargo {
+ owner, err := user_model.GetUserByID(ctx, pcr.OwnerID)
+ if err != nil {
+ return fmt.Errorf("GetUserByID failed: %w", err)
+ }
+ if err := cargo_service.UpdatePackageIndexIfExists(ctx, owner, owner, p.ID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: cargo.UpdatePackageIndexIfExists failed: %w", pcr.ID, err)
+ }
+ }
+ }
+ }
+
+ if anyVersionDeleted {
+ if pcr.Type == packages_model.TypeDebian {
+ if err := debian_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: debian.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ } else if pcr.Type == packages_model.TypeAlpine {
+ if err := alpine_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: alpine.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ } else if pcr.Type == packages_model.TypeRpm {
+ if err := rpm_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: rpm.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ } else if pcr.Type == packages_model.TypeArch {
+ if err := arch_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: arch.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+func CleanupExpiredData(outerCtx context.Context, olderThan time.Duration) error {
+ ctx, committer, err := db.TxContext(outerCtx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := container_service.Cleanup(ctx, olderThan); err != nil {
+ return err
+ }
+
+ pIDs, err := packages_model.FindUnreferencedPackages(ctx)
+ if err != nil {
+ return err
+ }
+ for _, pID := range pIDs {
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypePackage, pID); err != nil {
+ return err
+ }
+ if err := packages_model.DeletePackageByID(ctx, pID); err != nil {
+ return err
+ }
+ }
+
+ pbs, err := packages_model.FindExpiredUnreferencedBlobs(ctx, olderThan)
+ if err != nil {
+ return err
+ }
+
+ for _, pb := range pbs {
+ if err := packages_model.DeleteBlobByID(ctx, pb.ID); err != nil {
+ return err
+ }
+ }
+
+ if err := committer.Commit(); err != nil {
+ return err
+ }
+
+ contentStore := packages_module.NewContentStore()
+ for _, pb := range pbs {
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob [%v]: %v", pb.ID, err)
+ }
+ }
+
+ return nil
+}
diff --git a/services/packages/cleanup/cleanup_sha256_test.go b/services/packages/cleanup/cleanup_sha256_test.go
new file mode 100644
index 0000000..6d7cc47
--- /dev/null
+++ b/services/packages/cleanup/cleanup_sha256_test.go
@@ -0,0 +1,116 @@
+// Copyright 2024 The Forgejo Authors.
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package container
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/test"
+ "code.gitea.io/gitea/modules/timeutil"
+ container_service "code.gitea.io/gitea/services/packages/container"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCleanupSHA256(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ defer test.MockVariableValue(&container_service.SHA256BatchSize, 1)()
+
+ ctx := db.DefaultContext
+
+ createContainer := func(t *testing.T, name, version, digest string, created timeutil.TimeStamp) {
+ t.Helper()
+
+ ownerID := int64(2001)
+
+ p := packages.Package{
+ OwnerID: ownerID,
+ LowerName: name,
+ Type: packages.TypeContainer,
+ }
+ _, err := db.GetEngine(ctx).Insert(&p)
+ require.NoError(t, err)
+
+ var metadata string
+ if digest != "" {
+ m := container_module.Metadata{
+ Manifests: []*container_module.Manifest{
+ {
+ Digest: digest,
+ },
+ },
+ }
+ mt, err := json.Marshal(m)
+ require.NoError(t, err)
+ metadata = string(mt)
+ }
+ v := packages.PackageVersion{
+ PackageID: p.ID,
+ LowerVersion: version,
+ MetadataJSON: metadata,
+ CreatedUnix: created,
+ }
+ _, err = db.GetEngine(ctx).NoAutoTime().Insert(&v)
+ require.NoError(t, err)
+ }
+
+ cleanupAndCheckLogs := func(t *testing.T, olderThan time.Duration, expected ...string) {
+ t.Helper()
+ logChecker, cleanup := test.NewLogChecker(log.DEFAULT, log.TRACE)
+ logChecker.Filter(expected...)
+ logChecker.StopMark(container_service.SHA256LogFinish)
+ defer cleanup()
+
+ require.NoError(t, CleanupExpiredData(ctx, olderThan))
+
+ logFiltered, logStopped := logChecker.Check(5 * time.Second)
+ assert.True(t, logStopped)
+ filtered := make([]bool, 0, len(expected))
+ for range expected {
+ filtered = append(filtered, true)
+ }
+ assert.EqualValues(t, filtered, logFiltered, expected)
+ }
+
+ ancient := 1 * time.Hour
+
+ t.Run("no packages, cleanup nothing", func(t *testing.T) {
+ cleanupAndCheckLogs(t, ancient, "Nothing to cleanup")
+ })
+
+ orphan := "orphan"
+ createdLongAgo := timeutil.TimeStamp(time.Now().Add(-(ancient * 2)).Unix())
+ createdRecently := timeutil.TimeStamp(time.Now().Add(-(ancient / 2)).Unix())
+
+ t.Run("an orphaned package created a long time ago is removed", func(t *testing.T) {
+ createContainer(t, orphan, "sha256:"+orphan, "", createdLongAgo)
+ cleanupAndCheckLogs(t, ancient, "Removing 1 entries from `package_version`")
+ cleanupAndCheckLogs(t, ancient, "Nothing to cleanup")
+ })
+
+ t.Run("a newly created orphaned package is not cleaned up", func(t *testing.T) {
+ createContainer(t, orphan, "sha256:"+orphan, "", createdRecently)
+ cleanupAndCheckLogs(t, ancient, "1 out of 1 container image(s) are not deleted because they were created less than")
+ cleanupAndCheckLogs(t, 0, "Removing 1 entries from `package_version`")
+ cleanupAndCheckLogs(t, 0, "Nothing to cleanup")
+ })
+
+ t.Run("a referenced package is not removed", func(t *testing.T) {
+ referenced := "referenced"
+ digest := "sha256:" + referenced
+ createContainer(t, referenced, digest, "", createdRecently)
+ index := "index"
+ createContainer(t, index, index, digest, createdRecently)
+ cleanupAndCheckLogs(t, ancient, "Nothing to cleanup")
+ })
+}
diff --git a/services/packages/cleanup/main_test.go b/services/packages/cleanup/main_test.go
new file mode 100644
index 0000000..ded3d76
--- /dev/null
+++ b/services/packages/cleanup/main_test.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Forgejo Authors.
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package container
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/services/packages/container/blob_uploader.go b/services/packages/container/blob_uploader.go
new file mode 100644
index 0000000..bae2e2d
--- /dev/null
+++ b/services/packages/container/blob_uploader.go
@@ -0,0 +1,133 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "errors"
+ "io"
+ "os"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+var (
+ // errWriteAfterRead occurs if Write is called after a read operation
+ errWriteAfterRead = errors.New("write is unsupported after a read operation")
+ // errOffsetMismatch occurs if the file offset differs from the offset stored in the model
+ errOffsetMismatch = errors.New("offset mismatch between file and model")
+)
+
+// BlobUploader handles chunked blob uploads
+type BlobUploader struct {
+ *packages_model.PackageBlobUpload
+ *packages_module.MultiHasher
+ file *os.File
+ reading bool
+}
+
+func buildFilePath(id string) string {
+ return util.FilePathJoinAbs(setting.Packages.ChunkedUploadPath, id)
+}
+
+// NewBlobUploader creates a new blob uploader for the given id
+func NewBlobUploader(ctx context.Context, id string) (*BlobUploader, error) {
+ model, err := packages_model.GetBlobUploadByID(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+
+ hash := packages_module.NewMultiHasher()
+ if len(model.HashStateBytes) != 0 {
+ if err := hash.UnmarshalBinary(model.HashStateBytes); err != nil {
+ return nil, err
+ }
+ }
+
+ f, err := os.OpenFile(buildFilePath(model.ID), os.O_RDWR|os.O_CREATE, 0o666)
+ if err != nil {
+ return nil, err
+ }
+
+ return &BlobUploader{
+ model,
+ hash,
+ f,
+ false,
+ }, nil
+}
+
+// Close implements io.Closer
+func (u *BlobUploader) Close() error {
+ return u.file.Close()
+}
+
+// Append appends a chunk of data and updates the model
+func (u *BlobUploader) Append(ctx context.Context, r io.Reader) error {
+ if u.reading {
+ return errWriteAfterRead
+ }
+
+ offset, err := u.file.Seek(0, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ if offset != u.BytesReceived {
+ return errOffsetMismatch
+ }
+
+ n, err := io.Copy(io.MultiWriter(u.file, u.MultiHasher), r)
+ if err != nil {
+ return err
+ }
+
+ // fast path if nothing was written
+ if n == 0 {
+ return nil
+ }
+
+ u.BytesReceived += n
+
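+ // persist the hash state so the upload can be resumed later by a new
+ // BlobUploader without rehashing the data already received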
+ u.HashStateBytes, err = u.MultiHasher.MarshalBinary()
+ if err != nil {
+ return err
+ }
+
+ return packages_model.UpdateBlobUpload(ctx, u.PackageBlobUpload)
+}
+
+func (u *BlobUploader) Size() int64 {
+ return u.BytesReceived
+}
+
+// Read implements io.Reader
+func (u *BlobUploader) Read(p []byte) (int, error) {
+ if !u.reading {
+ _, err := u.file.Seek(0, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ u.reading = true
+ }
+
+ return u.file.Read(p)
+}
+
+// RemoveBlobUploadByID deletes the data and the model of a blob upload
+func RemoveBlobUploadByID(ctx context.Context, id string) error {
+ if err := packages_model.DeleteBlobUploadByID(ctx, id); err != nil {
+ return err
+ }
+
+ err := os.Remove(buildFilePath(id))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ return nil
+}
diff --git a/services/packages/container/cleanup.go b/services/packages/container/cleanup.go
new file mode 100644
index 0000000..b5563c6
--- /dev/null
+++ b/services/packages/container/cleanup.go
@@ -0,0 +1,111 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ container_model "code.gitea.io/gitea/models/packages/container"
+ "code.gitea.io/gitea/modules/optional"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+// Cleanup removes expired container data
+func Cleanup(ctx context.Context, olderThan time.Duration) error {
+ if err := cleanupExpiredBlobUploads(ctx, olderThan); err != nil {
+ return err
+ }
+ if err := CleanupSHA256(ctx, olderThan); err != nil {
+ return err
+ }
+ return cleanupExpiredUploadedBlobs(ctx, olderThan)
+}
+
+// cleanupExpiredBlobUploads removes expired blob uploads
+func cleanupExpiredBlobUploads(ctx context.Context, olderThan time.Duration) error {
+ pbus, err := packages_model.FindExpiredBlobUploads(ctx, olderThan)
+ if err != nil {
+ return err
+ }
+
+ for _, pbu := range pbus {
+ if err := RemoveBlobUploadByID(ctx, pbu.ID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// cleanupExpiredUploadedBlobs removes expired uploaded blobs not referenced by a manifest
+func cleanupExpiredUploadedBlobs(ctx context.Context, olderThan time.Duration) error {
+ pfs, err := container_model.SearchExpiredUploadedBlobs(ctx, olderThan)
+ if err != nil {
+ return err
+ }
+
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ Type: packages_model.TypeContainer,
+ Version: packages_model.SearchValue{
+ ExactMatch: true,
+ Value: container_model.UploadVersion,
+ },
+ IsInternal: optional.Some(true),
+ HasFiles: optional.Some(false),
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, pv := range pvs {
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypeVersion, pv.ID); err != nil {
+ return err
+ }
+
+ if err := packages_model.DeleteVersionByID(ctx, pv.ID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func ShouldBeSkipped(ctx context.Context, pcr *packages_model.PackageCleanupRule, p *packages_model.Package, pv *packages_model.PackageVersion) (bool, error) {
+ // Always skip the "latest" tag
+ if pv.LowerVersion == "latest" {
+ return true, nil
+ }
+
+ // Check if the version is a digest (or untagged)
+ if digest.Digest(pv.LowerVersion).Validate() == nil {
+ // Check if there is another manifest referencing this version
+ has, err := packages_model.ExistVersion(ctx, &packages_model.PackageSearchOptions{
+ PackageID: p.ID,
+ Properties: map[string]string{
+ container_module.PropertyManifestReference: pv.LowerVersion,
+ },
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Skip it if the version is referenced
+ if has {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
diff --git a/services/packages/container/cleanup_sha256.go b/services/packages/container/cleanup_sha256.go
new file mode 100644
index 0000000..16afc74
--- /dev/null
+++ b/services/packages/container/cleanup_sha256.go
@@ -0,0 +1,158 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package container
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+var (
+ SHA256BatchSize = 500
+ SHA256Log = "cleanup of dangling images with a sha256:* version"
+ SHA256LogStart = "Starting " + SHA256Log
+ SHA256LogFinish = "Finished " + SHA256Log
+)
+
+func CleanupSHA256(ctx context.Context, olderThan time.Duration) error {
+ log.Info(SHA256LogStart)
+ err := cleanupSHA256(ctx, olderThan)
+ log.Info(SHA256LogFinish)
+ return err
+}
+
+func cleanupSHA256(outerCtx context.Context, olderThan time.Duration) error {
+ ctx, committer, err := db.TxContext(outerCtx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ foundAtLeastOneSHA256 := false
+ type packageVersion struct {
+ id int64
+ created timeutil.TimeStamp
+ }
+ shaToPackageVersion := make(map[string]packageVersion, 100)
+ knownSHA := make(map[string]any, 100)
+
+ // compute the cutoff before taking the inventory to avoid racing
+ // against ongoing image creations
+ old := timeutil.TimeStamp(time.Now().Add(-olderThan).Unix())
+
+ log.Debug("Look for all package_version.version that start with sha256:")
+
+ // Iterate over all container versions in ascending order and store
+ // in shaToPackageVersion all versions with a sha256: prefix. If an index
+ // manifest is found, the sha256: digests it references are removed
+ // from shaToPackageVersion. If the sha256: digest found in an index
+ // manifest is not already in shaToPackageVersion, it is stored in
+ // knownSHA to be dealt with later.
+ //
+ // Although it is theoretically possible that a sha256: is uploaded
+ // after the index manifest that references it, this is not the
+ // normal order of operations. First the sha256: version is uploaded
+ // and then the index manifest. When the iteration completes,
+ // knownSHA will therefore be empty most of the time and
+ // shaToPackageVersion will only contain unreferenced sha256: versions.
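+ //
+ // For example (hypothetical digests): if version sha256:aaa is seen
+ // before the index manifest that lists it, it is added to
+ // shaToPackageVersion and deleted again when the manifest is reached;
+ // if the manifest is seen first, sha256:aaa lands in knownSHA and the
+ // version is pruned from shaToPackageVersion after the iteration.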
+ if err := db.GetEngine(ctx).
+ Select("`package_version`.`id`, `package_version`.`created_unix`, `package_version`.`lower_version`, `package_version`.`metadata_json`").
+ Join("INNER", "`package`", "`package`.`id` = `package_version`.`package_id`").
+ Where("`package`.`type` = ?", packages.TypeContainer).
+ OrderBy("`package_version`.`id` ASC").
+ Iterate(new(packages.PackageVersion), func(_ int, bean any) error {
+ v := bean.(*packages.PackageVersion)
+ if strings.HasPrefix(v.LowerVersion, "sha256:") {
+ shaToPackageVersion[v.LowerVersion] = packageVersion{id: v.ID, created: v.CreatedUnix}
+ foundAtLeastOneSHA256 = true
+ } else if strings.Contains(v.MetadataJSON, `"manifests":[{`) {
+ var metadata container_module.Metadata
+ if err := json.Unmarshal([]byte(v.MetadataJSON), &metadata); err != nil {
+ log.Error("package_version.id = %d package_version.metadata_json %s is not a JSON string containing valid metadata. It was ignored but it is an inconsistency in the database that should be looked at. %v", v.ID, v.MetadataJSON, err)
+ return nil
+ }
+ for _, manifest := range metadata.Manifests {
+ if _, ok := shaToPackageVersion[manifest.Digest]; ok {
+ delete(shaToPackageVersion, manifest.Digest)
+ } else {
+ knownSHA[manifest.Digest] = true
+ }
+ }
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ for sha := range knownSHA {
+ delete(shaToPackageVersion, sha)
+ }
+
+ if len(shaToPackageVersion) == 0 {
+ if foundAtLeastOneSHA256 {
+ log.Debug("All container images with a version matching sha256:* are referenced by an index manifest")
+ } else {
+ log.Debug("There are no container images with a version matching sha256:*")
+ }
+ log.Info("Nothing to cleanup")
+ return nil
+ }
+
+ found := len(shaToPackageVersion)
+
+ log.Warn("%d container image(s) with a version matching sha256:* are not referenced by an index manifest", found)
+
+ log.Debug("Deleting unreferenced image versions from `package_version`, `package_file` and `package_property` (%d at a time)", SHA256BatchSize)
+
+ packageVersionIDs := make([]int64, 0, SHA256BatchSize)
+ tooYoung := 0
+ for _, p := range shaToPackageVersion {
+ if p.created < old {
+ packageVersionIDs = append(packageVersionIDs, p.id)
+ } else {
+ tooYoung++
+ }
+ }
+
+ if tooYoung > 0 {
+ log.Warn("%d out of %d container image(s) are not deleted because they were created less than %v ago", tooYoung, found, olderThan)
+ }
+
+ for len(packageVersionIDs) > 0 {
+ upper := min(len(packageVersionIDs), SHA256BatchSize)
+ versionIDs := packageVersionIDs[0:upper]
+
+ var packageFileIDs []int64
+ if err := db.GetEngine(ctx).Select("id").Table("package_file").In("version_id", versionIDs).Find(&packageFileIDs); err != nil {
+ return err
+ }
+ log.Info("Removing %d entries from `package_file` and `package_property`", len(packageFileIDs))
+ if _, err := db.GetEngine(ctx).In("id", packageFileIDs).Delete(&packages.PackageFile{}); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).In("ref_id", packageFileIDs).And("ref_type = ?", packages.PropertyTypeFile).Delete(&packages.PackageProperty{}); err != nil {
+ return err
+ }
+
+ log.Info("Removing %d entries from `package_version` and `package_property`", upper)
+ if _, err := db.GetEngine(ctx).In("id", versionIDs).Delete(&packages.PackageVersion{}); err != nil {
+ return err
+ }
+ if _, err := db.GetEngine(ctx).In("ref_id", versionIDs).And("ref_type = ?", packages.PropertyTypeVersion).Delete(&packages.PackageProperty{}); err != nil {
+ return err
+ }
+
+ packageVersionIDs = packageVersionIDs[upper:]
+ }
+
+ return committer.Commit()
+}
diff --git a/services/packages/container/common.go b/services/packages/container/common.go
new file mode 100644
index 0000000..5a14ed5
--- /dev/null
+++ b/services/packages/container/common.go
@@ -0,0 +1,35 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package container
+
+import (
+ "context"
+ "strings"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ user_model "code.gitea.io/gitea/models/user"
+ container_module "code.gitea.io/gitea/modules/packages/container"
+)
+
+// UpdateRepositoryNames updates the repository name property for all packages of the specified owner
+func UpdateRepositoryNames(ctx context.Context, owner *user_model.User, newOwnerName string) error {
+ ps, err := packages_model.GetPackagesByType(ctx, owner.ID, packages_model.TypeContainer)
+ if err != nil {
+ return err
+ }
+
+ newOwnerName = strings.ToLower(newOwnerName)
+
+ for _, p := range ps {
+ if err := packages_model.DeletePropertyByName(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository); err != nil {
+ return err
+ }
+
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository, newOwnerName+"/"+p.LowerName); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
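UpdateRepositoryNames exists because the container registry stores the owner-qualified image name as a package property, so a plain rename of the user row would leave stale values behind. A hedged sketch of the intended caller (renameOwner and the import aliases are assumptions; only the UpdateRepositoryNames call is taken from this change):

	// renameOwner is a hypothetical wrapper around the user rename flow,
	// assuming container_service aliases services/packages/container.
	func renameOwner(ctx context.Context, u *user_model.User, newName string) error {
		// ...rename the user record itself first...
		if err := container_service.UpdateRepositoryNames(ctx, u, newName); err != nil {
			return fmt.Errorf("failed to update container repository names: %w", err)
		}
		return nil
	}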
diff --git a/services/packages/debian/repository.go b/services/packages/debian/repository.go
new file mode 100644
index 0000000..e400f1e
--- /dev/null
+++ b/services/packages/debian/repository.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package debian
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ debian_model "code.gitea.io/gitea/models/packages/debian"
+ user_model "code.gitea.io/gitea/models/user"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ debian_module "code.gitea.io/gitea/modules/packages/debian"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/clearsign"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+ "github.com/ulikunitz/xz"
+)
+
+// GetOrCreateRepositoryVersion gets or creates the internal repository package
+// The Debian registry needs multiple index files which are stored in this package.
+func GetOrCreateRepositoryVersion(ctx context.Context, ownerID int64) (*packages_model.PackageVersion, error) {
+ return packages_service.GetOrCreateInternalPackageVersion(ctx, ownerID, packages_model.TypeDebian, debian_module.RepositoryPackage, debian_module.RepositoryVersion)
+}
+
+// GetOrCreateKeyPair gets or creates the PGP keys used to sign repository files
+func GetOrCreateKeyPair(ctx context.Context, ownerID int64) (string, string, error) {
+ priv, err := user_model.GetSetting(ctx, ownerID, debian_module.SettingKeyPrivate)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ pub, err := user_model.GetSetting(ctx, ownerID, debian_module.SettingKeyPublic)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ if priv == "" || pub == "" {
+ priv, pub, err = generateKeypair()
+ if err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, debian_module.SettingKeyPrivate, priv); err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, debian_module.SettingKeyPublic, pub); err != nil {
+ return "", "", err
+ }
+ }
+
+ return priv, pub, nil
+}
+
+func generateKeypair() (string, string, error) {
+ e, err := openpgp.NewEntity("", "Debian Registry", "", nil)
+ if err != nil {
+ return "", "", err
+ }
+
+ var priv strings.Builder
+ var pub strings.Builder
+
+ w, err := armor.Encode(&priv, openpgp.PrivateKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.SerializePrivate(w, nil); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ w, err = armor.Encode(&pub, openpgp.PublicKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.Serialize(w); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ return priv.String(), pub.String(), nil
+}
+
+// BuildAllRepositoryFiles (re)builds all repository files for every available distribution, component and architecture
+func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ // 1. Delete all existing repository files
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ // 2. (Re)Build repository files for existing packages
+ distributions, err := debian_model.GetDistributions(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ for _, distribution := range distributions {
+ components, err := debian_model.GetComponents(ctx, ownerID, distribution)
+ if err != nil {
+ return err
+ }
+ architectures, err := debian_model.GetArchitectures(ctx, ownerID, distribution)
+ if err != nil {
+ return err
+ }
+
+ for _, component := range components {
+ for _, architecture := range architectures {
+ if err := buildRepositoryFiles(ctx, ownerID, pv, distribution, component, architecture); err != nil {
+ return fmt.Errorf("failed to build repository files [%s/%s/%s]: %w", distribution, component, architecture, err)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// BuildSpecificRepositoryFiles builds index files for the repository
+func BuildSpecificRepositoryFiles(ctx context.Context, ownerID int64, distribution, component, architecture string) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ return buildRepositoryFiles(ctx, ownerID, pv, distribution, component, architecture)
+}
+
+func buildRepositoryFiles(ctx context.Context, ownerID int64, repoVersion *packages_model.PackageVersion, distribution, component, architecture string) error {
+ if err := buildPackagesIndices(ctx, ownerID, repoVersion, distribution, component, architecture); err != nil {
+ return err
+ }
+
+ return buildReleaseFiles(ctx, ownerID, repoVersion, distribution)
+}
+
+// https://wiki.debian.org/DebianRepository/Format#A.22Packages.22_Indices
+func buildPackagesIndices(ctx context.Context, ownerID int64, repoVersion *packages_model.PackageVersion, distribution, component, architecture string) error {
+ opts := &debian_model.PackageSearchOptions{
+ OwnerID: ownerID,
+ Distribution: distribution,
+ Component: component,
+ Architecture: architecture,
+ }
+
+ // Delete the package indices if there are no packages
+ if has, err := debian_model.ExistPackages(ctx, opts); err != nil {
+ return err
+ } else if !has {
+ key := fmt.Sprintf("%s|%s|%s", distribution, component, architecture)
+ for _, filename := range []string{"Packages", "Packages.gz", "Packages.xz"} {
+ pf, err := packages_model.GetFileForVersionByName(ctx, repoVersion.ID, filename, key)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return err
+ } else if pf == nil {
+ continue
+ }
+
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ packagesContent, _ := packages_module.NewHashedBuffer()
+ defer packagesContent.Close()
+
+ packagesGzipContent, _ := packages_module.NewHashedBuffer()
+ defer packagesGzipContent.Close()
+
+ gzw := gzip.NewWriter(packagesGzipContent)
+
+ packagesXzContent, _ := packages_module.NewHashedBuffer()
+ defer packagesXzContent.Close()
+
+ xzw, _ := xz.NewWriter(packagesXzContent)
+
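+ // One pass over the matching packages feeds all three index variants at once.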
+ w := io.MultiWriter(packagesContent, gzw, xzw)
+
+ addSeparator := false
+ if err := debian_model.SearchPackages(ctx, opts, func(pfd *packages_model.PackageFileDescriptor) {
+ if addSeparator {
+ fmt.Fprintln(w)
+ }
+ addSeparator = true
+
+ fmt.Fprintf(w, "%s\n", strings.TrimSpace(pfd.Properties.GetByName(debian_module.PropertyControl)))
+
+ fmt.Fprintf(w, "Filename: pool/%s/%s/%s\n", distribution, component, pfd.File.Name)
+ fmt.Fprintf(w, "Size: %d\n", pfd.Blob.Size)
+ fmt.Fprintf(w, "MD5sum: %s\n", pfd.Blob.HashMD5)
+ fmt.Fprintf(w, "SHA1: %s\n", pfd.Blob.HashSHA1)
+ fmt.Fprintf(w, "SHA256: %s\n", pfd.Blob.HashSHA256)
+ fmt.Fprintf(w, "SHA512: %s\n", pfd.Blob.HashSHA512)
+ }); err != nil {
+ return err
+ }
+
+ gzw.Close()
+ xzw.Close()
+
+ for _, file := range []struct {
+ Name string
+ Data packages_module.HashedSizeReader
+ }{
+ {"Packages", packagesContent},
+ {"Packages.gz", packagesGzipContent},
+ {"Packages.xz", packagesXzContent},
+ } {
+ _, err := packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ repoVersion,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: file.Name,
+ CompositeKey: fmt.Sprintf("%s|%s|%s", distribution, component, architecture),
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: file.Data,
+ IsLead: false,
+ OverwriteExisting: true,
+ Properties: map[string]string{
+ debian_module.PropertyRepositoryIncludeInRelease: "",
+ debian_module.PropertyDistribution: distribution,
+ debian_module.PropertyComponent: component,
+ debian_module.PropertyArchitecture: architecture,
+ },
+ },
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// https://wiki.debian.org/DebianRepository/Format#A.22Release.22_files
+func buildReleaseFiles(ctx context.Context, ownerID int64, repoVersion *packages_model.PackageVersion, distribution string) error {
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ VersionID: repoVersion.ID,
+ Properties: map[string]string{
+ debian_module.PropertyRepositoryIncludeInRelease: "",
+ debian_module.PropertyDistribution: distribution,
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ // Delete the release files if there are no packages
+ if len(pfs) == 0 {
+ for _, filename := range []string{"Release", "Release.gpg", "InRelease"} {
+ pf, err := packages_model.GetFileForVersionByName(ctx, repoVersion.ID, filename, distribution)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return err
+ } else if pf == nil {
+ continue
+ }
+
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ components, err := debian_model.GetComponents(ctx, ownerID, distribution)
+ if err != nil {
+ return err
+ }
+
+ sort.Strings(components)
+
+ architectures, err := debian_model.GetArchitectures(ctx, ownerID, distribution)
+ if err != nil {
+ return err
+ }
+
+ sort.Strings(architectures)
+
+ priv, _, err := GetOrCreateKeyPair(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ block, err := armor.Decode(strings.NewReader(priv))
+ if err != nil {
+ return err
+ }
+
+ e, err := openpgp.ReadEntity(packet.NewReader(block.Body))
+ if err != nil {
+ return err
+ }
+
+ inReleaseContent, _ := packages_module.NewHashedBuffer()
+ defer inReleaseContent.Close()
+
+ sw, err := clearsign.Encode(inReleaseContent, e.PrivateKey, nil)
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
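+ // Everything written to w goes through the clearsigner (becoming InRelease)
+ // and into buf, which later becomes Release and the detached-signature input.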
+ w := io.MultiWriter(sw, &buf)
+
+ fmt.Fprintf(w, "Origin: %s\n", setting.AppName)
+ fmt.Fprintf(w, "Label: %s\n", setting.AppName)
+ fmt.Fprintf(w, "Suite: %s\n", distribution)
+ fmt.Fprintf(w, "Codename: %s\n", distribution)
+ fmt.Fprintf(w, "Components: %s\n", strings.Join(components, " "))
+ fmt.Fprintf(w, "Architectures: %s\n", strings.Join(architectures, " "))
+ fmt.Fprintf(w, "Date: %s\n", time.Now().UTC().Format(time.RFC1123))
+ fmt.Fprint(w, "Acquire-By-Hash: yes\n")
+
+ pfds, err := packages_model.GetPackageFileDescriptors(ctx, pfs)
+ if err != nil {
+ return err
+ }
+
+ var md5, sha1, sha256, sha512 strings.Builder
+ for _, pfd := range pfds {
+ path := fmt.Sprintf("%s/binary-%s/%s", pfd.Properties.GetByName(debian_module.PropertyComponent), pfd.Properties.GetByName(debian_module.PropertyArchitecture), pfd.File.Name)
+ fmt.Fprintf(&md5, " %s %d %s\n", pfd.Blob.HashMD5, pfd.Blob.Size, path)
+ fmt.Fprintf(&sha1, " %s %d %s\n", pfd.Blob.HashSHA1, pfd.Blob.Size, path)
+ fmt.Fprintf(&sha256, " %s %d %s\n", pfd.Blob.HashSHA256, pfd.Blob.Size, path)
+ fmt.Fprintf(&sha512, " %s %d %s\n", pfd.Blob.HashSHA512, pfd.Blob.Size, path)
+ }
+
+ fmt.Fprintln(w, "MD5Sum:")
+ fmt.Fprint(w, md5.String())
+ fmt.Fprintln(w, "SHA1:")
+ fmt.Fprint(w, sha1.String())
+ fmt.Fprintln(w, "SHA256:")
+ fmt.Fprint(w, sha256.String())
+ fmt.Fprintln(w, "SHA512:")
+ fmt.Fprint(w, sha512.String())
+
+ sw.Close()
+
+ releaseGpgContent, _ := packages_module.NewHashedBuffer()
+ defer releaseGpgContent.Close()
+
+ if err := openpgp.ArmoredDetachSign(releaseGpgContent, e, bytes.NewReader(buf.Bytes()), nil); err != nil {
+ return err
+ }
+
+ releaseContent, _ := packages_module.CreateHashedBufferFromReader(&buf)
+ defer releaseContent.Close()
+
+ for _, file := range []struct {
+ Name string
+ Data packages_module.HashedSizeReader
+ }{
+ {"Release", releaseContent},
+ {"Release.gpg", releaseGpgContent},
+ {"InRelease", inReleaseContent},
+ } {
+ _, err = packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ repoVersion,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: file.Name,
+ CompositeKey: distribution,
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: file.Data,
+ IsLead: false,
+ OverwriteExisting: true,
+ Properties: map[string]string{
+ debian_module.PropertyDistribution: distribution,
+ },
+ },
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
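The sign-once/write-twice pattern in buildReleaseFiles above is worth isolating: the release body is produced a single time, clearsigned into InRelease and kept plain for Release plus its detached Release.gpg signature. A minimal standalone sketch with a throwaway key (the real code uses the stored per-owner key instead):

	package main

	import (
		"bytes"
		"fmt"
		"io"

		"github.com/ProtonMail/go-crypto/openpgp"
		"github.com/ProtonMail/go-crypto/openpgp/clearsign"
	)

	func main() {
		e, err := openpgp.NewEntity("", "Debian Registry", "", nil)
		if err != nil {
			panic(err)
		}

		var inRelease, release bytes.Buffer
		sw, err := clearsign.Encode(&inRelease, e.PrivateKey, nil)
		if err != nil {
			panic(err)
		}
		// One write feeds both the clearsigned and the plain copy.
		w := io.MultiWriter(sw, &release)
		fmt.Fprintf(w, "Suite: %s\n", "bookworm")
		if err := sw.Close(); err != nil {
			panic(err)
		}

		// Release.gpg is an armored detached signature over the plain body.
		var releaseGpg bytes.Buffer
		if err := openpgp.ArmoredDetachSign(&releaseGpg, e, bytes.NewReader(release.Bytes()), nil); err != nil {
			panic(err)
		}
		fmt.Print(inRelease.String(), releaseGpg.String())
	}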
diff --git a/services/packages/packages.go b/services/packages/packages.go
new file mode 100644
index 0000000..a5b8450
--- /dev/null
+++ b/services/packages/packages.go
@@ -0,0 +1,665 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ packages_model "code.gitea.io/gitea/models/packages"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/optional"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ notify_service "code.gitea.io/gitea/services/notify"
+)
+
+var (
+ ErrQuotaTypeSize = errors.New("maximum allowed package type size exceeded")
+ ErrQuotaTotalSize = errors.New("maximum allowed package storage quota exceeded")
+ ErrQuotaTotalCount = errors.New("maximum allowed package count exceeded")
+)
+
+// PackageInfo describes a package
+type PackageInfo struct {
+ Owner *user_model.User
+ PackageType packages_model.Type
+ Name string
+ Version string
+}
+
+// PackageCreationInfo describes a package to create
+type PackageCreationInfo struct {
+ PackageInfo
+ SemverCompatible bool
+ Creator *user_model.User
+ Metadata any
+ PackageProperties map[string]string
+ VersionProperties map[string]string
+}
+
+// PackageFileInfo describes a package file
+type PackageFileInfo struct {
+ Filename string
+ CompositeKey string
+}
+
+// PackageFileCreationInfo describes a package file to create
+type PackageFileCreationInfo struct {
+ PackageFileInfo
+ Creator *user_model.User
+ Data packages_module.HashedSizeReader
+ IsLead bool
+ Properties map[string]string
+ OverwriteExisting bool
+}
+
+// CreatePackageAndAddFile creates a package with a file. If the same package exists already, ErrDuplicatePackageVersion is returned
+func CreatePackageAndAddFile(ctx context.Context, pvci *PackageCreationInfo, pfci *PackageFileCreationInfo) (*packages_model.PackageVersion, *packages_model.PackageFile, error) {
+ return createPackageAndAddFile(ctx, pvci, pfci, false)
+}
+
+// CreatePackageOrAddFileToExisting creates a package with a file or adds the file if the package exists already
+func CreatePackageOrAddFileToExisting(ctx context.Context, pvci *PackageCreationInfo, pfci *PackageFileCreationInfo) (*packages_model.PackageVersion, *packages_model.PackageFile, error) {
+ return createPackageAndAddFile(ctx, pvci, pfci, true)
+}
+
+func createPackageAndAddFile(ctx context.Context, pvci *PackageCreationInfo, pfci *PackageFileCreationInfo, allowDuplicate bool) (*packages_model.PackageVersion, *packages_model.PackageFile, error) {
+ dbCtx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer committer.Close()
+
+ pv, created, err := createPackageAndVersion(dbCtx, pvci, allowDuplicate)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pf, pb, blobCreated, err := addFileToPackageVersion(dbCtx, pv, &pvci.PackageInfo, pfci)
+ removeBlob := false
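+ // If anything below fails after a new blob was stored, delete it again so the content store keeps no orphaned data.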
+ defer func() {
+ if blobCreated && removeBlob {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob from content store: %v", err)
+ }
+ }
+ }()
+ if err != nil {
+ removeBlob = true
+ return nil, nil, err
+ }
+
+ if err := committer.Commit(); err != nil {
+ removeBlob = true
+ return nil, nil, err
+ }
+
+ if created {
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ notify_service.PackageCreate(ctx, pvci.Creator, pd)
+ }
+
+ return pv, pf, nil
+}
+
+func createPackageAndVersion(ctx context.Context, pvci *PackageCreationInfo, allowDuplicate bool) (*packages_model.PackageVersion, bool, error) {
+ log.Trace("Creating package: %v, %v, %v, %s, %s, %+v, %+v, %v", pvci.Creator.ID, pvci.Owner.ID, pvci.PackageType, pvci.Name, pvci.Version, pvci.PackageProperties, pvci.VersionProperties, allowDuplicate)
+
+ packageCreated := true
+ p := &packages_model.Package{
+ OwnerID: pvci.Owner.ID,
+ Type: pvci.PackageType,
+ Name: pvci.Name,
+ LowerName: strings.ToLower(pvci.Name),
+ SemverCompatible: pvci.SemverCompatible,
+ }
+ var err error
+ if p, err = packages_model.TryInsertPackage(ctx, p); err != nil {
+ if err == packages_model.ErrDuplicatePackage {
+ packageCreated = false
+ } else {
+ log.Error("Error inserting package: %v", err)
+ return nil, false, err
+ }
+ }
+
+ if packageCreated {
+ for name, value := range pvci.PackageProperties {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypePackage, p.ID, name, value); err != nil {
+ log.Error("Error setting package property: %v", err)
+ return nil, false, err
+ }
+ }
+ }
+
+ metadataJSON, err := json.Marshal(pvci.Metadata)
+ if err != nil {
+ return nil, false, err
+ }
+
+ versionCreated := true
+ pv := &packages_model.PackageVersion{
+ PackageID: p.ID,
+ CreatorID: pvci.Creator.ID,
+ Version: pvci.Version,
+ LowerVersion: strings.ToLower(pvci.Version),
+ MetadataJSON: string(metadataJSON),
+ }
+ if pv, err = packages_model.GetOrInsertVersion(ctx, pv); err != nil {
+ if err == packages_model.ErrDuplicatePackageVersion {
+ versionCreated = false
+ } else {
+ log.Error("Error inserting package: %v", err)
+ return nil, false, err
+ }
+
+ if !allowDuplicate {
+ // no need to log an error
+ return nil, false, err
+ }
+ }
+
+ if versionCreated {
+ if err := CheckCountQuotaExceeded(ctx, pvci.Creator, pvci.Owner); err != nil {
+ return nil, false, err
+ }
+
+ for name, value := range pvci.VersionProperties {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeVersion, pv.ID, name, value); err != nil {
+ log.Error("Error setting package version property: %v", err)
+ return nil, false, err
+ }
+ }
+ }
+
+ return pv, versionCreated, nil
+}
+
+// AddFileToExistingPackage adds a file to an existing package. If the package does not exist, ErrPackageNotExist is returned
+func AddFileToExistingPackage(ctx context.Context, pvi *PackageInfo, pfci *PackageFileCreationInfo) (*packages_model.PackageFile, error) {
+ return addFileToPackageWrapper(ctx, func(ctx context.Context) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error) {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ return addFileToPackageVersion(ctx, pv, pvi, pfci)
+ })
+}
+
+// AddFileToPackageVersionInternal adds a file to the package version
+// This method skips quota checks and should only be used for system-managed packages.
+func AddFileToPackageVersionInternal(ctx context.Context, pv *packages_model.PackageVersion, pfci *PackageFileCreationInfo) (*packages_model.PackageFile, error) {
+ return addFileToPackageWrapper(ctx, func(ctx context.Context) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error) {
+ return addFileToPackageVersionUnchecked(ctx, pv, pfci)
+ })
+}
+
+func addFileToPackageWrapper(ctx context.Context, fn func(ctx context.Context) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error)) (*packages_model.PackageFile, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+
+ pf, pb, blobCreated, err := fn(ctx)
+ removeBlob := false
+ defer func() {
+ if removeBlob {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Delete(packages_module.BlobHash256Key(pb.HashSHA256)); err != nil {
+ log.Error("Error deleting package blob from content store: %v", err)
+ }
+ }
+ }()
+ if err != nil {
+ removeBlob = blobCreated
+ return nil, err
+ }
+
+ if err := committer.Commit(); err != nil {
+ removeBlob = blobCreated
+ return nil, err
+ }
+
+ return pf, nil
+}
+
+// NewPackageBlob creates a package blob instance
+func NewPackageBlob(hsr packages_module.HashedSizeReader) *packages_model.PackageBlob {
+ hashMD5, hashSHA1, hashSHA256, hashSHA512 := hsr.Sums()
+
+ return &packages_model.PackageBlob{
+ Size: hsr.Size(),
+ HashMD5: hex.EncodeToString(hashMD5),
+ HashSHA1: hex.EncodeToString(hashSHA1),
+ HashSHA256: hex.EncodeToString(hashSHA256),
+ HashSHA512: hex.EncodeToString(hashSHA512),
+ }
+}
+
+func addFileToPackageVersion(ctx context.Context, pv *packages_model.PackageVersion, pvi *PackageInfo, pfci *PackageFileCreationInfo) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error) {
+ if err := CheckSizeQuotaExceeded(ctx, pfci.Creator, pvi.Owner, pvi.PackageType, pfci.Data.Size()); err != nil {
+ return nil, nil, false, err
+ }
+
+ return addFileToPackageVersionUnchecked(ctx, pv, pfci)
+}
+
+func addFileToPackageVersionUnchecked(ctx context.Context, pv *packages_model.PackageVersion, pfci *PackageFileCreationInfo) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error) {
+ log.Trace("Adding package file: %v, %s", pv.ID, pfci.Filename)
+
+ pb, exists, err := packages_model.GetOrInsertBlob(ctx, NewPackageBlob(pfci.Data))
+ if err != nil {
+ log.Error("Error inserting package blob: %v", err)
+ return nil, nil, false, err
+ }
+ if !exists {
+ contentStore := packages_module.NewContentStore()
+ if err := contentStore.Save(packages_module.BlobHash256Key(pb.HashSHA256), pfci.Data, pfci.Data.Size()); err != nil {
+ log.Error("Error saving package blob in content store: %v", err)
+ return nil, nil, false, err
+ }
+ }
+
+ if pfci.OverwriteExisting {
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, pfci.Filename, pfci.CompositeKey)
+ if err != nil && err != packages_model.ErrPackageFileNotExist {
+ return nil, pb, !exists, err
+ }
+ if pf != nil {
+ // Short circuit if blob is the same
+ if pf.BlobID == pb.ID {
+ return pf, pb, !exists, nil
+ }
+
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypeFile, pf.ID); err != nil {
+ return nil, pb, !exists, err
+ }
+ if err := packages_model.DeleteFileByID(ctx, pf.ID); err != nil {
+ return nil, pb, !exists, err
+ }
+ }
+ }
+
+ pf := &packages_model.PackageFile{
+ VersionID: pv.ID,
+ BlobID: pb.ID,
+ Name: pfci.Filename,
+ LowerName: strings.ToLower(pfci.Filename),
+ CompositeKey: pfci.CompositeKey,
+ IsLead: pfci.IsLead,
+ }
+ if pf, err = packages_model.TryInsertFile(ctx, pf); err != nil {
+ if err != packages_model.ErrDuplicatePackageFile {
+ log.Error("Error inserting package file: %v", err)
+ }
+ return nil, pb, !exists, err
+ }
+
+ for name, value := range pfci.Properties {
+ if _, err := packages_model.InsertProperty(ctx, packages_model.PropertyTypeFile, pf.ID, name, value); err != nil {
+ log.Error("Error setting package file property: %v", err)
+ return pf, pb, !exists, err
+ }
+ }
+
+ return pf, pb, !exists, nil
+}
+
+// CheckCountQuotaExceeded checks if the owner has more than the allowed packages
+// The check is skipped if the doer is an admin.
+func CheckCountQuotaExceeded(ctx context.Context, doer, owner *user_model.User) error {
+ if doer.IsAdmin {
+ return nil
+ }
+
+ if setting.Packages.LimitTotalOwnerCount > -1 {
+ totalCount, err := packages_model.CountVersions(ctx, &packages_model.PackageSearchOptions{
+ OwnerID: owner.ID,
+ IsInternal: optional.Some(false),
+ })
+ if err != nil {
+ log.Error("CountVersions failed: %v", err)
+ return err
+ }
+ if totalCount > setting.Packages.LimitTotalOwnerCount {
+ return ErrQuotaTotalCount
+ }
+ }
+
+ return nil
+}
+
+// CheckSizeQuotaExceeded checks whether the upload exceeds the type-specific size limit or the owner's total storage quota
+// The check is skipped if the doer is an admin.
+func CheckSizeQuotaExceeded(ctx context.Context, doer, owner *user_model.User, packageType packages_model.Type, uploadSize int64) error {
+ if doer.IsAdmin {
+ return nil
+ }
+
+ var typeSpecificSize int64
+ switch packageType {
+ case packages_model.TypeAlpine:
+ typeSpecificSize = setting.Packages.LimitSizeAlpine
+ case packages_model.TypeArch:
+ typeSpecificSize = setting.Packages.LimitSizeArch
+ case packages_model.TypeCargo:
+ typeSpecificSize = setting.Packages.LimitSizeCargo
+ case packages_model.TypeChef:
+ typeSpecificSize = setting.Packages.LimitSizeChef
+ case packages_model.TypeComposer:
+ typeSpecificSize = setting.Packages.LimitSizeComposer
+ case packages_model.TypeConan:
+ typeSpecificSize = setting.Packages.LimitSizeConan
+ case packages_model.TypeConda:
+ typeSpecificSize = setting.Packages.LimitSizeConda
+ case packages_model.TypeContainer:
+ typeSpecificSize = setting.Packages.LimitSizeContainer
+ case packages_model.TypeCran:
+ typeSpecificSize = setting.Packages.LimitSizeCran
+ case packages_model.TypeDebian:
+ typeSpecificSize = setting.Packages.LimitSizeDebian
+ case packages_model.TypeGeneric:
+ typeSpecificSize = setting.Packages.LimitSizeGeneric
+ case packages_model.TypeGo:
+ typeSpecificSize = setting.Packages.LimitSizeGo
+ case packages_model.TypeHelm:
+ typeSpecificSize = setting.Packages.LimitSizeHelm
+ case packages_model.TypeMaven:
+ typeSpecificSize = setting.Packages.LimitSizeMaven
+ case packages_model.TypeNpm:
+ typeSpecificSize = setting.Packages.LimitSizeNpm
+ case packages_model.TypeNuGet:
+ typeSpecificSize = setting.Packages.LimitSizeNuGet
+ case packages_model.TypePub:
+ typeSpecificSize = setting.Packages.LimitSizePub
+ case packages_model.TypePyPI:
+ typeSpecificSize = setting.Packages.LimitSizePyPI
+ case packages_model.TypeRpm:
+ typeSpecificSize = setting.Packages.LimitSizeRpm
+ case packages_model.TypeRubyGems:
+ typeSpecificSize = setting.Packages.LimitSizeRubyGems
+ case packages_model.TypeSwift:
+ typeSpecificSize = setting.Packages.LimitSizeSwift
+ case packages_model.TypeVagrant:
+ typeSpecificSize = setting.Packages.LimitSizeVagrant
+ }
+ if typeSpecificSize > -1 && typeSpecificSize < uploadSize {
+ return ErrQuotaTypeSize
+ }
+
+ if setting.Packages.LimitTotalOwnerSize > -1 {
+ totalSize, err := packages_model.CalculateFileSize(ctx, &packages_model.PackageFileSearchOptions{
+ OwnerID: owner.ID,
+ })
+ if err != nil {
+ log.Error("CalculateFileSize failed: %v", err)
+ return err
+ }
+ if totalSize+uploadSize > setting.Packages.LimitTotalOwnerSize {
+ return ErrQuotaTotalSize
+ }
+ }
+
+ return nil
+}
+
+// GetOrCreateInternalPackageVersion gets or creates an internal package version
+// Some package types need such internal packages for housekeeping.
+func GetOrCreateInternalPackageVersion(ctx context.Context, ownerID int64, packageType packages_model.Type, name, version string) (*packages_model.PackageVersion, error) {
+ var pv *packages_model.PackageVersion
+
+ return pv, db.WithTx(ctx, func(ctx context.Context) error {
+ p := &packages_model.Package{
+ OwnerID: ownerID,
+ Type: packageType,
+ Name: name,
+ LowerName: name,
+ IsInternal: true,
+ }
+ var err error
+ if p, err = packages_model.TryInsertPackage(ctx, p); err != nil {
+ if err != packages_model.ErrDuplicatePackage {
+ log.Error("Error inserting package: %v", err)
+ return err
+ }
+ }
+
+ pv = &packages_model.PackageVersion{
+ PackageID: p.ID,
+ CreatorID: ownerID,
+ Version: version,
+ LowerVersion: version,
+ IsInternal: true,
+ MetadataJSON: "null",
+ }
+ if pv, err = packages_model.GetOrInsertVersion(ctx, pv); err != nil {
+ if err != packages_model.ErrDuplicatePackageVersion {
+ log.Error("Error inserting package version: %v", err)
+ return err
+ }
+ }
+
+ return nil
+ })
+}
+
+// RemovePackageVersionByNameAndVersion deletes a package version and all associated files
+func RemovePackageVersionByNameAndVersion(ctx context.Context, doer *user_model.User, pvi *PackageInfo) error {
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version)
+ if err != nil {
+ return err
+ }
+
+ return RemovePackageVersion(ctx, doer, pv)
+}
+
+// RemovePackageVersion deletes the package version and all associated files
+func RemovePackageVersion(ctx context.Context, doer *user_model.User, pv *packages_model.PackageVersion) error {
+ dbCtx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ pd, err := packages_model.GetPackageDescriptor(dbCtx, pv)
+ if err != nil {
+ return err
+ }
+
+ log.Trace("Deleting package: %v", pv.ID)
+
+ if err := DeletePackageVersionAndReferences(dbCtx, pv); err != nil {
+ return err
+ }
+
+ if err := committer.Commit(); err != nil {
+ return err
+ }
+
+ notify_service.PackageDelete(ctx, doer, pd)
+
+ return nil
+}
+
+// RemovePackageFileAndVersionIfUnreferenced deletes the package file and the version if there are no referenced files afterwards
+func RemovePackageFileAndVersionIfUnreferenced(ctx context.Context, doer *user_model.User, pf *packages_model.PackageFile) error {
+ var pd *packages_model.PackageDescriptor
+
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ if err := DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+
+ has, err := packages_model.HasVersionFileReferences(ctx, pf.VersionID)
+ if err != nil {
+ return err
+ }
+ if !has {
+ pv, err := packages_model.GetVersionByID(ctx, pf.VersionID)
+ if err != nil {
+ return err
+ }
+
+ pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+ if err != nil {
+ return err
+ }
+
+ if err := DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ if pd != nil {
+ notify_service.PackageDelete(ctx, doer, pd)
+ }
+
+ return nil
+}
+
+// DeletePackageVersionAndReferences deletes the package version and its properties and files
+func DeletePackageVersionAndReferences(ctx context.Context, pv *packages_model.PackageVersion) error {
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypeVersion, pv.ID); err != nil {
+ return err
+ }
+
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+
+ for _, pf := range pfs {
+ if err := DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ return packages_model.DeleteVersionByID(ctx, pv.ID)
+}
+
+// DeletePackageFile deletes the package file and its properties
+func DeletePackageFile(ctx context.Context, pf *packages_model.PackageFile) error {
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypeFile, pf.ID); err != nil {
+ return err
+ }
+ return packages_model.DeleteFileByID(ctx, pf.ID)
+}
+
+// GetFileStreamByPackageNameAndVersion returns the content of the specified package file
+func GetFileStreamByPackageNameAndVersion(ctx context.Context, pvi *PackageInfo, pfi *PackageFileInfo) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ log.Trace("Getting package file stream: %v, %v, %s, %s, %s, %s", pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version, pfi.Filename, pfi.CompositeKey)
+
+ pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version)
+ if err != nil {
+ if err == packages_model.ErrPackageNotExist {
+ return nil, nil, nil, err
+ }
+ log.Error("Error getting package: %v", err)
+ return nil, nil, nil, err
+ }
+
+ return GetFileStreamByPackageVersion(ctx, pv, pfi)
+}
+
+// GetFileStreamByPackageVersion returns the content of the specified package file
+func GetFileStreamByPackageVersion(ctx context.Context, pv *packages_model.PackageVersion, pfi *PackageFileInfo) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, pfi.Filename, pfi.CompositeKey)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return GetPackageFileStream(ctx, pf)
+}
+
+// GetPackageFileStream returns the content of the specified package file
+func GetPackageFileStream(ctx context.Context, pf *packages_model.PackageFile) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ return GetPackageBlobStream(ctx, pf, pb)
+}
+
+// GetPackageBlobStream returns the content of the specified package blob
+// If the storage supports direct serving and it is enabled, only the direct serving URL is returned.
+func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, pb *packages_model.PackageBlob) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+ key := packages_module.BlobHash256Key(pb.HashSHA256)
+
+ cs := packages_module.NewContentStore()
+
+ var s io.ReadSeekCloser
+ var u *url.URL
+ var err error
+
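+ // Prefer a direct-serve URL from the storage backend; fall back to streaming the blob through the server.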
+ if cs.ShouldServeDirect() {
+ u, err = cs.GetServeDirectURL(key, pf.Name)
+ if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
+ log.Error("Error getting serve direct url: %v", err)
+ }
+ }
+ if u == nil {
+ s, err = cs.Get(key)
+ }
+
+ if err == nil {
+ if pf.IsLead {
+ if err := packages_model.IncrementDownloadCounter(ctx, pf.VersionID); err != nil {
+ log.Error("Error incrementing download counter: %v", err)
+ }
+ }
+ }
+ return s, u, pf, err
+}
+
+// RemoveAllPackages deletes all package versions (and their files) owned by the given user
+func RemoveAllPackages(ctx context.Context, userID int64) (int, error) {
+ count := 0
+ for {
+ pkgVersions, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ Paginator: &db.ListOptions{
+ PageSize: repo_model.RepositoryListDefaultPageSize,
+ Page: 1,
+ },
+ OwnerID: userID,
+ IsInternal: optional.None[bool](),
+ })
+ if err != nil {
+ return count, fmt.Errorf("GetOwnedPackages[%d]: %w", userID, err)
+ }
+ if len(pkgVersions) == 0 {
+ break
+ }
+ for _, pv := range pkgVersions {
+ if err := DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ return count, fmt.Errorf("unable to delete package %d:%s[%d]. Error: %w", pv.PackageID, pv.Version, pv.ID, err)
+ }
+ count++
+ }
+ }
+ return count, nil
+}
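For orientation, this is roughly how an upload route drives this service: the request body is wrapped in a hashed buffer and handed to CreatePackageAndAddFile in one call. A hedged sketch (uploadGeneric, owner, doer and fileReader are placeholders; the import aliases are the ones used elsewhere in this commit):

	func uploadGeneric(ctx context.Context, owner, doer *user_model.User, fileReader io.Reader) error {
		buf, err := packages_module.CreateHashedBufferFromReader(fileReader)
		if err != nil {
			return err
		}
		defer buf.Close()

		_, _, err = packages_service.CreatePackageAndAddFile(ctx,
			&packages_service.PackageCreationInfo{
				PackageInfo: packages_service.PackageInfo{
					Owner:       owner,
					PackageType: packages_model.TypeGeneric,
					Name:        "my-tool",
					Version:     "1.0.0",
				},
				Creator: doer,
			},
			&packages_service.PackageFileCreationInfo{
				PackageFileInfo: packages_service.PackageFileInfo{
					Filename: "my-tool.tar.gz",
				},
				Creator: doer,
				Data:    buf,
				IsLead:  true,
			},
		)
		return err
	}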
diff --git a/services/packages/rpm/repository.go b/services/packages/rpm/repository.go
new file mode 100644
index 0000000..2cea042
--- /dev/null
+++ b/services/packages/rpm/repository.go
@@ -0,0 +1,674 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package rpm
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ packages_model "code.gitea.io/gitea/models/packages"
+ rpm_model "code.gitea.io/gitea/models/packages/rpm"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+ "code.gitea.io/gitea/modules/util"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+ "github.com/sassoftware/go-rpmutils"
+)
+
+// GetOrCreateRepositoryVersion gets or creates the internal repository package
+// The RPM registry needs multiple metadata files which are stored in this package.
+func GetOrCreateRepositoryVersion(ctx context.Context, ownerID int64) (*packages_model.PackageVersion, error) {
+ return packages_service.GetOrCreateInternalPackageVersion(ctx, ownerID, packages_model.TypeRpm, rpm_module.RepositoryPackage, rpm_module.RepositoryVersion)
+}
+
+// GetOrCreateKeyPair gets or creates the PGP keys used to sign repository metadata files
+func GetOrCreateKeyPair(ctx context.Context, ownerID int64) (string, string, error) {
+ priv, err := user_model.GetSetting(ctx, ownerID, rpm_module.SettingKeyPrivate)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ pub, err := user_model.GetSetting(ctx, ownerID, rpm_module.SettingKeyPublic)
+ if err != nil && !errors.Is(err, util.ErrNotExist) {
+ return "", "", err
+ }
+
+ if priv == "" || pub == "" {
+ priv, pub, err = generateKeypair()
+ if err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, rpm_module.SettingKeyPrivate, priv); err != nil {
+ return "", "", err
+ }
+
+ if err := user_model.SetUserSetting(ctx, ownerID, rpm_module.SettingKeyPublic, pub); err != nil {
+ return "", "", err
+ }
+ }
+
+ return priv, pub, nil
+}
+
+func generateKeypair() (string, string, error) {
+ e, err := openpgp.NewEntity("", "RPM Registry", "", nil)
+ if err != nil {
+ return "", "", err
+ }
+
+ var priv strings.Builder
+ var pub strings.Builder
+
+ w, err := armor.Encode(&priv, openpgp.PrivateKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.SerializePrivate(w, nil); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ w, err = armor.Encode(&pub, openpgp.PublicKeyType, nil)
+ if err != nil {
+ return "", "", err
+ }
+ if err := e.Serialize(w); err != nil {
+ return "", "", err
+ }
+ w.Close()
+
+ return priv.String(), pub.String(), nil
+}
+
+// BuildAllRepositoryFiles (re)builds all repository files for every available group
+func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ // 1. Delete all existing repository files
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ // 2. (Re)Build repository files for existing packages
+ groups, err := rpm_model.GetGroups(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ for _, group := range groups {
+ if err := BuildSpecificRepositoryFiles(ctx, ownerID, group); err != nil {
+ return fmt.Errorf("failed to build repository files [%s]: %w", group, err)
+ }
+ }
+
+ return nil
+}
+
+type repoChecksum struct {
+ Value string `xml:",chardata"`
+ Type string `xml:"type,attr"`
+}
+
+type repoLocation struct {
+ Href string `xml:"href,attr"`
+}
+
+type repoData struct {
+ Type string `xml:"type,attr"`
+ Checksum repoChecksum `xml:"checksum"`
+ OpenChecksum repoChecksum `xml:"open-checksum"`
+ Location repoLocation `xml:"location"`
+ Timestamp int64 `xml:"timestamp"`
+ Size int64 `xml:"size"`
+ OpenSize int64 `xml:"open-size"`
+}
+
+type packageData struct {
+ Package *packages_model.Package
+ Version *packages_model.PackageVersion
+ Blob *packages_model.PackageBlob
+ VersionMetadata *rpm_module.VersionMetadata
+ FileMetadata *rpm_module.FileMetadata
+}
+
+type packageCache = map[*packages_model.PackageFile]*packageData
+
+// BuildSpecificRepositoryFiles builds metadata files for the repository
+func BuildSpecificRepositoryFiles(ctx context.Context, ownerID int64, group string) error {
+ pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+ OwnerID: ownerID,
+ PackageType: packages_model.TypeRpm,
+ Query: "%.rpm",
+ CompositeKey: group,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Delete the repository files if there are no packages
+ if len(pfs) == 0 {
+ pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ return err
+ }
+ for _, pf := range pfs {
+ if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ // Cache data needed for all repository files
+ cache := make(packageCache)
+ for _, pf := range pfs {
+ pv, err := packages_model.GetVersionByID(ctx, pf.VersionID)
+ if err != nil {
+ return err
+ }
+ p, err := packages_model.GetPackageByID(ctx, pv.PackageID)
+ if err != nil {
+ return err
+ }
+ pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+ if err != nil {
+ return err
+ }
+ pps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, pf.ID, rpm_module.PropertyMetadata)
+ if err != nil {
+ return err
+ }
+
+ pd := &packageData{
+ Package: p,
+ Version: pv,
+ Blob: pb,
+ }
+
+ if err := json.Unmarshal([]byte(pv.MetadataJSON), &pd.VersionMetadata); err != nil {
+ return err
+ }
+ if len(pps) > 0 {
+ if err := json.Unmarshal([]byte(pps[0].Value), &pd.FileMetadata); err != nil {
+ return err
+ }
+ }
+
+ cache[pf] = pd
+ }
+
+ primary, err := buildPrimary(ctx, pv, pfs, cache, group)
+ if err != nil {
+ return err
+ }
+ filelists, err := buildFilelists(ctx, pv, pfs, cache, group)
+ if err != nil {
+ return err
+ }
+ other, err := buildOther(ctx, pv, pfs, cache, group)
+ if err != nil {
+ return err
+ }
+
+ return buildRepomd(
+ ctx,
+ pv,
+ ownerID,
+ []*repoData{
+ primary,
+ filelists,
+ other,
+ },
+ group,
+ )
+}
+
+// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#repomd-xml
+func buildRepomd(ctx context.Context, pv *packages_model.PackageVersion, ownerID int64, data []*repoData, group string) error {
+ type Repomd struct {
+ XMLName xml.Name `xml:"repomd"`
+ Xmlns string `xml:"xmlns,attr"`
+ XmlnsRpm string `xml:"xmlns:rpm,attr"`
+ Data []*repoData `xml:"data"`
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString(xml.Header)
+ if err := xml.NewEncoder(&buf).Encode(&Repomd{
+ Xmlns: "http://linux.duke.edu/metadata/repo",
+ XmlnsRpm: "http://linux.duke.edu/metadata/rpm",
+ Data: data,
+ }); err != nil {
+ return err
+ }
+
+ priv, _, err := GetOrCreateKeyPair(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+
+ block, err := armor.Decode(strings.NewReader(priv))
+ if err != nil {
+ return err
+ }
+
+ e, err := openpgp.ReadEntity(packet.NewReader(block.Body))
+ if err != nil {
+ return err
+ }
+
+ repomdAscContent, _ := packages_module.NewHashedBuffer()
+ defer repomdAscContent.Close()
+
+ if err := openpgp.ArmoredDetachSign(repomdAscContent, e, bytes.NewReader(buf.Bytes()), nil); err != nil {
+ return err
+ }
+
+ repomdContent, _ := packages_module.CreateHashedBufferFromReader(&buf)
+ defer repomdContent.Close()
+
+ for _, file := range []struct {
+ Name string
+ Data packages_module.HashedSizeReader
+ }{
+ {"repomd.xml", repomdContent},
+ {"repomd.xml.asc", repomdAscContent},
+ } {
+ _, err = packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ pv,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: file.Name,
+ CompositeKey: group,
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: file.Data,
+ IsLead: false,
+ OverwriteExisting: true,
+ },
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#primary-xml
+func buildPrimary(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) {
+ type Version struct {
+ Epoch string `xml:"epoch,attr"`
+ Version string `xml:"ver,attr"`
+ Release string `xml:"rel,attr"`
+ }
+
+ type Checksum struct {
+ Checksum string `xml:",chardata"`
+ Type string `xml:"type,attr"`
+ Pkgid string `xml:"pkgid,attr"`
+ }
+
+ type Times struct {
+ File uint64 `xml:"file,attr"`
+ Build uint64 `xml:"build,attr"`
+ }
+
+ type Sizes struct {
+ Package int64 `xml:"package,attr"`
+ Installed uint64 `xml:"installed,attr"`
+ Archive uint64 `xml:"archive,attr"`
+ }
+
+ type Location struct {
+ Href string `xml:"href,attr"`
+ }
+
+ type EntryList struct {
+ Entries []*rpm_module.Entry `xml:"rpm:entry"`
+ }
+
+ type Format struct {
+ License string `xml:"rpm:license"`
+ Vendor string `xml:"rpm:vendor"`
+ Group string `xml:"rpm:group"`
+ Buildhost string `xml:"rpm:buildhost"`
+ Sourcerpm string `xml:"rpm:sourcerpm"`
+ Provides EntryList `xml:"rpm:provides"`
+ Requires EntryList `xml:"rpm:requires"`
+ Conflicts EntryList `xml:"rpm:conflicts"`
+ Obsoletes EntryList `xml:"rpm:obsoletes"`
+ Files []*rpm_module.File `xml:"file"`
+ }
+
+ type Package struct {
+ XMLName xml.Name `xml:"package"`
+ Type string `xml:"type,attr"`
+ Name string `xml:"name"`
+ Architecture string `xml:"arch"`
+ Version Version `xml:"version"`
+ Checksum Checksum `xml:"checksum"`
+ Summary string `xml:"summary"`
+ Description string `xml:"description"`
+ Packager string `xml:"packager"`
+ URL string `xml:"url"`
+ Time Times `xml:"time"`
+ Size Sizes `xml:"size"`
+ Location Location `xml:"location"`
+ Format Format `xml:"format"`
+ }
+
+ type Metadata struct {
+ XMLName xml.Name `xml:"metadata"`
+ Xmlns string `xml:"xmlns,attr"`
+ XmlnsRpm string `xml:"xmlns:rpm,attr"`
+ PackageCount int `xml:"packages,attr"`
+ Packages []*Package `xml:"package"`
+ }
+
+ packages := make([]*Package, 0, len(pfs))
+ for _, pf := range pfs {
+ pd := c[pf]
+
+ files := make([]*rpm_module.File, 0, 3)
+ for _, f := range pd.FileMetadata.Files {
+ if f.IsExecutable {
+ files = append(files, f)
+ }
+ }
+ packageVersion := fmt.Sprintf("%s-%s", pd.FileMetadata.Version, pd.FileMetadata.Release)
+ packages = append(packages, &Package{
+ Type: "rpm",
+ Name: pd.Package.Name,
+ Architecture: pd.FileMetadata.Architecture,
+ Version: Version{
+ Epoch: pd.FileMetadata.Epoch,
+ Version: pd.FileMetadata.Version,
+ Release: pd.FileMetadata.Release,
+ },
+ Checksum: Checksum{
+ Type: "sha256",
+ Checksum: pd.Blob.HashSHA256,
+ Pkgid: "YES",
+ },
+ Summary: pd.VersionMetadata.Summary,
+ Description: pd.VersionMetadata.Description,
+ Packager: pd.FileMetadata.Packager,
+ URL: pd.VersionMetadata.ProjectURL,
+ Time: Times{
+ File: pd.FileMetadata.FileTime,
+ Build: pd.FileMetadata.BuildTime,
+ },
+ Size: Sizes{
+ Package: pd.Blob.Size,
+ Installed: pd.FileMetadata.InstalledSize,
+ Archive: pd.FileMetadata.ArchiveSize,
+ },
+ Location: Location{
+ Href: fmt.Sprintf("package/%s/%s/%s/%s-%s.%s.rpm", pd.Package.Name, packageVersion, pd.FileMetadata.Architecture, pd.Package.Name, packageVersion, pd.FileMetadata.Architecture),
+ },
+ Format: Format{
+ License: pd.VersionMetadata.License,
+ Vendor: pd.FileMetadata.Vendor,
+ Group: pd.FileMetadata.Group,
+ Buildhost: pd.FileMetadata.BuildHost,
+ Sourcerpm: pd.FileMetadata.SourceRpm,
+ Provides: EntryList{
+ Entries: pd.FileMetadata.Provides,
+ },
+ Requires: EntryList{
+ Entries: pd.FileMetadata.Requires,
+ },
+ Conflicts: EntryList{
+ Entries: pd.FileMetadata.Conflicts,
+ },
+ Obsoletes: EntryList{
+ Entries: pd.FileMetadata.Obsoletes,
+ },
+ Files: files,
+ },
+ })
+ }
+
+ return addDataAsFileToRepo(ctx, pv, "primary", &Metadata{
+ Xmlns: "http://linux.duke.edu/metadata/common",
+ XmlnsRpm: "http://linux.duke.edu/metadata/rpm",
+ PackageCount: len(pfs),
+ Packages: packages,
+ }, group)
+}
+
+// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#filelists-xml
+func buildFilelists(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl
+ type Version struct {
+ Epoch string `xml:"epoch,attr"`
+ Version string `xml:"ver,attr"`
+ Release string `xml:"rel,attr"`
+ }
+
+ type Package struct {
+ Pkgid string `xml:"pkgid,attr"`
+ Name string `xml:"name,attr"`
+ Architecture string `xml:"arch,attr"`
+ Version Version `xml:"version"`
+ Files []*rpm_module.File `xml:"file"`
+ }
+
+ type Filelists struct {
+ XMLName xml.Name `xml:"filelists"`
+ Xmlns string `xml:"xmlns,attr"`
+ PackageCount int `xml:"packages,attr"`
+ Packages []*Package `xml:"package"`
+ }
+
+ packages := make([]*Package, 0, len(pfs))
+ for _, pf := range pfs {
+ pd := c[pf]
+
+ packages = append(packages, &Package{
+ Pkgid: pd.Blob.HashSHA256,
+ Name: pd.Package.Name,
+ Architecture: pd.FileMetadata.Architecture,
+ Version: Version{
+ Epoch: pd.FileMetadata.Epoch,
+ Version: pd.FileMetadata.Version,
+ Release: pd.FileMetadata.Release,
+ },
+ Files: pd.FileMetadata.Files,
+ })
+ }
+
+ return addDataAsFileToRepo(ctx, pv, "filelists", &Filelists{
+ Xmlns: "http://linux.duke.edu/metadata/other",
+ PackageCount: len(pfs),
+ Packages: packages,
+ }, group)
+}
+
+// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#other-xml
+func buildOther(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl
+ type Version struct {
+ Epoch string `xml:"epoch,attr"`
+ Version string `xml:"ver,attr"`
+ Release string `xml:"rel,attr"`
+ }
+
+ type Package struct {
+ Pkgid string `xml:"pkgid,attr"`
+ Name string `xml:"name,attr"`
+ Architecture string `xml:"arch,attr"`
+ Version Version `xml:"version"`
+ Changelogs []*rpm_module.Changelog `xml:"changelog"`
+ }
+
+ type Otherdata struct {
+ XMLName xml.Name `xml:"otherdata"`
+ Xmlns string `xml:"xmlns,attr"`
+ PackageCount int `xml:"packages,attr"`
+ Packages []*Package `xml:"package"`
+ }
+
+ packages := make([]*Package, 0, len(pfs))
+ for _, pf := range pfs {
+ pd := c[pf]
+
+ packages = append(packages, &Package{
+ Pkgid: pd.Blob.HashSHA256,
+ Name: pd.Package.Name,
+ Architecture: pd.FileMetadata.Architecture,
+ Version: Version{
+ Epoch: pd.FileMetadata.Epoch,
+ Version: pd.FileMetadata.Version,
+ Release: pd.FileMetadata.Release,
+ },
+ Changelogs: pd.FileMetadata.Changelogs,
+ })
+ }
+
+ return addDataAsFileToRepo(ctx, pv, "other", &Otherdata{
+ Xmlns: "http://linux.duke.edu/metadata/other",
+ PackageCount: len(pfs),
+ Packages: packages,
+ }, group)
+}
+
+// writtenCounter counts all written bytes
+type writtenCounter struct {
+ written int64
+}
+
+func (wc *writtenCounter) Write(buf []byte) (int, error) {
+ n := len(buf)
+
+ wc.written += int64(n)
+
+ return n, nil
+}
+
+func (wc *writtenCounter) Written() int64 {
+ return wc.written
+}
+
+func addDataAsFileToRepo(ctx context.Context, pv *packages_model.PackageVersion, filetype string, obj any, group string) (*repoData, error) {
+ content, _ := packages_module.NewHashedBuffer()
+ defer content.Close()
+
+ gzw := gzip.NewWriter(content)
+ wc := &writtenCounter{}
+ h := sha256.New()
+
+ w := io.MultiWriter(gzw, wc, h)
+ _, _ = w.Write([]byte(xml.Header))
+
+ if err := xml.NewEncoder(w).Encode(obj); err != nil {
+ return nil, err
+ }
+
+ if err := gzw.Close(); err != nil {
+ return nil, err
+ }
+
+ filename := filetype + ".xml.gz"
+
+ _, err := packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ pv,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: filename,
+ CompositeKey: group,
+ },
+ Creator: user_model.NewGhostUser(),
+ Data: content,
+ IsLead: false,
+ OverwriteExisting: true,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ _, _, hashSHA256, _ := content.Sums()
+
+ return &repoData{
+ Type: filetype,
+ Checksum: repoChecksum{
+ Type: "sha256",
+ Value: hex.EncodeToString(hashSHA256),
+ },
+ OpenChecksum: repoChecksum{
+ Type: "sha256",
+ Value: hex.EncodeToString(h.Sum(nil)),
+ },
+ Location: repoLocation{
+ Href: "repodata/" + filename,
+ },
+ Timestamp: time.Now().Unix(),
+ Size: content.Size(),
+ OpenSize: wc.Written(),
+ }, nil
+}
+
+func NewSignedRPMBuffer(rpm *packages_module.HashedBuffer, privateKey string) (*packages_module.HashedBuffer, error) {
+ keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(privateKey)))
+ if err != nil {
+ // failed to parse key
+ return nil, err
+ }
+ if len(keyring) == 0 {
+ // guard against an empty keyring before indexing into it
+ return nil, errors.New("no signing key found in keyring")
+ }
+ entity := keyring[0]
+ h, err := rpmutils.SignRpmStream(rpm, entity.PrivateKey, nil)
+ if err != nil {
+ // error signing rpm
+ return nil, err
+ }
+ signBlob, err := h.DumpSignatureHeader(false)
+ if err != nil {
+ // error writing sig header
+ return nil, err
+ }
+ if len(signBlob)%8 != 0 {
+ // err is nil at this point; construct a real error instead of returning (nil, nil)
+ log.Error("Invalid signature header padding: got %d bytes, expected a multiple of 8", len(signBlob))
+ return nil, fmt.Errorf("incorrect signature header padding: got %d bytes, expected a multiple of 8", len(signBlob))
+ }
+
+ // Seek past the original signature header so only the payload follows the new header
+ if _, err := rpm.Seek(int64(h.OriginalSignatureHeaderSize()), io.SeekStart); err != nil {
+ return nil, err
+ }
+ // Concatenate the new signature header with the original payload into a fresh hashed buffer
+ return packages_module.CreateHashedBufferFromReader(io.MultiReader(bytes.NewReader(signBlob), rpm))
+}