author    Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
commit    dd136858f1ea40ad3c94191d647487fa4f31926c (patch)
tree      58fec94a7b2a12510c9664b21793f1ed560c6518  /modules/storage
parent    Initial commit. (diff)
Adding upstream version 9.0.0. (HEAD, upstream/9.0.0, upstream, debian)
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'modules/storage')
-rw-r--r--  modules/storage/helper.go                  39
-rw-r--r--  modules/storage/helper_test.go             51
-rw-r--r--  modules/storage/local.go                  154
-rw-r--r--  modules/storage/local_test.go              61
-rw-r--r--  modules/storage/minio.go                  310
-rw-r--r--  modules/storage/minio_test.go             216
-rw-r--r--  modules/storage/storage.go                226
-rw-r--r--  modules/storage/storage_test.go            52
-rw-r--r--  modules/storage/testdata/aws_credentials    3
-rw-r--r--  modules/storage/testdata/minio.json        12
10 files changed, 1124 insertions, 0 deletions
diff --git a/modules/storage/helper.go b/modules/storage/helper.go
new file mode 100644
index 0000000..95f1c7b
--- /dev/null
+++ b/modules/storage/helper.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+)
+
+var UninitializedStorage = DiscardStorage("uninitialized storage")
+
+type DiscardStorage string
+
+func (s DiscardStorage) Open(_ string) (Object, error) {
+ return nil, fmt.Errorf("%s", s)
+}
+
+func (s DiscardStorage) Save(_ string, _ io.Reader, _ int64) (int64, error) {
+ return 0, fmt.Errorf("%s", s)
+}
+
+func (s DiscardStorage) Stat(_ string) (os.FileInfo, error) {
+ return nil, fmt.Errorf("%s", s)
+}
+
+func (s DiscardStorage) Delete(_ string) error {
+ return fmt.Errorf("%s", s)
+}
+
+func (s DiscardStorage) URL(_, _ string) (*url.URL, error) {
+ return nil, fmt.Errorf("%s", s)
+}
+
+func (s DiscardStorage) IterateObjects(_ string, _ func(string, Object) error) error {
+ return fmt.Errorf("%s", s)
+}
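A brief note on intent, ahead of the tests below: DiscardStorage is the guard value for the package-level storages declared later in storage.go, so any call made before storage.Init() fails with a clear message instead of a nil dereference. A minimal sketch of what a caller would see (standalone main package; the object path is an arbitrary placeholder):

    package main

    import (
        "fmt"

        "code.gitea.io/gitea/modules/storage"
    )

    func main() {
        // storage.Attachments defaults to UninitializedStorage until storage.Init() runs,
        // so every method returns an error carrying the "uninitialized storage" message.
        _, err := storage.Attachments.Open("attachments/a0/ee/a0eebc99") // placeholder path
        fmt.Println(err)
    }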
diff --git a/modules/storage/helper_test.go b/modules/storage/helper_test.go
new file mode 100644
index 0000000..60a7c61
--- /dev/null
+++ b/modules/storage/helper_test.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_discardStorage(t *testing.T) {
+ tests := []DiscardStorage{
+ UninitializedStorage,
+ DiscardStorage("empty"),
+ }
+ for _, tt := range tests {
+ t.Run(string(tt), func(t *testing.T) {
+ {
+ got, err := tt.Open("path")
+ assert.Nil(t, got)
+ require.Error(t, err, string(tt))
+ }
+ {
+ got, err := tt.Save("path", bytes.NewReader([]byte{0}), 1)
+ assert.Equal(t, int64(0), got)
+ require.Error(t, err, string(tt))
+ }
+ {
+ got, err := tt.Stat("path")
+ assert.Nil(t, got)
+ require.Error(t, err, string(tt))
+ }
+ {
+ err := tt.Delete("path")
+ require.Error(t, err, string(tt))
+ }
+ {
+ got, err := tt.URL("path", "name")
+ assert.Nil(t, got)
+ require.Errorf(t, err, string(tt))
+ }
+ {
+ err := tt.IterateObjects("", func(_ string, _ Object) error { return nil })
+ require.Error(t, err, string(tt))
+ }
+ })
+ }
+}
diff --git a/modules/storage/local.go b/modules/storage/local.go
new file mode 100644
index 0000000..9bb532f
--- /dev/null
+++ b/modules/storage/local.go
@@ -0,0 +1,154 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
+var _ ObjectStorage = &LocalStorage{}
+
+// LocalStorage represents a local files storage
+type LocalStorage struct {
+ ctx context.Context
+ dir string
+ tmpdir string
+}
+
+// NewLocalStorage returns a local files storage
+func NewLocalStorage(ctx context.Context, config *setting.Storage) (ObjectStorage, error) {
+ if !filepath.IsAbs(config.Path) {
+ return nil, fmt.Errorf("LocalStorageConfig.Path should have been prepared by setting/storage.go and should be an absolute path, but not: %q", config.Path)
+ }
+ log.Info("Creating new Local Storage at %s", config.Path)
+ if err := os.MkdirAll(config.Path, os.ModePerm); err != nil {
+ return nil, err
+ }
+
+ if config.TemporaryPath == "" {
+ config.TemporaryPath = filepath.Join(config.Path, "tmp")
+ }
+ if !filepath.IsAbs(config.TemporaryPath) {
+ return nil, fmt.Errorf("LocalStorageConfig.TemporaryPath should be an absolute path, but not: %q", config.TemporaryPath)
+ }
+
+ return &LocalStorage{
+ ctx: ctx,
+ dir: config.Path,
+ tmpdir: config.TemporaryPath,
+ }, nil
+}
+
+func (l *LocalStorage) buildLocalPath(p string) string {
+ return util.FilePathJoinAbs(l.dir, p)
+}
+
+// Open a file
+func (l *LocalStorage) Open(path string) (Object, error) {
+ return os.Open(l.buildLocalPath(path))
+}
+
+// Save a file
+func (l *LocalStorage) Save(path string, r io.Reader, size int64) (int64, error) {
+ p := l.buildLocalPath(path)
+ if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
+ return 0, err
+ }
+
+ // Create a temporary file to save to
+ if err := os.MkdirAll(l.tmpdir, os.ModePerm); err != nil {
+ return 0, err
+ }
+ tmp, err := os.CreateTemp(l.tmpdir, "upload-*")
+ if err != nil {
+ return 0, err
+ }
+ tmpRemoved := false
+ defer func() {
+ if !tmpRemoved {
+ _ = util.Remove(tmp.Name())
+ }
+ }()
+
+ n, err := io.Copy(tmp, r)
+ if err != nil {
+ return 0, err
+ }
+
+ if err := tmp.Close(); err != nil {
+ return 0, err
+ }
+
+ if err := util.Rename(tmp.Name(), p); err != nil {
+ return 0, err
+ }
+ // Go's temp files (os.CreateTemp) are always created with 0o600 mode, so we need to change the file to follow the umask (as Create/Mkdir do)
+ // but we don't want to make these files executable - so ensure that we mask out the executable bits
+ if err := util.ApplyUmask(p, os.ModePerm&0o666); err != nil {
+ return 0, err
+ }
+
+ tmpRemoved = true
+
+ return n, nil
+}
+
+// Stat returns the info of the file
+func (l *LocalStorage) Stat(path string) (os.FileInfo, error) {
+ return os.Stat(l.buildLocalPath(path))
+}
+
+// Delete deletes a file
+func (l *LocalStorage) Delete(path string) error {
+ return util.Remove(l.buildLocalPath(path))
+}
+
+// URL gets the redirect URL to a file
+func (l *LocalStorage) URL(path, name string) (*url.URL, error) {
+ return nil, ErrURLNotSupported
+}
+
+// IterateObjects iterates across the objects in the local storage
+func (l *LocalStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
+ dir := l.buildLocalPath(dirName)
+ return filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ select {
+ case <-l.ctx.Done():
+ return l.ctx.Err()
+ default:
+ }
+ if path == l.dir {
+ return nil
+ }
+ if d.IsDir() {
+ return nil
+ }
+ relPath, err := filepath.Rel(l.dir, path)
+ if err != nil {
+ return err
+ }
+ obj, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer obj.Close()
+ return fn(relPath, obj)
+ })
+}
+
+func init() {
+ RegisterStorageType(setting.LocalStorageType, NewLocalStorage)
+}
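To make the local backend concrete, here is a hedged sketch of driving it directly (not part of the diff). The directory /tmp/forgejo-local and the object key avatars/u/1.png are placeholders; NewLocalStorage insists on an absolute Path and defaults TemporaryPath to <Path>/tmp:

    package main

    import (
        "bytes"
        "context"
        "fmt"
        "io"

        "code.gitea.io/gitea/modules/setting"
        "code.gitea.io/gitea/modules/storage"
    )

    func main() {
        // Path must be absolute; TemporaryPath defaults to <Path>/tmp when left empty.
        s, err := storage.NewLocalStorage(context.Background(), &setting.Storage{Path: "/tmp/forgejo-local"})
        if err != nil {
            panic(err)
        }

        // Save streams through a temp file, renames it into place, then applies the umask.
        if _, err := s.Save("avatars/u/1.png", bytes.NewBufferString("fake-bytes"), -1); err != nil {
            panic(err)
        }

        obj, err := s.Open("avatars/u/1.png")
        if err != nil {
            panic(err)
        }
        defer obj.Close()

        data, _ := io.ReadAll(obj)
        fmt.Printf("read %d bytes back\n", len(data))
    }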
diff --git a/modules/storage/local_test.go b/modules/storage/local_test.go
new file mode 100644
index 0000000..e230323
--- /dev/null
+++ b/modules/storage/local_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBuildLocalPath(t *testing.T) {
+ kases := []struct {
+ localDir string
+ path string
+ expected string
+ }{
+ {
+ "/a",
+ "0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ "/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ },
+ {
+ "/a",
+ "../0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ "/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ },
+ {
+ "/a",
+ "0\\a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ "/a/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ },
+ {
+ "/b",
+ "a/../0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ "/b/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ },
+ {
+ "/b",
+ "a\\..\\0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ "/b/0/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14",
+ },
+ }
+
+ for _, k := range kases {
+ t.Run(k.path, func(t *testing.T) {
+ l := LocalStorage{dir: k.localDir}
+
+ assert.EqualValues(t, k.expected, l.buildLocalPath(k.path))
+ })
+ }
+}
+
+func TestLocalStorageIterator(t *testing.T) {
+ dir := filepath.Join(os.TempDir(), "TestLocalStorageIteratorTestDir")
+ testStorageIterator(t, setting.LocalStorageType, &setting.Storage{Path: dir})
+}
diff --git a/modules/storage/minio.go b/modules/storage/minio.go
new file mode 100644
index 0000000..d0c2dec
--- /dev/null
+++ b/modules/storage/minio.go
@@ -0,0 +1,310 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+var (
+ _ ObjectStorage = &MinioStorage{}
+
+ quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+)
+
+type minioObject struct {
+ *minio.Object
+}
+
+func (m *minioObject) Stat() (os.FileInfo, error) {
+ oi, err := m.Object.Stat()
+ if err != nil {
+ return nil, convertMinioErr(err)
+ }
+
+ return &minioFileInfo{oi}, nil
+}
+
+// MinioStorage represents a minio bucket storage
+type MinioStorage struct {
+ cfg *setting.MinioStorageConfig
+ ctx context.Context
+ client *minio.Client
+ bucket string
+ basePath string
+}
+
+func convertMinioErr(err error) error {
+ if err == nil {
+ return nil
+ }
+ errResp, ok := err.(minio.ErrorResponse)
+ if !ok {
+ return err
+ }
+
+ // Convert two responses to standard analogues
+ switch errResp.Code {
+ case "NoSuchKey":
+ return os.ErrNotExist
+ case "AccessDenied":
+ return os.ErrPermission
+ }
+
+ return err
+}
+
+var getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
+ _, err := minioClient.GetBucketVersioning(ctx, bucket)
+ return err
+}
+
+// NewMinioStorage returns a minio storage
+func NewMinioStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
+ config := cfg.MinioConfig
+ if config.ChecksumAlgorithm != "" && config.ChecksumAlgorithm != "default" && config.ChecksumAlgorithm != "md5" {
+ return nil, fmt.Errorf("invalid minio checksum algorithm: %s", config.ChecksumAlgorithm)
+ }
+ var lookup minio.BucketLookupType
+ switch config.BucketLookup {
+ case "auto", "":
+ lookup = minio.BucketLookupAuto
+ case "dns":
+ lookup = minio.BucketLookupDNS
+ case "path":
+ lookup = minio.BucketLookupPath
+ default:
+ return nil, fmt.Errorf("invalid minio bucket lookup type %s", config.BucketLookup)
+ }
+
+ log.Info("Creating Minio storage at %s:%s with base path %s", config.Endpoint, config.Bucket, config.BasePath)
+
+ minioClient, err := minio.New(config.Endpoint, &minio.Options{
+ Creds: buildMinioCredentials(config, credentials.DefaultIAMRoleEndpoint),
+ Secure: config.UseSSL,
+ Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: config.InsecureSkipVerify}},
+ Region: config.Location,
+ BucketLookup: lookup,
+ })
+ if err != nil {
+ return nil, convertMinioErr(err)
+ }
+
+ // The GetBucketVersioning is only used for checking whether the Object Storage parameters are generally good. It doesn't need to succeed.
+ // The assumption is that if the API returns the HTTP code 400, then the parameters could be incorrect.
+ // Otherwise even if the request itself fails (403, 404, etc), the code should still continue because the parameters seem "good" enough.
+ // Keep in mind that GetBucketVersioning requires the "owner" permission to really succeed, so it can't be used to check for the bucket's existence.
+ // Not using "BucketExists (HeadBucket)" because it doesn't include detailed failure reasons.
+ err = getBucketVersioning(ctx, minioClient, config.Bucket)
+ if err != nil {
+ errResp, ok := err.(minio.ErrorResponse)
+ if !ok {
+ return nil, err
+ }
+ if errResp.StatusCode == http.StatusBadRequest {
+ log.Error("S3 storage connection failure at %s:%s with base path %s and region: %s", config.Endpoint, config.Bucket, config.Location, errResp.Message)
+ return nil, err
+ }
+ }
+
+ // Check to see if we already own this bucket
+ exists, err := minioClient.BucketExists(ctx, config.Bucket)
+ if err != nil {
+ return nil, convertMinioErr(err)
+ }
+
+ if !exists {
+ if err := minioClient.MakeBucket(ctx, config.Bucket, minio.MakeBucketOptions{
+ Region: config.Location,
+ }); err != nil {
+ return nil, convertMinioErr(err)
+ }
+ }
+
+ return &MinioStorage{
+ cfg: &config,
+ ctx: ctx,
+ client: minioClient,
+ bucket: config.Bucket,
+ basePath: config.BasePath,
+ }, nil
+}
+
+func (m *MinioStorage) buildMinioPath(p string) string {
+ p = strings.TrimPrefix(util.PathJoinRelX(m.basePath, p), "/") // object store doesn't use slash for root path
+ if p == "." {
+ p = "" // object store doesn't use dot as relative path
+ }
+ return p
+}
+
+func (m *MinioStorage) buildMinioDirPrefix(p string) string {
+ // a trailing slash is required so that a prefix like "foo" does not match both "foo/" and "foobar/"
+ p = m.buildMinioPath(p) + "/"
+ if p == "/" {
+ p = "" // object store doesn't use slash for root path
+ }
+ return p
+}
+
+func buildMinioCredentials(config setting.MinioStorageConfig, iamEndpoint string) *credentials.Credentials {
+ // If static credentials are provided, use those
+ if config.AccessKeyID != "" {
+ return credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, "")
+ }
+
+ // Otherwise, fall back to a credentials chain for S3 access
+ chain := []credentials.Provider{
+ // configure based upon MINIO_ prefixed environment variables
+ &credentials.EnvMinio{},
+ // configure based upon AWS_ prefixed environment variables
+ &credentials.EnvAWS{},
+ // read credentials from MINIO_SHARED_CREDENTIALS_FILE
+ // environment variable, or default json config files
+ &credentials.FileMinioClient{},
+ // read credentials from AWS_SHARED_CREDENTIALS_FILE
+ // environment variable, or default credentials file
+ &credentials.FileAWSCredentials{},
+ // read IAM role from EC2 metadata endpoint if available
+ &credentials.IAM{
+ Endpoint: iamEndpoint,
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ },
+ }
+ return credentials.NewChainCredentials(chain)
+}
+
+// Open opens a file
+func (m *MinioStorage) Open(path string) (Object, error) {
+ opts := minio.GetObjectOptions{}
+ object, err := m.client.GetObject(m.ctx, m.bucket, m.buildMinioPath(path), opts)
+ if err != nil {
+ return nil, convertMinioErr(err)
+ }
+ return &minioObject{object}, nil
+}
+
+// Save saves a file to minio
+func (m *MinioStorage) Save(path string, r io.Reader, size int64) (int64, error) {
+ uploadInfo, err := m.client.PutObject(
+ m.ctx,
+ m.bucket,
+ m.buildMinioPath(path),
+ r,
+ size,
+ minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+ // some storages like:
+ // * https://developers.cloudflare.com/r2/api/s3/api/
+ // * https://www.backblaze.com/b2/docs/s3_compatible_api.html
+ // do not support "x-amz-checksum-algorithm" header, so use legacy MD5 checksum
+ SendContentMd5: m.cfg.ChecksumAlgorithm == "md5",
+ },
+ )
+ if err != nil {
+ return 0, convertMinioErr(err)
+ }
+ return uploadInfo.Size, nil
+}
+
+type minioFileInfo struct {
+ minio.ObjectInfo
+}
+
+func (m minioFileInfo) Name() string {
+ return path.Base(m.ObjectInfo.Key)
+}
+
+func (m minioFileInfo) Size() int64 {
+ return m.ObjectInfo.Size
+}
+
+func (m minioFileInfo) ModTime() time.Time {
+ return m.LastModified
+}
+
+func (m minioFileInfo) IsDir() bool {
+ return strings.HasSuffix(m.ObjectInfo.Key, "/")
+}
+
+func (m minioFileInfo) Mode() os.FileMode {
+ return os.ModePerm
+}
+
+func (m minioFileInfo) Sys() any {
+ return nil
+}
+
+// Stat returns the stat information of the object
+func (m *MinioStorage) Stat(path string) (os.FileInfo, error) {
+ info, err := m.client.StatObject(
+ m.ctx,
+ m.bucket,
+ m.buildMinioPath(path),
+ minio.StatObjectOptions{},
+ )
+ if err != nil {
+ return nil, convertMinioErr(err)
+ }
+ return &minioFileInfo{info}, nil
+}
+
+// Delete deletes a file
+func (m *MinioStorage) Delete(path string) error {
+ err := m.client.RemoveObject(m.ctx, m.bucket, m.buildMinioPath(path), minio.RemoveObjectOptions{})
+
+ return convertMinioErr(err)
+}
+
+// URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
+func (m *MinioStorage) URL(path, name string) (*url.URL, error) {
+ reqParams := make(url.Values)
+ // TODO it may be good to embed images with 'inline' like ServeData does, but we don't want to have to read the file, do we?
+ reqParams.Set("response-content-disposition", "attachment; filename=\""+quoteEscaper.Replace(name)+"\"")
+ u, err := m.client.PresignedGetObject(m.ctx, m.bucket, m.buildMinioPath(path), 5*time.Minute, reqParams)
+ return u, convertMinioErr(err)
+}
+
+// IterateObjects iterates across the objects in the minio storage
+func (m *MinioStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
+ opts := minio.GetObjectOptions{}
+ for mObjInfo := range m.client.ListObjects(m.ctx, m.bucket, minio.ListObjectsOptions{
+ Prefix: m.buildMinioDirPrefix(dirName),
+ Recursive: true,
+ }) {
+ object, err := m.client.GetObject(m.ctx, m.bucket, mObjInfo.Key, opts)
+ if err != nil {
+ return convertMinioErr(err)
+ }
+ if err := func(object *minio.Object, fn func(path string, obj Object) error) error {
+ defer object.Close()
+ return fn(strings.TrimPrefix(mObjInfo.Key, m.basePath), &minioObject{object})
+ }(object, fn); err != nil {
+ return convertMinioErr(err)
+ }
+ }
+ return nil
+}
+
+func init() {
+ RegisterStorageType(setting.MinioStorageType, NewMinioStorage)
+}
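For orientation only, a sketch of wiring this backend up through NewStorage and asking for a presigned download link (valid for 5 minutes, as noted in the URL method above). The endpoint, credentials, bucket, and base path are placeholders, and a reachable S3-compatible server is assumed:

    package main

    import (
        "fmt"

        "code.gitea.io/gitea/modules/setting"
        "code.gitea.io/gitea/modules/storage"
    )

    func main() {
        cfg := &setting.Storage{
            MinioConfig: setting.MinioStorageConfig{
                Endpoint:        "minio:9000", // placeholder endpoint
                AccessKeyID:     "access-key", // placeholder credentials
                SecretAccessKey: "secret-key",
                Bucket:          "forgejo",
                Location:        "us-east-1",
                BasePath:        "attachments/",
                UseSSL:          false,
            },
        }

        s, err := storage.NewStorage(setting.MinioStorageType, cfg)
        if err != nil {
            panic(err)
        }

        // URL returns a presigned GET link that forces an attachment
        // content-disposition with the given download name.
        u, err := s.URL("a0/ee/a0eebc99", "report.pdf") // placeholder object key and name
        if err != nil {
            panic(err)
        }
        fmt.Println(u.String())
    }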
diff --git a/modules/storage/minio_test.go b/modules/storage/minio_test.go
new file mode 100644
index 0000000..9ce1dbc
--- /dev/null
+++ b/modules/storage/minio_test.go
@@ -0,0 +1,216 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/minio/minio-go/v7"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMinioStorageIterator(t *testing.T) {
+ if os.Getenv("CI") == "" {
+ t.Skip("minioStorage not present outside of CI")
+ return
+ }
+ testStorageIterator(t, setting.MinioStorageType, &setting.Storage{
+ MinioConfig: setting.MinioStorageConfig{
+ Endpoint: "minio:9000",
+ AccessKeyID: "123456",
+ SecretAccessKey: "12345678",
+ Bucket: "gitea",
+ Location: "us-east-1",
+ },
+ })
+}
+
+func TestVirtualHostMinioStorage(t *testing.T) {
+ if os.Getenv("CI") == "" {
+ t.Skip("minioStorage not present outside of CI")
+ return
+ }
+ testStorageIterator(t, setting.MinioStorageType, &setting.Storage{
+ MinioConfig: setting.MinioStorageConfig{
+ Endpoint: "minio:9000",
+ AccessKeyID: "123456",
+ SecretAccessKey: "12345678",
+ Bucket: "gitea",
+ Location: "us-east-1",
+ BucketLookup: "dns",
+ },
+ })
+}
+
+func TestMinioStoragePath(t *testing.T) {
+ m := &MinioStorage{basePath: ""}
+ assert.Equal(t, "", m.buildMinioPath("/"))
+ assert.Equal(t, "", m.buildMinioPath("."))
+ assert.Equal(t, "a", m.buildMinioPath("/a"))
+ assert.Equal(t, "a/b", m.buildMinioPath("/a/b/"))
+ assert.Equal(t, "", m.buildMinioDirPrefix(""))
+ assert.Equal(t, "a/", m.buildMinioDirPrefix("/a/"))
+
+ m = &MinioStorage{basePath: "/"}
+ assert.Equal(t, "", m.buildMinioPath("/"))
+ assert.Equal(t, "", m.buildMinioPath("."))
+ assert.Equal(t, "a", m.buildMinioPath("/a"))
+ assert.Equal(t, "a/b", m.buildMinioPath("/a/b/"))
+ assert.Equal(t, "", m.buildMinioDirPrefix(""))
+ assert.Equal(t, "a/", m.buildMinioDirPrefix("/a/"))
+
+ m = &MinioStorage{basePath: "/base"}
+ assert.Equal(t, "base", m.buildMinioPath("/"))
+ assert.Equal(t, "base", m.buildMinioPath("."))
+ assert.Equal(t, "base/a", m.buildMinioPath("/a"))
+ assert.Equal(t, "base/a/b", m.buildMinioPath("/a/b/"))
+ assert.Equal(t, "base/", m.buildMinioDirPrefix(""))
+ assert.Equal(t, "base/a/", m.buildMinioDirPrefix("/a/"))
+
+ m = &MinioStorage{basePath: "/base/"}
+ assert.Equal(t, "base", m.buildMinioPath("/"))
+ assert.Equal(t, "base", m.buildMinioPath("."))
+ assert.Equal(t, "base/a", m.buildMinioPath("/a"))
+ assert.Equal(t, "base/a/b", m.buildMinioPath("/a/b/"))
+ assert.Equal(t, "base/", m.buildMinioDirPrefix(""))
+ assert.Equal(t, "base/a/", m.buildMinioDirPrefix("/a/"))
+}
+
+func TestS3StorageBadRequest(t *testing.T) {
+ if os.Getenv("CI") == "" {
+ t.Skip("S3Storage not present outside of CI")
+ return
+ }
+ cfg := &setting.Storage{
+ MinioConfig: setting.MinioStorageConfig{
+ Endpoint: "minio:9000",
+ AccessKeyID: "123456",
+ SecretAccessKey: "12345678",
+ Bucket: "bucket",
+ Location: "us-east-1",
+ },
+ }
+ message := "ERROR"
+ old := getBucketVersioning
+ defer func() { getBucketVersioning = old }()
+ getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
+ return minio.ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "FixtureError",
+ Message: message,
+ }
+ }
+ _, err := NewStorage(setting.MinioStorageType, cfg)
+ require.ErrorContains(t, err, message)
+}
+
+func TestMinioCredentials(t *testing.T) {
+ const (
+ ExpectedAccessKey = "ExampleAccessKeyID"
+ ExpectedSecretAccessKey = "ExampleSecretAccessKeyID"
+ // Use a FakeEndpoint for IAM credentials to avoid logging any
+ // potential real IAM credentials when running in EC2.
+ FakeEndpoint = "http://localhost"
+ )
+
+ t.Run("Static Credentials", func(t *testing.T) {
+ cfg := setting.MinioStorageConfig{
+ AccessKeyID: ExpectedAccessKey,
+ SecretAccessKey: ExpectedSecretAccessKey,
+ }
+ creds := buildMinioCredentials(cfg, FakeEndpoint)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey, v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey, v.SecretAccessKey)
+ })
+
+ t.Run("Chain", func(t *testing.T) {
+ cfg := setting.MinioStorageConfig{}
+
+ t.Run("EnvMinio", func(t *testing.T) {
+ t.Setenv("MINIO_ACCESS_KEY", ExpectedAccessKey+"Minio")
+ t.Setenv("MINIO_SECRET_KEY", ExpectedSecretAccessKey+"Minio")
+
+ creds := buildMinioCredentials(cfg, FakeEndpoint)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey+"Minio", v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey+"Minio", v.SecretAccessKey)
+ })
+
+ t.Run("EnvAWS", func(t *testing.T) {
+ t.Setenv("AWS_ACCESS_KEY", ExpectedAccessKey+"AWS")
+ t.Setenv("AWS_SECRET_KEY", ExpectedSecretAccessKey+"AWS")
+
+ creds := buildMinioCredentials(cfg, FakeEndpoint)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey+"AWS", v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey+"AWS", v.SecretAccessKey)
+ })
+
+ t.Run("FileMinio", func(t *testing.T) {
+ t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/minio.json")
+ // prevent loading any actual credentials files from the user
+ t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/fake")
+
+ creds := buildMinioCredentials(cfg, FakeEndpoint)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey+"MinioFile", v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey+"MinioFile", v.SecretAccessKey)
+ })
+
+ t.Run("FileAWS", func(t *testing.T) {
+ // prevent loading any actual credentials files from the user
+ t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/fake.json")
+ t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/aws_credentials")
+
+ creds := buildMinioCredentials(cfg, FakeEndpoint)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey+"AWSFile", v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey+"AWSFile", v.SecretAccessKey)
+ })
+
+ t.Run("IAM", func(t *testing.T) {
+ // prevent loading any actual credentials files from the user
+ t.Setenv("MINIO_SHARED_CREDENTIALS_FILE", "testdata/fake.json")
+ t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "testdata/fake")
+
+ // Spawn a server to emulate the EC2 Instance Metadata
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // The client will actually make 3 requests here,
+ // first will be to get the IMDSv2 token, second to
+ // get the role, and third for the actual
+ // credentials. However, we can return credentials
+ // every request since we're not emulating a full
+ // IMDSv2 flow.
+ w.Write([]byte(`{"Code":"Success","AccessKeyId":"ExampleAccessKeyIDIAM","SecretAccessKey":"ExampleSecretAccessKeyIDIAM"}`))
+ }))
+ defer server.Close()
+
+ // Use the provided EC2 Instance Metadata server
+ creds := buildMinioCredentials(cfg, server.URL)
+ v, err := creds.Get()
+
+ require.NoError(t, err)
+ assert.Equal(t, ExpectedAccessKey+"IAM", v.AccessKeyID)
+ assert.Equal(t, ExpectedSecretAccessKey+"IAM", v.SecretAccessKey)
+ })
+ })
+}
diff --git a/modules/storage/storage.go b/modules/storage/storage.go
new file mode 100644
index 0000000..b83b1c7
--- /dev/null
+++ b/modules/storage/storage.go
@@ -0,0 +1,226 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// ErrURLNotSupported is returned when the storage does not support the URL method
+var ErrURLNotSupported = errors.New("url method not supported")
+
+// ErrInvalidConfiguration is returned when a storage is given an invalid configuration
+type ErrInvalidConfiguration struct {
+ cfg any
+ err error
+}
+
+func (err ErrInvalidConfiguration) Error() string {
+ if err.err != nil {
+ return fmt.Sprintf("Invalid Configuration Argument: %v: Error: %v", err.cfg, err.err)
+ }
+ return fmt.Sprintf("Invalid Configuration Argument: %v", err.cfg)
+}
+
+// IsErrInvalidConfiguration checks if an error is an ErrInvalidConfiguration
+func IsErrInvalidConfiguration(err error) bool {
+ _, ok := err.(ErrInvalidConfiguration)
+ return ok
+}
+
+type Type = setting.StorageType
+
+// NewStorageFunc is a function that creates a storage
+type NewStorageFunc func(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error)
+
+var storageMap = map[Type]NewStorageFunc{}
+
+// RegisterStorageType registers a provided storage type with a function to create it
+func RegisterStorageType(typ Type, fn func(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error)) {
+ storageMap[typ] = fn
+}
+
+// Object represents the object on the storage
+type Object interface {
+ io.ReadCloser
+ io.Seeker
+ Stat() (os.FileInfo, error)
+}
+
+// ObjectStorage represents an object storage to handle a bucket and files
+type ObjectStorage interface {
+ Open(path string) (Object, error)
+ // Save stores an object; pass -1 as size if it is unknown
+ Save(path string, r io.Reader, size int64) (int64, error)
+ Stat(path string) (os.FileInfo, error)
+ Delete(path string) error
+ URL(path, name string) (*url.URL, error)
+ IterateObjects(path string, iterator func(path string, obj Object) error) error
+}
+
+// Copy copies a file from source ObjectStorage to dest ObjectStorage
+func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, srcPath string) (int64, error) {
+ f, err := srcStorage.Open(srcPath)
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ size := int64(-1)
+ fsinfo, err := f.Stat()
+ if err == nil {
+ size = fsinfo.Size()
+ }
+
+ return dstStorage.Save(dstPath, f, size)
+}
+
+// Clean deletes all the objects in this storage
+func Clean(storage ObjectStorage) error {
+ return storage.IterateObjects("", func(path string, obj Object) error {
+ _ = obj.Close()
+ return storage.Delete(path)
+ })
+}
+
+// SaveFrom saves data to the ObjectStorage with path p from the callback
+func SaveFrom(objStorage ObjectStorage, p string, callback func(w io.Writer) error) error {
+ pr, pw := io.Pipe()
+ defer pr.Close()
+ go func() {
+ defer pw.Close()
+ if err := callback(pw); err != nil {
+ _ = pw.CloseWithError(err)
+ }
+ }()
+
+ _, err := objStorage.Save(p, pr, -1)
+ return err
+}
+
+var (
+ // Attachments represents attachments storage
+ Attachments ObjectStorage = UninitializedStorage
+
+ // LFS represents lfs storage
+ LFS ObjectStorage = UninitializedStorage
+
+ // Avatars represents user avatars storage
+ Avatars ObjectStorage = UninitializedStorage
+ // RepoAvatars represents repository avatars storage
+ RepoAvatars ObjectStorage = UninitializedStorage
+
+ // RepoArchives represents repository archives storage
+ RepoArchives ObjectStorage = UninitializedStorage
+
+ // Packages represents packages storage
+ Packages ObjectStorage = UninitializedStorage
+
+ // Actions represents actions storage
+ Actions ObjectStorage = UninitializedStorage
+ // ActionsArtifacts represents actions artifacts storage
+ ActionsArtifacts ObjectStorage = UninitializedStorage
+)
+
+// Init initializes the storage
+func Init() error {
+ for _, f := range []func() error{
+ initAttachments,
+ initAvatars,
+ initRepoAvatars,
+ initLFS,
+ initRepoArchives,
+ initPackages,
+ initActions,
+ } {
+ if err := f(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewStorage takes a storage type and some config and returns an ObjectStorage or an error
+func NewStorage(typStr Type, cfg *setting.Storage) (ObjectStorage, error) {
+ if len(typStr) == 0 {
+ typStr = setting.LocalStorageType
+ }
+ fn, ok := storageMap[typStr]
+ if !ok {
+ return nil, fmt.Errorf("Unsupported storage type: %s", typStr)
+ }
+
+ return fn(context.Background(), cfg)
+}
+
+func initAvatars() (err error) {
+ log.Info("Initialising Avatar storage with type: %s", setting.Avatar.Storage.Type)
+ Avatars, err = NewStorage(setting.Avatar.Storage.Type, setting.Avatar.Storage)
+ return err
+}
+
+func initAttachments() (err error) {
+ if !setting.Attachment.Enabled {
+ Attachments = DiscardStorage("Attachment isn't enabled")
+ return nil
+ }
+ log.Info("Initialising Attachment storage with type: %s", setting.Attachment.Storage.Type)
+ Attachments, err = NewStorage(setting.Attachment.Storage.Type, setting.Attachment.Storage)
+ return err
+}
+
+func initLFS() (err error) {
+ if !setting.LFS.StartServer {
+ LFS = DiscardStorage("LFS isn't enabled")
+ return nil
+ }
+ log.Info("Initialising LFS storage with type: %s", setting.LFS.Storage.Type)
+ LFS, err = NewStorage(setting.LFS.Storage.Type, setting.LFS.Storage)
+ return err
+}
+
+func initRepoAvatars() (err error) {
+ log.Info("Initialising Repository Avatar storage with type: %s", setting.RepoAvatar.Storage.Type)
+ RepoAvatars, err = NewStorage(setting.RepoAvatar.Storage.Type, setting.RepoAvatar.Storage)
+ return err
+}
+
+func initRepoArchives() (err error) {
+ log.Info("Initialising Repository Archive storage with type: %s", setting.RepoArchive.Storage.Type)
+ RepoArchives, err = NewStorage(setting.RepoArchive.Storage.Type, setting.RepoArchive.Storage)
+ return err
+}
+
+func initPackages() (err error) {
+ if !setting.Packages.Enabled {
+ Packages = DiscardStorage("Packages isn't enabled")
+ return nil
+ }
+ log.Info("Initialising Packages storage with type: %s", setting.Packages.Storage.Type)
+ Packages, err = NewStorage(setting.Packages.Storage.Type, setting.Packages.Storage)
+ return err
+}
+
+func initActions() (err error) {
+ if !setting.Actions.Enabled {
+ Actions = DiscardStorage("Actions isn't enabled")
+ ActionsArtifacts = DiscardStorage("ActionsArtifacts isn't enabled")
+ return nil
+ }
+ log.Info("Initialising Actions storage with type: %s", setting.Actions.LogStorage.Type)
+ if Actions, err = NewStorage(setting.Actions.LogStorage.Type, setting.Actions.LogStorage); err != nil {
+ return err
+ }
+ log.Info("Initialising ActionsArtifacts storage with type: %s", setting.Actions.ArtifactStorage.Type)
+ ActionsArtifacts, err = NewStorage(setting.Actions.ArtifactStorage.Type, setting.Actions.ArtifactStorage)
+ return err
+}
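A short, hedged sketch of the Copy and SaveFrom helpers defined above, using two local stores under placeholder absolute paths:

    package main

    import (
        "context"
        "fmt"
        "io"

        "code.gitea.io/gitea/modules/setting"
        "code.gitea.io/gitea/modules/storage"
    )

    func main() {
        src, err := storage.NewLocalStorage(context.Background(), &setting.Storage{Path: "/tmp/forgejo-src"})
        if err != nil {
            panic(err)
        }
        dst, err := storage.NewLocalStorage(context.Background(), &setting.Storage{Path: "/tmp/forgejo-dst"})
        if err != nil {
            panic(err)
        }

        // SaveFrom pipes whatever the callback writes into Save with an unknown (-1) size.
        if err := storage.SaveFrom(src, "lfs/oid", func(w io.Writer) error {
            _, err := io.WriteString(w, "payload")
            return err
        }); err != nil {
            panic(err)
        }

        // Copy stats the source object first so the destination Save gets a real size when available.
        n, err := storage.Copy(dst, "lfs/oid", src, "lfs/oid")
        if err != nil {
            panic(err)
        }
        fmt.Println("copied", n, "bytes")
    }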
diff --git a/modules/storage/storage_test.go b/modules/storage/storage_test.go
new file mode 100644
index 0000000..70bcd31
--- /dev/null
+++ b/modules/storage/storage_test.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "bytes"
+ "testing"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func testStorageIterator(t *testing.T, typStr Type, cfg *setting.Storage) {
+ l, err := NewStorage(typStr, cfg)
+ require.NoError(t, err)
+
+ testFiles := [][]string{
+ {"a/1.txt", "a1"},
+ {"/a/1.txt", "aa1"}, // same as above, but with leading slash that will be trim
+ {"ab/1.txt", "ab1"},
+ {"b/1.txt", "b1"},
+ {"b/2.txt", "b2"},
+ {"b/3.txt", "b3"},
+ {"b/x 4.txt", "bx4"},
+ }
+ for _, f := range testFiles {
+ _, err = l.Save(f[0], bytes.NewBufferString(f[1]), -1)
+ require.NoError(t, err)
+ }
+
+ expectedList := map[string][]string{
+ "a": {"a/1.txt"},
+ "b": {"b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt"},
+ "": {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
+ "/": {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
+ "a/b/../../a": {"a/1.txt"},
+ }
+ for dir, expected := range expectedList {
+ count := 0
+ err = l.IterateObjects(dir, func(path string, f Object) error {
+ defer f.Close()
+ assert.Contains(t, expected, path)
+ count++
+ return nil
+ })
+ require.NoError(t, err)
+ assert.Len(t, expected, count)
+ }
+}
diff --git a/modules/storage/testdata/aws_credentials b/modules/storage/testdata/aws_credentials
new file mode 100644
index 0000000..62a5488
--- /dev/null
+++ b/modules/storage/testdata/aws_credentials
@@ -0,0 +1,3 @@
+[default]
+aws_access_key_id=ExampleAccessKeyIDAWSFile
+aws_secret_access_key=ExampleSecretAccessKeyIDAWSFile
diff --git a/modules/storage/testdata/minio.json b/modules/storage/testdata/minio.json
new file mode 100644
index 0000000..3876257
--- /dev/null
+++ b/modules/storage/testdata/minio.json
@@ -0,0 +1,12 @@
+{
+ "version": "10",
+ "aliases": {
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "ExampleAccessKeyIDMinioFile",
+ "secretKey": "ExampleSecretAccessKeyIDMinioFile",
+ "api": "S3v4",
+ "path": "dns"
+ }
+ }
+}