summaryrefslogtreecommitdiffstats
path: root/models/dbfs/dbfs.go
diff options
context:
space:
mode:
authorDaniel Baumann <daniel@debian.org>2024-10-18 20:33:49 +0200
committerDaniel Baumann <daniel@debian.org>2024-12-12 23:57:56 +0100
commite68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree97775d6c13b0f416af55314eb6a89ef792474615 /models/dbfs/dbfs.go
parentInitial commit. (diff)
downloadforgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.tar.xz
forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.zip
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'models/dbfs/dbfs.go')
-rw-r--r--models/dbfs/dbfs.go131
1 files changed, 131 insertions, 0 deletions
diff --git a/models/dbfs/dbfs.go b/models/dbfs/dbfs.go
new file mode 100644
index 0000000..f68b4a2
--- /dev/null
+++ b/models/dbfs/dbfs.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package dbfs
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "path"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+/*
+The reasons behind the DBFS (database-filesystem) package:
+When a Gitea action is running, the Gitea action server should collect and store all the logs.
+
+The requirements are:
+* The running logs must be stored across the cluster if the Gitea servers are deployed as a cluster.
+* The logs will be archived to Object Storage (S3/MinIO, etc.) after a period of time.
+* The Gitea action UI should be able to render the running logs and the archived logs.
+
+Some possible solutions for the running logs:
+* [Not ideal] Using a local temp file: it cannot be shared across the cluster.
+* [Not ideal] Using shared file in the filesystem of git repository: although at the moment, the Gitea cluster's
+ git repositories must be stored in a shared filesystem, in the future, Gitea may need a dedicated Git Service Server
+ to decouple the shared filesystem. Then the action logs will become a blocker.
+* [Not ideal] Record the logs in a database table line by line: it has a couple of problems:
+  - It's difficult to maintain multiple increasing sequences (log line numbers) across different databases.
+ - The database table will have a lot of rows and be affected by the big-table performance problem.
+ - It's difficult to load logs by using the same interface as other storages.
+ - It's difficult to calculate the size of the logs.
+
+The DBFS solution:
+* It can be used in a cluster.
+* It can share the same interface (Read/Write/Seek) as other storages.
+* It's very friendly to the database because it only needs to store far fewer rows than the log-line solution.
+* In the future, when Gitea action needs to limit the log size (other CI/CD services also do so), it's easier to calculate the log file size.
+* Even if the UI sometimes needs to render the tailing lines, those lines can be found by counting the "\n" characters from the end of the file via seeking.
+  Seeking and searching is not the fastest way, but it's still acceptable and won't affect performance too much.
+*/
+
+type dbfsMeta struct {
+ ID int64 `xorm:"pk autoincr"`
+ FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
+ BlockSize int64 `xorm:"BIGINT NOT NULL"`
+ FileSize int64 `xorm:"BIGINT NOT NULL"`
+ CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
+ ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
+}
+
+type dbfsData struct {
+ ID int64 `xorm:"pk autoincr"`
+ Revision int64 `xorm:"BIGINT NOT NULL"`
+ MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
+ BlobSize int64 `xorm:"BIGINT NOT NULL"`
+ BlobData []byte `xorm:"BLOB NOT NULL"`
+}
+
+func init() {
+ db.RegisterModel(new(dbfsMeta))
+ db.RegisterModel(new(dbfsData))
+}
+
+func OpenFile(ctx context.Context, name string, flag int) (File, error) {
+ f, err := newDbFile(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+ err = f.open(flag)
+ if err != nil {
+ _ = f.Close()
+ return nil, err
+ }
+ return f, nil
+}
+
+func Open(ctx context.Context, name string) (File, error) {
+ return OpenFile(ctx, name, os.O_RDONLY)
+}
+
+func Create(ctx context.Context, name string) (File, error) {
+ return OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+}
+
+func Rename(ctx context.Context, oldPath, newPath string) error {
+ f, err := newDbFile(ctx, oldPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return f.renameTo(newPath)
+}
+
+func Remove(ctx context.Context, name string) error {
+ f, err := newDbFile(ctx, name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return f.delete()
+}
+
+var _ fs.FileInfo = (*dbfsMeta)(nil)
+
+func (m *dbfsMeta) Name() string {
+ return path.Base(m.FullPath)
+}
+
+func (m *dbfsMeta) Size() int64 {
+ return m.FileSize
+}
+
+func (m *dbfsMeta) Mode() fs.FileMode {
+ return os.ModePerm
+}
+
+func (m *dbfsMeta) ModTime() time.Time {
+ return fileTimestampToTime(m.ModifyTimestamp)
+}
+
+func (m *dbfsMeta) IsDir() bool {
+ return false
+}
+
+func (m *dbfsMeta) Sys() any {
+ return nil
+}