author    Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer Daniel Baumann <daniel@debian.org>  2024-12-12 23:57:56 +0100
commit    e68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree      97775d6c13b0f416af55314eb6a89ef792474615 /models/repo
parent    Initial commit. (diff)
download  forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.tar.xz
          forgejo-e68b9d00a6e05b3a941f63ffb696f91e554ac5ec.zip
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'models/repo')
-rw-r--r--  models/repo.go  362
-rw-r--r--  models/repo/TestSearchRepositoryIDsByCondition/repository.yml  30
-rw-r--r--  models/repo/archive_download_count.go  90
-rw-r--r--  models/repo/archive_download_count_test.go  65
-rw-r--r--  models/repo/archiver.go  139
-rw-r--r--  models/repo/attachment.go  287
-rw-r--r--  models/repo/attachment_test.go  105
-rw-r--r--  models/repo/avatar.go  96
-rw-r--r--  models/repo/collaboration.go  170
-rw-r--r--  models/repo/collaboration_test.go  186
-rw-r--r--  models/repo/following_repo.go  39
-rw-r--r--  models/repo/following_repo_test.go  31
-rw-r--r--  models/repo/fork.go  120
-rw-r--r--  models/repo/fork_test.go  34
-rw-r--r--  models/repo/git.go  36
-rw-r--r--  models/repo/issue.go  60
-rw-r--r--  models/repo/language_stats.go  242
-rw-r--r--  models/repo/main_test.go  21
-rw-r--r--  models/repo/mirror.go  123
-rw-r--r--  models/repo/pushmirror.go  188
-rw-r--r--  models/repo/pushmirror_test.go  79
-rw-r--r--  models/repo/redirect.go  86
-rw-r--r--  models/repo/redirect_test.go  78
-rw-r--r--  models/repo/release.go  566
-rw-r--r--  models/repo/release_test.go  27
-rw-r--r--  models/repo/repo.go  951
-rw-r--r--  models/repo/repo_flags.go  102
-rw-r--r--  models/repo/repo_flags_test.go  115
-rw-r--r--  models/repo/repo_indexer.go  114
-rw-r--r--  models/repo/repo_list.go  757
-rw-r--r--  models/repo/repo_list_test.go  450
-rw-r--r--  models/repo/repo_repository.go  60
-rw-r--r--  models/repo/repo_test.go  230
-rw-r--r--  models/repo/repo_unit.go  317
-rw-r--r--  models/repo/repo_unit_test.go  39
-rw-r--r--  models/repo/search.go  53
-rw-r--r--  models/repo/star.go  101
-rw-r--r--  models/repo/star_test.go  72
-rw-r--r--  models/repo/topic.go  389
-rw-r--r--  models/repo/topic_test.go  83
-rw-r--r--  models/repo/update.go  145
-rw-r--r--  models/repo/upload.go  175
-rw-r--r--  models/repo/user_repo.go  197
-rw-r--r--  models/repo/user_repo_test.go  96
-rw-r--r--  models/repo/watch.go  190
-rw-r--r--  models/repo/watch_test.go  153
-rw-r--r--  models/repo/wiki.go  96
-rw-r--r--  models/repo/wiki_test.go  46
-rw-r--r--  models/repo_test.go  24
-rw-r--r--  models/repo_transfer.go  185
-rw-r--r--  models/repo_transfer_test.go  28
51 files changed, 8428 insertions, 0 deletions
diff --git a/models/repo.go b/models/repo.go
new file mode 100644
index 0000000..0dc8ee5
--- /dev/null
+++ b/models/repo.go
@@ -0,0 +1,362 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ _ "image/jpeg" // Needed for jpeg support
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+)
+
+// Init initializes the model package
+func Init(ctx context.Context) error {
+ return unit.LoadUnitConfig()
+}
+
+// repoChecker pairs a query that finds rows with an inconsistent counter with
+// a statement that corrects a single row by its ID.
+type repoChecker struct {
+ querySQL func(ctx context.Context) ([]map[string][]byte, error)
+ correctSQL func(ctx context.Context, id int64) error
+ desc string
+}
+
+// repoStatsCheck runs the checker's detection query and applies its correction
+// to every returned ID, honouring context cancellation between rows.
+func repoStatsCheck(ctx context.Context, checker *repoChecker) {
+ results, err := checker.querySQL(ctx)
+ if err != nil {
+ log.Error("Select %s: %v", checker.desc, err)
+ return
+ }
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled before checking %s for with id=%d", checker.desc, id)
+ return
+ default:
+ }
+ log.Trace("Updating %s: %d", checker.desc, id)
+ err = checker.correctSQL(ctx, id)
+ if err != nil {
+ log.Error("Update %s[%d]: %v", checker.desc, id, err)
+ }
+ }
+}
+
+// StatsCorrectSQL executes the given correction statement, binding the ID to
+// both placeholders (the recount subquery and the WHERE clause).
+func StatsCorrectSQL(ctx context.Context, sql string, id int64) error {
+ _, err := db.GetEngine(ctx).Exec(sql, id, id)
+ return err
+}
+
+func repoStatsCorrectNumWatches(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=? AND mode<>2) WHERE id=?", id)
+}
+
+func repoStatsCorrectNumStars(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?", id)
+}
+
+func labelStatsCorrectNumIssues(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?", id)
+}
+
+func labelStatsCorrectNumIssuesRepo(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=id) WHERE repo_id=?", id)
+ return err
+}
+
+func labelStatsCorrectNumClosedIssues(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_closed_issues=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?) WHERE `label`.id=?", true, id)
+ return err
+}
+
+func labelStatsCorrectNumClosedIssuesRepo(ctx context.Context, id int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `label` SET num_closed_issues=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?) WHERE `label`.repo_id=?", true, id)
+ return err
+}
+
+var milestoneStatsQueryNumIssues = "SELECT `milestone`.id FROM `milestone` WHERE `milestone`.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE `issue`.milestone_id=`milestone`.id AND `issue`.is_closed=?) OR `milestone`.num_issues!=(SELECT COUNT(*) FROM `issue` WHERE `issue`.milestone_id=`milestone`.id)"
+
+func milestoneStatsCorrectNumIssuesRepo(ctx context.Context, id int64) error {
+ e := db.GetEngine(ctx)
+ results, err := e.Query(milestoneStatsQueryNumIssues+" AND `milestone`.repo_id = ?", true, id)
+ if err != nil {
+ return err
+ }
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ err = issues_model.UpdateMilestoneCounters(ctx, id)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func userStatsCorrectNumRepos(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?", id)
+}
+
+func repoStatsCorrectIssueNumComments(ctx context.Context, id int64) error {
+ return StatsCorrectSQL(ctx, "UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?", id)
+}
+
+func repoStatsCorrectNumIssues(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, false, false)
+}
+
+func repoStatsCorrectNumPulls(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, true, false)
+}
+
+func repoStatsCorrectNumClosedIssues(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, false, true)
+}
+
+func repoStatsCorrectNumClosedPulls(ctx context.Context, id int64) error {
+ return repo_model.UpdateRepoIssueNumbers(ctx, id, true, true)
+}
+
+// statsQuery wraps a raw detection query so it can be used as a repoChecker
+// querySQL function.
+func statsQuery(args ...any) func(context.Context) ([]map[string][]byte, error) {
+ return func(ctx context.Context) ([]map[string][]byte, error) {
+ return db.GetEngine(ctx).Query(args...)
+ }
+}
+
+// CheckRepoStats checks the repository stats
+func CheckRepoStats(ctx context.Context) error {
+ log.Trace("Doing: CheckRepoStats")
+
+ checkers := []*repoChecker{
+ // Repository.NumWatches
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id AND mode<>2)"),
+ repoStatsCorrectNumWatches,
+ "repository count 'num_watches'",
+ },
+ // Repository.NumStars
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)"),
+ repoStatsCorrectNumStars,
+ "repository count 'num_stars'",
+ },
+ // Repository.NumIssues
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=?)", false),
+ repoStatsCorrectNumIssues,
+ "repository count 'num_issues'",
+ },
+ // Repository.NumClosedIssues
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false),
+ repoStatsCorrectNumClosedIssues,
+ "repository count 'num_closed_issues'",
+ },
+ // Repository.NumPulls
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_pulls!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=?)", true),
+ repoStatsCorrectNumPulls,
+ "repository count 'num_pulls'",
+ },
+ // Repository.NumClosedPulls
+ {
+ statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_pulls!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, true),
+ repoStatsCorrectNumClosedPulls,
+ "repository count 'num_closed_pulls'",
+ },
+ // Label.NumIssues
+ {
+ statsQuery("SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)"),
+ labelStatsCorrectNumIssues,
+ "label count 'num_issues'",
+ },
+ // Label.NumClosedIssues
+ {
+ statsQuery("SELECT `label`.id FROM `label` WHERE `label`.num_closed_issues!=(SELECT COUNT(*) FROM `issue_label`,`issue` WHERE `issue_label`.label_id=`label`.id AND `issue_label`.issue_id=`issue`.id AND `issue`.is_closed=?)", true),
+ labelStatsCorrectNumClosedIssues,
+ "label count 'num_closed_issues'",
+ },
+ // Milestone.Num{,Closed}Issues
+ {
+ statsQuery(milestoneStatsQueryNumIssues, true),
+ issues_model.UpdateMilestoneCounters,
+ "milestone count 'num_closed_issues' and 'num_issues'",
+ },
+ // User.NumRepos
+ {
+ statsQuery("SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)"),
+ userStatsCorrectNumRepos,
+ "user count 'num_repos'",
+ },
+ // Issue.NumComments
+ {
+ statsQuery("SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)"),
+ repoStatsCorrectIssueNumComments,
+ "issue count 'num_comments'",
+ },
+ }
+ for _, checker := range checkers {
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled before %s", checker.desc)
+ return db.ErrCancelledf("before checking %s", checker.desc)
+ default:
+ repoStatsCheck(ctx, checker)
+ }
+ }
+
+ // FIXME: use checker when we stop supporting the old fork repo format.
+ // ***** START: Repository.NumForks *****
+ e := db.GetEngine(ctx)
+ results, err := e.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
+ if err != nil {
+ log.Error("Select repository count 'num_forks': %v", err)
+ } else {
+ for _, result := range results {
+ id, _ := strconv.ParseInt(string(result["id"]), 10, 64)
+ select {
+ case <-ctx.Done():
+ log.Warn("CheckRepoStats: Cancelled")
+ return db.ErrCancelledf("during repository count 'num_fork' for repo ID %d", id)
+ default:
+ }
+ log.Trace("Updating repository count 'num_forks': %d", id)
+
+ repo, err := repo_model.GetRepositoryByID(ctx, id)
+ if err != nil {
+ log.Error("repo_model.GetRepositoryByID[%d]: %v", id, err)
+ continue
+ }
+
+ _, err = e.SQL("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID).Get(&repo.NumForks)
+ if err != nil {
+ log.Error("Select count of forks[%d]: %v", repo.ID, err)
+ continue
+ }
+
+ if _, err = e.ID(repo.ID).Cols("num_forks").Update(repo); err != nil {
+ log.Error("UpdateRepository[%d]: %v", id, err)
+ continue
+ }
+ }
+ }
+ // ***** END: Repository.NumForks *****
+ return nil
+}
+
+// UpdateRepoStats recalculates the cached watch, star, issue, pull, label and
+// milestone counters for the repository with the given ID.
+func UpdateRepoStats(ctx context.Context, id int64) error {
+ var err error
+
+ for _, f := range []func(ctx context.Context, id int64) error{
+ repoStatsCorrectNumWatches,
+ repoStatsCorrectNumStars,
+ repoStatsCorrectNumIssues,
+ repoStatsCorrectNumPulls,
+ repoStatsCorrectNumClosedIssues,
+ repoStatsCorrectNumClosedPulls,
+ labelStatsCorrectNumIssuesRepo,
+ labelStatsCorrectNumClosedIssuesRepo,
+ milestoneStatsCorrectNumIssuesRepo,
+ } {
+ err = f(ctx, id)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// updateUserStarNumbers recalculates num_stars for the given users within a
+// single transaction.
+func updateUserStarNumbers(ctx context.Context, users []user_model.User) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ for _, user := range users {
+ if _, err = db.Exec(ctx, "UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) WHERE id=?", user.ID, user.ID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+// DoctorUserStarNum recalculates the star numbers for all users
+func DoctorUserStarNum(ctx context.Context) (err error) {
+ const batchSize = 100
+
+ for start := 0; ; start += batchSize {
+ users := make([]user_model.User, 0, batchSize)
+ if err = db.GetEngine(ctx).Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil {
+ return err
+ }
+ if len(users) == 0 {
+ break
+ }
+
+ if err = updateUserStarNumbers(ctx, users); err != nil {
+ return err
+ }
+ }
+
+ log.Debug("recalculate Stars number for all user finished")
+
+ return err
+}
+
+// DeleteDeployKey deletes the deploy key with the given ID
+func DeleteDeployKey(ctx context.Context, doer *user_model.User, id int64) error {
+ key, err := asymkey_model.GetDeployKeyByID(ctx, id)
+ if err != nil {
+ if asymkey_model.IsErrDeployKeyNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetDeployKeyByID: %w", err)
+ }
+
+ // Check if user has access to delete this key.
+ if !doer.IsAdmin {
+ repo, err := repo_model.GetRepositoryByID(ctx, key.RepoID)
+ if err != nil {
+ return fmt.Errorf("GetRepositoryByID: %w", err)
+ }
+ has, err := access_model.IsUserRepoAdmin(ctx, repo, doer)
+ if err != nil {
+ return fmt.Errorf("GetUserRepoPermission: %w", err)
+ } else if !has {
+ return asymkey_model.ErrKeyAccessDenied{
+ UserID: doer.ID,
+ KeyID: key.ID,
+ Note: "deploy",
+ }
+ }
+ }
+
+ if _, err := db.DeleteByID[asymkey_model.DeployKey](ctx, key.ID); err != nil {
+ return fmt.Errorf("delete deploy key [%d]: %w", key.ID, err)
+ }
+
+ // Check if this is the last reference to same key content.
+ has, err := asymkey_model.IsDeployKeyExistByKeyID(ctx, key.KeyID)
+ if err != nil {
+ return err
+ } else if !has {
+ if _, err = db.DeleteByID[asymkey_model.PublicKey](ctx, key.KeyID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
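For orientation, the repoChecker pattern above pairs a detection query (rows whose cached counter disagrees with a recount) with a per-row correction keyed by ID. A minimal sketch of wiring up one more checker of the same shape — illustrative only, not part of this patch, and assuming a hypothetical 'repository.num_releases' column — could look like this:

// Hypothetical checker following the repoChecker shape used by CheckRepoStats;
// "num_releases" is an assumed column used purely for illustration.
var numReleasesChecker = &repoChecker{
	querySQL: statsQuery("SELECT repo.id FROM `repository` repo WHERE repo.num_releases!=(SELECT COUNT(*) FROM `release` WHERE repo_id=repo.id)"),
	correctSQL: func(ctx context.Context, id int64) error {
		return StatsCorrectSQL(ctx, "UPDATE `repository` SET num_releases=(SELECT COUNT(*) FROM `release` WHERE repo_id=?) WHERE id=?", id)
	},
	desc: "repository count 'num_releases'",
}

// It would then be run like the built-in checkers: repoStatsCheck(ctx, numReleasesChecker)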
diff --git a/models/repo/TestSearchRepositoryIDsByCondition/repository.yml b/models/repo/TestSearchRepositoryIDsByCondition/repository.yml
new file mode 100644
index 0000000..9ce8307
--- /dev/null
+++ b/models/repo/TestSearchRepositoryIDsByCondition/repository.yml
@@ -0,0 +1,30 @@
+-
+ id: 1001
+ owner_id: 33
+ owner_name: user33
+ lower_name: repo1001
+ name: repo1001
+ default_branch: main
+ num_watches: 0
+ num_stars: 0
+ num_forks: 0
+ num_issues: 0
+ num_closed_issues: 0
+ num_pulls: 0
+ num_closed_pulls: 0
+ num_milestones: 0
+ num_closed_milestones: 0
+ num_projects: 0
+ num_closed_projects: 0
+ is_private: false
+ is_empty: false
+ is_archived: false
+ is_mirror: false
+ status: 0
+ is_fork: false
+ fork_id: 0
+ is_template: false
+ template_id: 0
+ size: 0
+ is_fsck_enabled: true
+ close_issues_via_commit_in_any_branch: false
diff --git a/models/repo/archive_download_count.go b/models/repo/archive_download_count.go
new file mode 100644
index 0000000..31f0399
--- /dev/null
+++ b/models/repo/archive_download_count.go
@@ -0,0 +1,90 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ api "code.gitea.io/gitea/modules/structs"
+)
+
+// RepoArchiveDownloadCount counts all archive downloads for a tag
+type RepoArchiveDownloadCount struct { //nolint:revive
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ ReleaseID int64 `xorm:"index unique(s)"`
+ Type git.ArchiveType `xorm:"unique(s)"`
+ Count int64
+}
+
+func init() {
+ db.RegisterModel(new(RepoArchiveDownloadCount))
+}
+
+// CountArchiveDownload adds one download to the given archive
+func CountArchiveDownload(ctx context.Context, repoID, releaseID int64, tp git.ArchiveType) error {
+ updateCount, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).And("release_id = ?", releaseID).And("`type` = ?", tp).Incr("count").Update(new(RepoArchiveDownloadCount))
+ if err != nil {
+ return err
+ }
+
+ if updateCount != 0 {
+ // The count was updated, so we can exit
+ return nil
+ }
+
+ // The archive does not exist in the database, so let's add it
+ newCounter := &RepoArchiveDownloadCount{
+ RepoID: repoID,
+ ReleaseID: releaseID,
+ Type: tp,
+ Count: 1,
+ }
+
+ _, err = db.GetEngine(ctx).Insert(newCounter)
+ return err
+}
+
+// GetArchiveDownloadCount returns the download count of a tag
+func GetArchiveDownloadCount(ctx context.Context, repoID, releaseID int64) (*api.TagArchiveDownloadCount, error) {
+ downloadCountList := make([]RepoArchiveDownloadCount, 0)
+ err := db.GetEngine(ctx).Where("repo_id = ?", repoID).And("release_id = ?", releaseID).Find(&downloadCountList)
+ if err != nil {
+ return nil, err
+ }
+
+ tagCounter := new(api.TagArchiveDownloadCount)
+
+ for _, singleCount := range downloadCountList {
+ switch singleCount.Type {
+ case git.ZIP:
+ tagCounter.Zip = singleCount.Count
+ case git.TARGZ:
+ tagCounter.TarGz = singleCount.Count
+ }
+ }
+
+ return tagCounter, nil
+}
+
+// GetArchiveDownloadCountForTagName returns the download count of a tag with the given name
+func GetArchiveDownloadCountForTagName(ctx context.Context, repoID int64, tagName string) (*api.TagArchiveDownloadCount, error) {
+ release, err := GetRelease(ctx, repoID, tagName)
+ if err != nil {
+ if IsErrReleaseNotExist(err) {
+ return new(api.TagArchiveDownloadCount), nil
+ }
+ return nil, err
+ }
+
+ return GetArchiveDownloadCount(ctx, repoID, release.ID)
+}
+
+// DeleteArchiveDownloadCountForRelease deletes the release from the repo_archive_download_count table
+func DeleteArchiveDownloadCountForRelease(ctx context.Context, releaseID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&RepoArchiveDownloadCount{ReleaseID: releaseID})
+ return err
+}
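The counter above is an increment-or-insert: an UPDATE with Incr("count") is tried first, and a fresh row with Count set to 1 is only inserted when no existing row was affected. A hedged usage sketch (not part of this patch; the helper name is made up and the repo_model/git import aliases match the test file below):

// recordTarGzDownload records one tar.gz download and reads the counters back.
func recordTarGzDownload(ctx context.Context, repoID, releaseID int64) error {
	if err := repo_model.CountArchiveDownload(ctx, repoID, releaseID, git.TARGZ); err != nil {
		return err
	}
	counts, err := repo_model.GetArchiveDownloadCount(ctx, repoID, releaseID)
	if err != nil {
		return err
	}
	// counts.TarGz now includes the download recorded above; counts.Zip is unchanged.
	fmt.Printf("zip=%d tar.gz=%d\n", counts.Zip, counts.TarGz)
	return nil
}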
diff --git a/models/repo/archive_download_count_test.go b/models/repo/archive_download_count_test.go
new file mode 100644
index 0000000..ffc6cdf
--- /dev/null
+++ b/models/repo/archive_download_count_test.go
@@ -0,0 +1,65 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/git"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepoArchiveDownloadCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ release, err := repo_model.GetReleaseByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+
+ // We have no count, so it should return 0
+ downloadCount, err := repo_model.GetArchiveDownloadCount(db.DefaultContext, release.RepoID, release.ID)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(0), downloadCount.TarGz)
+
+ // Set the TarGz counter to 1
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.TARGZ)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(1), downloadCount.TarGz)
+
+ // Set the TarGz counter to 2
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.TARGZ)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(2), downloadCount.TarGz)
+
+ // Set the Zip counter to 1
+ err = repo_model.CountArchiveDownload(db.DefaultContext, release.RepoID, release.ID, git.ZIP)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), downloadCount.Zip)
+ assert.Equal(t, int64(2), downloadCount.TarGz)
+
+ // Delete the count
+ err = repo_model.DeleteArchiveDownloadCountForRelease(db.DefaultContext, release.ID)
+ require.NoError(t, err)
+
+ downloadCount, err = repo_model.GetArchiveDownloadCountForTagName(db.DefaultContext, release.RepoID, release.TagName)
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), downloadCount.Zip)
+ assert.Equal(t, int64(0), downloadCount.TarGz)
+}
diff --git a/models/repo/archiver.go b/models/repo/archiver.go
new file mode 100644
index 0000000..3f05fcf
--- /dev/null
+++ b/models/repo/archiver.go
@@ -0,0 +1,139 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ArchiverStatus represents repo archive status
+type ArchiverStatus int
+
+// enumerate all repo archive statuses
+const (
+ ArchiverGenerating = iota // the archiver is generating
+ ArchiverReady // it's ready
+)
+
+// RepoArchiver represents all archivers
+type RepoArchiver struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"index unique(s)"`
+ Type git.ArchiveType `xorm:"unique(s)"`
+ Status ArchiverStatus
+ CommitID string `xorm:"VARCHAR(64) unique(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL created"`
+ ReleaseID int64 `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoArchiver))
+}
+
+// RelativePath returns the archive path relative to the archive storage root.
+func (archiver *RepoArchiver) RelativePath() string {
+ return fmt.Sprintf("%d/%s/%s.%s", archiver.RepoID, archiver.CommitID[:2], archiver.CommitID, archiver.Type.String())
+}
+
+// repoArchiverForRelativePath takes a relativePath created from (archiver *RepoArchiver) RelativePath() and creates a shell repoArchiver struct representing it
+func repoArchiverForRelativePath(relativePath string) (*RepoArchiver, error) {
+ parts := strings.SplitN(relativePath, "/", 3)
+ if len(parts) != 3 {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+ repoID, err := strconv.ParseInt(parts[0], 10, 64)
+ if err != nil {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+ nameExts := strings.SplitN(parts[2], ".", 2)
+ if len(nameExts) != 2 {
+ return nil, util.SilentWrap{Message: fmt.Sprintf("invalid storage path: %s", relativePath), Err: util.ErrInvalidArgument}
+ }
+
+ return &RepoArchiver{
+ RepoID: repoID,
+ CommitID: parts[1] + nameExts[0],
+ Type: git.ToArchiveType(nameExts[1]),
+ }, nil
+}
+
+// GetRepoArchiver gets an archiver, or nil if none matches
+func GetRepoArchiver(ctx context.Context, repoID int64, tp git.ArchiveType, commitID string) (*RepoArchiver, error) {
+ var archiver RepoArchiver
+ has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("`type`=?", tp).And("commit_id=?", commitID).Get(&archiver)
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ return &archiver, nil
+ }
+ return nil, nil
+}
+
+// ExistsRepoArchiverWithStoragePath checks if there is a RepoArchiver for a given storage path
+func ExistsRepoArchiverWithStoragePath(ctx context.Context, storagePath string) (bool, error) {
+ // We need to invert the path provided func (archiver *RepoArchiver) RelativePath() above
+ archiver, err := repoArchiverForRelativePath(storagePath)
+ if err != nil {
+ return false, err
+ }
+
+ return db.GetEngine(ctx).Exist(archiver)
+}
+
+// UpdateRepoArchiverStatus updates archiver's status
+func UpdateRepoArchiverStatus(ctx context.Context, archiver *RepoArchiver) error {
+ _, err := db.GetEngine(ctx).ID(archiver.ID).Cols("status").Update(archiver)
+ return err
+}
+
+// DeleteAllRepoArchives deletes all repo archives records
+func DeleteAllRepoArchives(ctx context.Context) error {
+ // 1=1 to enforce delete all data, otherwise it will delete nothing
+ _, err := db.GetEngine(ctx).Where("1=1").Delete(new(RepoArchiver))
+ return err
+}
+
+// FindRepoArchiversOption represents the options for finding repo archivers
+type FindRepoArchiversOption struct {
+ db.ListOptions
+ OlderThan time.Duration
+}
+
+func (opts FindRepoArchiversOption) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.OlderThan > 0 {
+ cond = cond.And(builder.Lt{"created_unix": time.Now().Add(-opts.OlderThan).Unix()})
+ }
+ return cond
+}
+
+func (opts FindRepoArchiversOption) ToOrders() string {
+ return "created_unix ASC"
+}
+
+// SetArchiveRepoState sets if a repo is archived
+func SetArchiveRepoState(ctx context.Context, repo *Repository, isArchived bool) (err error) {
+ repo.IsArchived = isArchived
+
+ if isArchived {
+ repo.ArchivedUnix = timeutil.TimeStampNow()
+ } else {
+ repo.ArchivedUnix = timeutil.TimeStamp(0)
+ }
+
+ _, err = db.GetEngine(ctx).ID(repo.ID).Cols("is_archived", "archived_unix").NoAutoTime().Update(repo)
+ return err
+}
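FindRepoArchiversOption above plugs into the generic finder used elsewhere in this diff (see db.Find[Collaboration] in collaboration.go further down). A sketch of listing stale archive records, e.g. from a cleanup job — illustrative only, with an arbitrary 24-hour threshold and ctx assumed in scope:

// Find up to 50 archiver records older than 24 hours.
opts := repo_model.FindRepoArchiversOption{
	ListOptions: db.ListOptions{Page: 1, PageSize: 50},
	OlderThan:   24 * time.Hour,
}
stale, err := db.Find[repo_model.RepoArchiver](ctx, opts)
if err != nil {
	return err
}
for _, archiver := range stale {
	// RelativePath() gives the object's path inside the archive storage,
	// e.g. "<repo id>/<first two commit chars>/<commit>.<ext>".
	_ = archiver.RelativePath()
}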
diff --git a/models/repo/attachment.go b/models/repo/attachment.go
new file mode 100644
index 0000000..128bceb
--- /dev/null
+++ b/models/repo/attachment.go
@@ -0,0 +1,287 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "path"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+// Attachment represents an attachment of an issue, comment, or release.
+type Attachment struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ RepoID int64 `xorm:"INDEX"` // this should not be zero
+ IssueID int64 `xorm:"INDEX"` // maybe zero when creating
+ ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating
+ UploaderID int64 `xorm:"INDEX DEFAULT 0"` // Notice: will be zero before this column added
+ CommentID int64 `xorm:"INDEX"`
+ Name string
+ DownloadCount int64 `xorm:"DEFAULT 0"`
+ Size int64 `xorm:"DEFAULT 0"`
+ NoAutoTime bool `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ CustomDownloadURL string `xorm:"-"`
+ ExternalURL string
+}
+
+func init() {
+ db.RegisterModel(new(Attachment))
+}
+
+// IncreaseDownloadCount increments the download count by 1
+func (a *Attachment) IncreaseDownloadCount(ctx context.Context) error {
+ // Update download count.
+ if _, err := db.GetEngine(ctx).Exec("UPDATE `attachment` SET download_count=download_count+1 WHERE id=?", a.ID); err != nil {
+ return fmt.Errorf("increase attachment count: %w", err)
+ }
+
+ return nil
+}
+
+// AttachmentRelativePath returns the relative path
+func AttachmentRelativePath(uuid string) string {
+ return path.Join(uuid[0:1], uuid[1:2], uuid)
+}
+
+// RelativePath returns the relative path of the attachment
+func (a *Attachment) RelativePath() string {
+ return AttachmentRelativePath(a.UUID)
+}
+
+// DownloadURL returns the download url of the attached file
+func (a *Attachment) DownloadURL() string {
+ if a.ExternalURL != "" {
+ return a.ExternalURL
+ }
+
+ if a.CustomDownloadURL != "" {
+ return a.CustomDownloadURL
+ }
+
+ return setting.AppURL + "attachments/" + url.PathEscape(a.UUID)
+}
+
+// ErrAttachmentNotExist represents a "AttachmentNotExist" kind of error.
+type ErrAttachmentNotExist struct {
+ ID int64
+ UUID string
+}
+
+// IsErrAttachmentNotExist checks if an error is a ErrAttachmentNotExist.
+func IsErrAttachmentNotExist(err error) bool {
+ _, ok := err.(ErrAttachmentNotExist)
+ return ok
+}
+
+func (err ErrAttachmentNotExist) Error() string {
+ return fmt.Sprintf("attachment does not exist [id: %d, uuid: %s]", err.ID, err.UUID)
+}
+
+func (err ErrAttachmentNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+type ErrInvalidExternalURL struct {
+ ExternalURL string
+}
+
+func IsErrInvalidExternalURL(err error) bool {
+ _, ok := err.(ErrInvalidExternalURL)
+ return ok
+}
+
+func (err ErrInvalidExternalURL) Error() string {
+ return fmt.Sprintf("invalid external URL: '%s'", err.ExternalURL)
+}
+
+func (err ErrInvalidExternalURL) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// GetAttachmentByID returns attachment by given id
+func GetAttachmentByID(ctx context.Context, id int64) (*Attachment, error) {
+ attach := &Attachment{}
+ if has, err := db.GetEngine(ctx).ID(id).Get(attach); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrAttachmentNotExist{ID: id, UUID: ""}
+ }
+ return attach, nil
+}
+
+// GetAttachmentByUUID returns attachment by given UUID.
+func GetAttachmentByUUID(ctx context.Context, uuid string) (*Attachment, error) {
+ attach := &Attachment{}
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(attach)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrAttachmentNotExist{0, uuid}
+ }
+ return attach, nil
+}
+
+// GetAttachmentsByUUIDs returns attachments by the given UUID list.
+func GetAttachmentsByUUIDs(ctx context.Context, uuids []string) ([]*Attachment, error) {
+ if len(uuids) == 0 {
+ return []*Attachment{}, nil
+ }
+
+ // Silently drop invalid uuids.
+ attachments := make([]*Attachment, 0, len(uuids))
+ return attachments, db.GetEngine(ctx).In("uuid", uuids).Find(&attachments)
+}
+
+// ExistAttachmentsByUUID returns true if attachment exists with the given UUID
+func ExistAttachmentsByUUID(ctx context.Context, uuid string) (bool, error) {
+ return db.GetEngine(ctx).Where("`uuid`=?", uuid).Exist(new(Attachment))
+}
+
+// GetAttachmentsByIssueID returns all attachments of an issue.
+func GetAttachmentsByIssueID(ctx context.Context, issueID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 10)
+ return attachments, db.GetEngine(ctx).Where("issue_id = ? AND comment_id = 0", issueID).Find(&attachments)
+}
+
+// GetAttachmentsByIssueIDImagesLatest returns the latest image attachments of an issue.
+func GetAttachmentsByIssueIDImagesLatest(ctx context.Context, issueID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 5)
+ return attachments, db.GetEngine(ctx).Where(`issue_id = ? AND (name like '%.apng'
+ OR name like '%.avif'
+ OR name like '%.bmp'
+ OR name like '%.gif'
+ OR name like '%.jpg'
+ OR name like '%.jpeg'
+ OR name like '%.jxl'
+ OR name like '%.png'
+ OR name like '%.svg'
+ OR name like '%.webp')`, issueID).Desc("comment_id").Limit(5).Find(&attachments)
+}
+
+// GetAttachmentsByCommentID returns all attachments of the comment with the given ID.
+func GetAttachmentsByCommentID(ctx context.Context, commentID int64) ([]*Attachment, error) {
+ attachments := make([]*Attachment, 0, 10)
+ return attachments, db.GetEngine(ctx).Where("comment_id=?", commentID).Find(&attachments)
+}
+
+// GetAttachmentByReleaseIDFileName returns attachment by given releaseId and fileName.
+func GetAttachmentByReleaseIDFileName(ctx context.Context, releaseID int64, fileName string) (*Attachment, error) {
+ attach := &Attachment{ReleaseID: releaseID, Name: fileName}
+ has, err := db.GetEngine(ctx).Get(attach)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, err
+ }
+ return attach, nil
+}
+
+// DeleteAttachment deletes the given attachment and optionally the associated file.
+func DeleteAttachment(ctx context.Context, a *Attachment, remove bool) error {
+ _, err := DeleteAttachments(ctx, []*Attachment{a}, remove)
+ return err
+}
+
+// DeleteAttachments deletes the given attachments and optionally the associated files.
+func DeleteAttachments(ctx context.Context, attachments []*Attachment, remove bool) (int, error) {
+ if len(attachments) == 0 {
+ return 0, nil
+ }
+
+ ids := make([]int64, 0, len(attachments))
+ for _, a := range attachments {
+ ids = append(ids, a.ID)
+ }
+
+ cnt, err := db.GetEngine(ctx).In("id", ids).NoAutoCondition().Delete(attachments[0])
+ if err != nil {
+ return 0, err
+ }
+
+ if remove {
+ for i, a := range attachments {
+ if err := storage.Attachments.Delete(a.RelativePath()); err != nil {
+ return i, err
+ }
+ }
+ }
+ return int(cnt), nil
+}
+
+// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
+func DeleteAttachmentsByIssue(ctx context.Context, issueID int64, remove bool) (int, error) {
+ attachments, err := GetAttachmentsByIssueID(ctx, issueID)
+ if err != nil {
+ return 0, err
+ }
+
+ return DeleteAttachments(ctx, attachments, remove)
+}
+
+// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
+func DeleteAttachmentsByComment(ctx context.Context, commentID int64, remove bool) (int, error) {
+ attachments, err := GetAttachmentsByCommentID(ctx, commentID)
+ if err != nil {
+ return 0, err
+ }
+
+ return DeleteAttachments(ctx, attachments, remove)
+}
+
+// UpdateAttachmentByUUID updates the attachment identified by its UUID
+func UpdateAttachmentByUUID(ctx context.Context, attach *Attachment, cols ...string) error {
+ if attach.UUID == "" {
+ return fmt.Errorf("attachment uuid should not be blank")
+ }
+ if attach.ExternalURL != "" && !validation.IsValidExternalURL(attach.ExternalURL) {
+ return ErrInvalidExternalURL{ExternalURL: attach.ExternalURL}
+ }
+ _, err := db.GetEngine(ctx).Where("uuid=?", attach.UUID).Cols(cols...).Update(attach)
+ return err
+}
+
+// UpdateAttachment updates the given attachment in database
+func UpdateAttachment(ctx context.Context, atta *Attachment) error {
+ if atta.ExternalURL != "" && !validation.IsValidExternalURL(atta.ExternalURL) {
+ return ErrInvalidExternalURL{ExternalURL: atta.ExternalURL}
+ }
+ sess := db.GetEngine(ctx).Cols("name", "issue_id", "release_id", "comment_id", "download_count")
+ if atta.ID != 0 && atta.UUID == "" {
+ sess = sess.ID(atta.ID)
+ } else {
+ // Use uuid only if id is not set and uuid is set
+ sess = sess.Where("uuid = ?", atta.UUID)
+ }
+ _, err := sess.Update(atta)
+ return err
+}
+
+// DeleteAttachmentsByRelease deletes all attachments associated with the given release.
+func DeleteAttachmentsByRelease(ctx context.Context, releaseID int64) error {
+ _, err := db.GetEngine(ctx).Where("release_id = ?", releaseID).Delete(&Attachment{})
+ return err
+}
+
+// CountOrphanedAttachments returns the number of bad attachments
+func CountOrphanedAttachments(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("(issue_id > 0 and issue_id not in (select id from issue)) or (release_id > 0 and release_id not in (select id from `release`))").
+ Count(new(Attachment))
+}
+
+// DeleteOrphanedAttachments deletes all bad attachments
+func DeleteOrphanedAttachments(ctx context.Context) error {
+ _, err := db.GetEngine(ctx).Where("(issue_id > 0 and issue_id not in (select id from issue)) or (release_id > 0 and release_id not in (select id from `release`))").
+ Delete(new(Attachment))
+ return err
+}
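DownloadURL above resolves with a fixed precedence: an explicitly stored external URL wins, then a caller-supplied custom URL, then the default link under the instance's AppURL. A small sketch with made-up values (not part of this patch):

// Precedence of the three URL sources checked by DownloadURL.
a := &repo_model.Attachment{UUID: "7b4a7c6e-0000-0000-0000-000000000000"}
_ = a.DownloadURL() // default: "<AppURL>attachments/7b4a7c6e-..."

a.CustomDownloadURL = "https://cdn.example.org/releases/asset.tar.gz"
_ = a.DownloadURL() // the custom URL set by the caller wins over the default

a.ExternalURL = "https://downloads.example.org/asset.tar.gz"
_ = a.DownloadURL() // a stored external URL takes precedence over both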
diff --git a/models/repo/attachment_test.go b/models/repo/attachment_test.go
new file mode 100644
index 0000000..23945ba
--- /dev/null
+++ b/models/repo/attachment_test.go
@@ -0,0 +1,105 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIncreaseDownloadCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attachment, err := repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), attachment.DownloadCount)
+
+ // increase download count
+ err = attachment.IncreaseDownloadCount(db.DefaultContext)
+ require.NoError(t, err)
+
+ attachment, err = repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), attachment.DownloadCount)
+}
+
+func TestGetByCommentOrIssueID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // count of attachments from issue ID
+ attachments, err := repo_model.GetAttachmentsByIssueID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, attachments, 1)
+
+ attachments, err = repo_model.GetAttachmentsByCommentID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Len(t, attachments, 2)
+}
+
+func TestDeleteAttachments(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.DeleteAttachmentsByIssue(db.DefaultContext, 4, false)
+ require.NoError(t, err)
+ assert.Equal(t, 2, count)
+
+ count, err = repo_model.DeleteAttachmentsByComment(db.DefaultContext, 2, false)
+ require.NoError(t, err)
+ assert.Equal(t, 2, count)
+
+ err = repo_model.DeleteAttachment(db.DefaultContext, &repo_model.Attachment{ID: 8}, false)
+ require.NoError(t, err)
+
+ attachment, err := repo_model.GetAttachmentByUUID(db.DefaultContext, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18")
+ require.Error(t, err)
+ assert.True(t, repo_model.IsErrAttachmentNotExist(err))
+ assert.Nil(t, attachment)
+}
+
+func TestGetAttachmentByID(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attach, err := repo_model.GetAttachmentByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.UUID)
+}
+
+func TestAttachment_DownloadURL(t *testing.T) {
+ attach := &repo_model.Attachment{
+ UUID: "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11",
+ ID: 1,
+ }
+ assert.Equal(t, "https://try.gitea.io/attachments/a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.DownloadURL())
+}
+
+func TestUpdateAttachment(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attach, err := repo_model.GetAttachmentByID(db.DefaultContext, 1)
+ require.NoError(t, err)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attach.UUID)
+
+ attach.Name = "new_name"
+ require.NoError(t, repo_model.UpdateAttachment(db.DefaultContext, attach))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Attachment{Name: "new_name"})
+}
+
+func TestGetAttachmentsByUUIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ attachList, err := repo_model.GetAttachmentsByUUIDs(db.DefaultContext, []string{"a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", "not-existing-uuid"})
+ require.NoError(t, err)
+ assert.Len(t, attachList, 2)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attachList[0].UUID)
+ assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", attachList[1].UUID)
+ assert.Equal(t, int64(1), attachList[0].IssueID)
+ assert.Equal(t, int64(5), attachList[1].IssueID)
+}
diff --git a/models/repo/avatar.go b/models/repo/avatar.go
new file mode 100644
index 0000000..72ee938
--- /dev/null
+++ b/models/repo/avatar.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "image/png"
+ "io"
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/avatar"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+// CustomAvatarRelativePath returns repository custom avatar file path.
+func (repo *Repository) CustomAvatarRelativePath() string {
+ return repo.Avatar
+}
+
+// ExistsWithAvatarAtStoragePath returns true if there is a repository with this avatar at the given storage path
+func ExistsWithAvatarAtStoragePath(ctx context.Context, storagePath string) (bool, error) {
+ // See func (repo *Repository) CustomAvatarRelativePath()
+ // repo.Avatar is used directly as the storage path - therefore we can check for existence directly using the path
+ return db.GetEngine(ctx).Where("`avatar`=?", storagePath).Exist(new(Repository))
+}
+
+// RelAvatarLink returns a relative link to the repository's avatar.
+func (repo *Repository) RelAvatarLink(ctx context.Context) string {
+ return repo.relAvatarLink(ctx)
+}
+
+// generateRandomAvatar generates a random avatar for repository.
+func generateRandomAvatar(ctx context.Context, repo *Repository) error {
+ idToString := fmt.Sprintf("%d", repo.ID)
+
+ seed := idToString
+ img, err := avatar.RandomImage([]byte(seed))
+ if err != nil {
+ return fmt.Errorf("RandomImage: %w", err)
+ }
+
+ repo.Avatar = idToString
+
+ if err := storage.SaveFrom(storage.RepoAvatars, repo.CustomAvatarRelativePath(), func(w io.Writer) error {
+ if err := png.Encode(w, img); err != nil {
+ log.Error("Encode: %v", err)
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("Failed to save avatar %s: %w", repo.CustomAvatarRelativePath(), err)
+ }
+
+ log.Info("New random avatar created for repository: %d", repo.ID)
+
+ if _, err := db.GetEngine(ctx).ID(repo.ID).Cols("avatar").NoAutoTime().Update(repo); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (repo *Repository) relAvatarLink(ctx context.Context) string {
+ // If no avatar - path is empty
+ avatarPath := repo.CustomAvatarRelativePath()
+ if len(avatarPath) == 0 {
+ switch mode := setting.RepoAvatar.Fallback; mode {
+ case "image":
+ return setting.RepoAvatar.FallbackImage
+ case "random":
+ if err := generateRandomAvatar(ctx, repo); err != nil {
+ log.Error("generateRandomAvatar: %v", err)
+ }
+ default:
+ // default behaviour: do not display avatar
+ return ""
+ }
+ }
+ return setting.AppSubURL + "/repo-avatars/" + url.PathEscape(repo.Avatar)
+}
+
+// AvatarLink returns a link to the repository's avatar.
+func (repo *Repository) AvatarLink(ctx context.Context) string {
+ link := repo.relAvatarLink(ctx)
+ // we only prepend our AppURL to our known (relative, internal) avatar link to get an absolute URL
+ if strings.HasPrefix(link, "/") && !strings.HasPrefix(link, "//") {
+ return setting.AppURL + strings.TrimPrefix(link, setting.AppSubURL)[1:]
+ }
+ // otherwise, return the link as it is
+ return link
+}
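As a worked example of the link composition above (assumed settings, illustrative only, not part of this patch):

// Assuming setting.AppSubURL = "/gitea" and setting.AppURL = "https://example.org/gitea/",
// a repository whose avatar column holds "7" resolves as:
//   repo.RelAvatarLink(ctx) -> "/gitea/repo-avatars/7"
//   repo.AvatarLink(ctx)    -> "https://example.org/gitea/repo-avatars/7"
// With an empty avatar column, setting.RepoAvatar.Fallback decides: "image"
// returns setting.RepoAvatar.FallbackImage, "random" generates and stores a PNG
// keyed by the repository ID, and any other value returns an empty link.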
diff --git a/models/repo/collaboration.go b/models/repo/collaboration.go
new file mode 100644
index 0000000..cb66cb5
--- /dev/null
+++ b/models/repo/collaboration.go
@@ -0,0 +1,170 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// Collaboration represents the relation between an individual and a repository.
+type Collaboration struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Mode perm.AccessMode `xorm:"DEFAULT 2 NOT NULL"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(Collaboration))
+}
+
+// Collaborator represents a user with collaboration details.
+type Collaborator struct {
+ *user_model.User
+ Collaboration *Collaboration
+}
+
+// GetCollaborators returns the collaborators for a repository
+func GetCollaborators(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*Collaborator, error) {
+ collaborations, err := db.Find[Collaboration](ctx, FindCollaborationOptions{
+ ListOptions: listOptions,
+ RepoID: repoID,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("db.Find[Collaboration]: %w", err)
+ }
+
+ collaborators := make([]*Collaborator, 0, len(collaborations))
+ userIDs := make([]int64, 0, len(collaborations))
+ for _, c := range collaborations {
+ userIDs = append(userIDs, c.UserID)
+ }
+
+ usersMap := make(map[int64]*user_model.User)
+ if err := db.GetEngine(ctx).In("id", userIDs).Find(&usersMap); err != nil {
+ return nil, fmt.Errorf("Find users map by user ids: %w", err)
+ }
+
+ for _, c := range collaborations {
+ u := usersMap[c.UserID]
+ if u == nil {
+ u = user_model.NewGhostUser()
+ }
+ collaborators = append(collaborators, &Collaborator{
+ User: u,
+ Collaboration: c,
+ })
+ }
+ return collaborators, nil
+}
+
+// GetCollaboration gets the collaboration between a repository and a user, returning nil if none exists
+func GetCollaboration(ctx context.Context, repoID, uid int64) (*Collaboration, error) {
+ collaboration := &Collaboration{
+ RepoID: repoID,
+ UserID: uid,
+ }
+ has, err := db.GetEngine(ctx).Get(collaboration)
+ if !has {
+ collaboration = nil
+ }
+ return collaboration, err
+}
+
+// IsCollaborator checks if a user is a collaborator of a repository
+func IsCollaborator(ctx context.Context, repoID, userID int64) (bool, error) {
+ return db.GetEngine(ctx).Get(&Collaboration{RepoID: repoID, UserID: userID})
+}
+
+type FindCollaborationOptions struct {
+ db.ListOptions
+ RepoID int64
+}
+
+func (opts FindCollaborationOptions) ToConds() builder.Cond {
+ return builder.And(builder.Eq{"repo_id": opts.RepoID})
+}
+
+// ChangeCollaborationAccessMode sets new access mode for the collaboration.
+func ChangeCollaborationAccessMode(ctx context.Context, repo *Repository, uid int64, mode perm.AccessMode) error {
+ // Discard invalid input
+ if mode <= perm.AccessModeNone || mode > perm.AccessModeOwner {
+ return nil
+ }
+
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ e := db.GetEngine(ctx)
+
+ collaboration := &Collaboration{
+ RepoID: repo.ID,
+ UserID: uid,
+ }
+ has, err := e.Get(collaboration)
+ if err != nil {
+ return fmt.Errorf("get collaboration: %w", err)
+ } else if !has {
+ return nil
+ }
+
+ if collaboration.Mode == mode {
+ return nil
+ }
+ collaboration.Mode = mode
+
+ if _, err = e.
+ ID(collaboration.ID).
+ Cols("mode").
+ Update(collaboration); err != nil {
+ return fmt.Errorf("update collaboration: %w", err)
+ } else if _, err = e.Exec("UPDATE access SET mode = ? WHERE user_id = ? AND repo_id = ?", mode, uid, repo.ID); err != nil {
+ return fmt.Errorf("update access table: %w", err)
+ }
+
+ return nil
+ })
+}
+
+// GetCollaboratorWithUser returns all collaborator IDs of collabUserID on
+// repositories of ownerID.
+func GetCollaboratorWithUser(ctx context.Context, ownerID, collabUserID int64) ([]int64, error) {
+ collabsID := make([]int64, 0, 8)
+ err := db.GetEngine(ctx).Table("collaboration").Select("collaboration.`id`").
+ Join("INNER", "repository", "repository.id = collaboration.repo_id").
+ Where("repository.`owner_id` = ?", ownerID).
+ And("collaboration.`user_id` = ?", collabUserID).
+ Find(&collabsID)
+
+ return collabsID, err
+}
+
+// IsOwnerMemberCollaborator checks if a provided user is the owner, a collaborator or a member of a team in a repository
+func IsOwnerMemberCollaborator(ctx context.Context, repo *Repository, userID int64) (bool, error) {
+ if repo.OwnerID == userID {
+ return true, nil
+ }
+ teamMember, err := db.GetEngine(ctx).Join("INNER", "team_repo", "team_repo.team_id = team_user.team_id").
+ Join("INNER", "team_unit", "team_unit.team_id = team_user.team_id").
+ Where("team_repo.repo_id = ?", repo.ID).
+ And("team_unit.`type` = ?", unit.TypeCode).
+ And("team_user.uid = ?", userID).Table("team_user").Exist()
+ if err != nil {
+ return false, err
+ }
+ if teamMember {
+ return true, nil
+ }
+
+ return db.GetEngine(ctx).Get(&Collaboration{RepoID: repo.ID, UserID: userID})
+}
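GetCollaborators above substitutes a ghost user for accounts that no longer exist, so callers can rely on Collaborator.User being non-nil. A paging sketch (not part of this patch; ctx and repoID are assumed to be in scope):

// List the first page of collaborators and print their access modes.
collaborators, err := repo_model.GetCollaborators(ctx, repoID, db.ListOptions{Page: 1, PageSize: 20})
if err != nil {
	return err
}
for _, c := range collaborators {
	fmt.Printf("%s has access mode %d\n", c.User.Name, c.Collaboration.Mode)
}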
diff --git a/models/repo/collaboration_test.go b/models/repo/collaboration_test.go
new file mode 100644
index 0000000..5adedfe
--- /dev/null
+++ b/models/repo/collaboration_test.go
@@ -0,0 +1,186 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepository_GetCollaborators(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ test := func(repoID int64) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ collaborators, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{})
+ require.NoError(t, err)
+ expectedLen, err := db.GetEngine(db.DefaultContext).Count(&repo_model.Collaboration{RepoID: repoID})
+ require.NoError(t, err)
+ assert.Len(t, collaborators, int(expectedLen))
+ for _, collaborator := range collaborators {
+ assert.EqualValues(t, collaborator.User.ID, collaborator.Collaboration.UserID)
+ assert.EqualValues(t, repoID, collaborator.Collaboration.RepoID)
+ }
+ }
+ test(1)
+ test(2)
+ test(3)
+ test(4)
+
+ // Test db.ListOptions
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
+
+ collaborators1, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, collaborators1, 1)
+
+ collaborators2, err := repo_model.GetCollaborators(db.DefaultContext, repo.ID, db.ListOptions{PageSize: 1, Page: 2})
+ require.NoError(t, err)
+ assert.Len(t, collaborators2, 1)
+
+ assert.NotEqualValues(t, collaborators1[0].ID, collaborators2[0].ID)
+}
+
+func TestRepository_IsCollaborator(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ test := func(repoID, userID int64, expected bool) {
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
+ actual, err := repo_model.IsCollaborator(db.DefaultContext, repo.ID, userID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+ }
+ test(3, 2, true)
+ test(3, unittest.NonexistentID, false)
+ test(4, 2, false)
+ test(4, 4, true)
+}
+
+func TestRepository_ChangeCollaborationAccessMode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessModeAdmin))
+
+ collaboration := unittest.AssertExistsAndLoadBean(t, &repo_model.Collaboration{RepoID: repo.ID, UserID: 4})
+ assert.EqualValues(t, perm.AccessModeAdmin, collaboration.Mode)
+
+ access := unittest.AssertExistsAndLoadBean(t, &access_model.Access{UserID: 4, RepoID: repo.ID})
+ assert.EqualValues(t, perm.AccessModeAdmin, access.Mode)
+
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessModeAdmin))
+
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, unittest.NonexistentID, perm.AccessModeAdmin))
+
+ // Discard invalid input.
+ require.NoError(t, repo_model.ChangeCollaborationAccessMode(db.DefaultContext, repo, 4, perm.AccessMode(unittest.NonexistentID)))
+
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repo.ID})
+}
+
+func TestRepository_CountCollaborators(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ count, err := db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: repo1.ID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, count)
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 22})
+ count, err = db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: repo2.ID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 2, count)
+
+ // Non-existent repository.
+ count, err = db.Count[repo_model.Collaboration](db.DefaultContext, repo_model.FindCollaborationOptions{
+ RepoID: unittest.NonexistentID,
+ })
+ require.NoError(t, err)
+ assert.EqualValues(t, 0, count)
+}
+
+func TestRepository_IsOwnerMemberCollaborator(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ // Organisation owner.
+ actual, err := repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 2)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ // Team member.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 4)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ // Normal user.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo1, 1)
+ require.NoError(t, err)
+ assert.False(t, actual)
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+
+ // Collaborator.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo2, 4)
+ require.NoError(t, err)
+ assert.True(t, actual)
+
+ repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 15})
+
+ // Repository owner.
+ actual, err = repo_model.IsOwnerMemberCollaborator(db.DefaultContext, repo3, 2)
+ require.NoError(t, err)
+ assert.True(t, actual)
+}
+
+func TestRepo_GetCollaboration(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+
+ // Existing collaboration.
+ collab, err := repo_model.GetCollaboration(db.DefaultContext, repo.ID, 4)
+ require.NoError(t, err)
+ assert.NotNil(t, collab)
+ assert.EqualValues(t, 4, collab.UserID)
+ assert.EqualValues(t, 4, collab.RepoID)
+
+ // Non-existing collaboration.
+ collab, err = repo_model.GetCollaboration(db.DefaultContext, repo.ID, 1)
+ require.NoError(t, err)
+ assert.Nil(t, collab)
+}
+
+func TestGetCollaboratorWithUser(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user16 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 16})
+ user15 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 15})
+ user18 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 18})
+
+ collabs, err := repo_model.GetCollaboratorWithUser(db.DefaultContext, user16.ID, user15.ID)
+ require.NoError(t, err)
+ assert.Len(t, collabs, 2)
+ assert.EqualValues(t, 5, collabs[0])
+ assert.EqualValues(t, 7, collabs[1])
+
+ collabs, err = repo_model.GetCollaboratorWithUser(db.DefaultContext, user16.ID, user18.ID)
+ require.NoError(t, err)
+ assert.Len(t, collabs, 2)
+ assert.EqualValues(t, 6, collabs[0])
+ assert.EqualValues(t, 8, collabs[1])
+}
diff --git a/models/repo/following_repo.go b/models/repo/following_repo.go
new file mode 100644
index 0000000..85b96aa
--- /dev/null
+++ b/models/repo/following_repo.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "code.gitea.io/gitea/modules/validation"
+)
+
+// FollowingRepo represents a federated Repository Actor connected with a local Repo
+type FollowingRepo struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ ExternalID string `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ FederationHostID int64 `xorm:"UNIQUE(federation_repo_mapping) NOT NULL"`
+ URI string
+}
+
+func NewFollowingRepo(repoID int64, externalID string, federationHostID int64, uri string) (FollowingRepo, error) {
+ result := FollowingRepo{
+ RepoID: repoID,
+ ExternalID: externalID,
+ FederationHostID: federationHostID,
+ URI: uri,
+ }
+ if valid, err := validation.IsValid(result); !valid {
+ return FollowingRepo{}, err
+ }
+ return result, nil
+}
+
+func (user FollowingRepo) Validate() []string {
+ var result []string
+ result = append(result, validation.ValidateNotEmpty(user.RepoID, "RepoID")...)
+ result = append(result, validation.ValidateNotEmpty(user.ExternalID, "ExternalID")...)
+ result = append(result, validation.ValidateNotEmpty(user.FederationHostID, "FederationHostID")...)
+ result = append(result, validation.ValidateNotEmpty(user.URI, "Uri")...)
+ return result
+}
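
A minimal sketch of how this constructor might be used; the IDs and URI below are hypothetical. NewFollowingRepo runs Validate before the value is handed to any persistence code:

package example

import (
	repo_model "code.gitea.io/gitea/models/repo"
)

// newFollowingRepoSketch builds a FollowingRepo; an error here means one of
// the Validate rules (non-empty RepoID, ExternalID, FederationHostID, URI) failed.
func newFollowingRepoSketch() (repo_model.FollowingRepo, error) {
	return repo_model.NewFollowingRepo(1, "42", 1, "https://example.org/api/v1/activitypub/repo-id/42")
}
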
diff --git a/models/repo/following_repo_test.go b/models/repo/following_repo_test.go
new file mode 100644
index 0000000..d0dd0a3
--- /dev/null
+++ b/models/repo/following_repo_test.go
@@ -0,0 +1,31 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func Test_FollowingRepoValidation(t *testing.T) {
+ sut := FollowingRepo{
+ RepoID: 12,
+ ExternalID: "12",
+ FederationHostID: 1,
+ URI: "http://localhost:3000/api/v1/activitypub/repo-id/1",
+ }
+ if res, err := validation.IsValid(sut); !res {
+ t.Errorf("sut should be valid but was %q", err)
+ }
+
+ sut = FollowingRepo{
+ ExternalID: "12",
+ FederationHostID: 1,
+ URI: "http://localhost:3000/api/v1/activitypub/repo-id/1",
+ }
+ if res, _ := validation.IsValid(sut); res {
+ t.Errorf("sut should be invalid")
+ }
+}
diff --git a/models/repo/fork.go b/models/repo/fork.go
new file mode 100644
index 0000000..632e91c
--- /dev/null
+++ b/models/repo/fork.go
@@ -0,0 +1,120 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "xorm.io/builder"
+)
+
+// GetRepositoriesByForkID returns all repositories with given fork ID.
+func GetRepositoriesByForkID(ctx context.Context, forkID int64) ([]*Repository, error) {
+ repos := make([]*Repository, 0, 10)
+ return repos, db.GetEngine(ctx).
+ Where("fork_id=?", forkID).
+ Find(&repos)
+}
+
+// GetForkedRepo returns the given user's fork of the repository with the given ID, or nil if it does not exist.
+func GetForkedRepo(ctx context.Context, ownerID, repoID int64) *Repository {
+ repo := new(Repository)
+ has, _ := db.GetEngine(ctx).
+ Where("owner_id=? AND fork_id=?", ownerID, repoID).
+ Get(repo)
+ if has {
+ return repo
+ }
+ return nil
+}
+
+// HasForkedRepo checks whether the given user has already forked the repository with the given ID.
+func HasForkedRepo(ctx context.Context, ownerID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).
+ Table("repository").
+ Where("owner_id=? AND fork_id=?", ownerID, repoID).
+ Exist()
+ return has
+}
+
+// GetUserFork returns the user's fork of the given repository; it returns nil if the user has not forked it.
+func GetUserFork(ctx context.Context, repoID, userID int64) (*Repository, error) {
+ var forkedRepo Repository
+ has, err := db.GetEngine(ctx).Where("fork_id = ?", repoID).And("owner_id = ?", userID).Get(&forkedRepo)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, nil
+ }
+ return &forkedRepo, nil
+}
+
+// GetForks returns all the forks of the repository that are visible to the user.
+func GetForks(ctx context.Context, repo *Repository, user *user_model.User, listOptions db.ListOptions) ([]*Repository, int64, error) {
+ sess := db.GetEngine(ctx).Where(AccessibleRepositoryCondition(user, unit.TypeInvalid))
+
+ var forks []*Repository
+ if listOptions.Page == 0 {
+ forks = make([]*Repository, 0, repo.NumForks)
+ } else {
+ forks = make([]*Repository, 0, listOptions.PageSize)
+ sess = db.SetSessionPagination(sess, &listOptions)
+ }
+
+ count, err := sess.FindAndCount(&forks, &Repository{ForkID: repo.ID})
+ return forks, count, err
+}
+
+// IncrementRepoForkNum increments the repository fork number
+func IncrementRepoForkNum(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", repoID)
+ return err
+}
+
+// DecrementRepoForkNum decrements the repository fork number
+func DecrementRepoForkNum(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repoID)
+ return err
+}
+
+// FindUserOrgForks returns the forks of a repository owned by organizations the user is a member of
+func FindUserOrgForks(ctx context.Context, repoID, userID int64) ([]*Repository, error) {
+ cond := builder.And(
+ builder.Eq{"fork_id": repoID},
+ builder.In("owner_id",
+ builder.Select("org_id").
+ From("org_user").
+ Where(builder.Eq{"uid": userID}),
+ ),
+ )
+
+ var repos []*Repository
+ return repos, db.GetEngine(ctx).Table("repository").Where(cond).Find(&repos)
+}
+
+// GetForksByUserAndOrgs returns the forks of a repository owned by the user or by organizations the user belongs to
+func GetForksByUserAndOrgs(ctx context.Context, user *user_model.User, repo *Repository) ([]*Repository, error) {
+ var repoList []*Repository
+ if user == nil {
+ return repoList, nil
+ }
+ forkedRepo, err := GetUserFork(ctx, repo.ID, user.ID)
+ if err != nil {
+ return repoList, err
+ }
+ if forkedRepo != nil {
+ repoList = append(repoList, forkedRepo)
+ }
+ orgForks, err := FindUserOrgForks(ctx, repo.ID, user.ID)
+ if err != nil {
+ return nil, err
+ }
+ repoList = append(repoList, orgForks...)
+ return repoList, nil
+}
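
A small usage sketch of the fork helpers above (a check-then-load pattern; the IDs are whatever the caller has at hand, nothing here is prescribed by the diff):

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// userForkSketch returns the user's fork of repoID if one exists, nil otherwise.
func userForkSketch(ctx context.Context, ownerID, repoID int64) (*repo_model.Repository, error) {
	if !repo_model.HasForkedRepo(ctx, ownerID, repoID) {
		return nil, nil // no fork owned directly by this user
	}
	// GetUserFork also returns (nil, nil) when no fork exists.
	return repo_model.GetUserFork(ctx, repoID, ownerID)
}
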
diff --git a/models/repo/fork_test.go b/models/repo/fork_test.go
new file mode 100644
index 0000000..dd12429
--- /dev/null
+++ b/models/repo/fork_test.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetUserFork(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // User13 has repo 11 forked from repo10
+ repo, err := repo_model.GetRepositoryByID(db.DefaultContext, 10)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+ repo, err = repo_model.GetUserFork(db.DefaultContext, repo.ID, 13)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+
+ repo, err = repo_model.GetRepositoryByID(db.DefaultContext, 9)
+ require.NoError(t, err)
+ assert.NotNil(t, repo)
+ repo, err = repo_model.GetUserFork(db.DefaultContext, repo.ID, 13)
+ require.NoError(t, err)
+ assert.Nil(t, repo)
+}
diff --git a/models/repo/git.go b/models/repo/git.go
new file mode 100644
index 0000000..388bf86
--- /dev/null
+++ b/models/repo/git.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+)
+
+// MergeStyle represents the approach to merge commits into base branch.
+type MergeStyle string
+
+const (
+ // MergeStyleMerge creates a merge commit
+ MergeStyleMerge MergeStyle = "merge"
+ // MergeStyleRebase rebases before merging and fast-forwards
+ MergeStyleRebase MergeStyle = "rebase"
+ // MergeStyleRebaseMerge rebases before merging with a merge commit (--no-ff)
+ MergeStyleRebaseMerge MergeStyle = "rebase-merge"
+ // MergeStyleSquash squashes commits into a single commit before merging
+ MergeStyleSquash MergeStyle = "squash"
+ // MergeStyleFastForwardOnly fast-forward merges if possible, otherwise fails
+ MergeStyleFastForwardOnly MergeStyle = "fast-forward-only"
+ // MergeStyleManuallyMerged means the PR has been merged manually; just mark it as merged directly
+ MergeStyleManuallyMerged MergeStyle = "manually-merged"
+ // MergeStyleRebaseUpdate is not a merge style; it is used to update the pull head by rebase
+ MergeStyleRebaseUpdate MergeStyle = "rebase-update-only"
+)
+
+// UpdateDefaultBranch updates the default branch
+func UpdateDefaultBranch(ctx context.Context, repo *Repository) error {
+ _, err := db.GetEngine(ctx).ID(repo.ID).Cols("default_branch").Update(repo)
+ return err
+}
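
The MergeStyle constants are plain strings, so callers typically round-trip them through forms or API payloads. A hedged sketch of such a conversion (the set of allowed styles is taken from the constants above; the fallback choice is an assumption):

package example

import repo_model "code.gitea.io/gitea/models/repo"

// parseMergeStyleSketch maps a user-supplied string onto a known MergeStyle,
// falling back to MergeStyleMerge for anything unrecognised.
func parseMergeStyleSketch(s string) repo_model.MergeStyle {
	switch repo_model.MergeStyle(s) {
	case repo_model.MergeStyleMerge, repo_model.MergeStyleRebase, repo_model.MergeStyleRebaseMerge,
		repo_model.MergeStyleSquash, repo_model.MergeStyleFastForwardOnly:
		return repo_model.MergeStyle(s)
	default:
		return repo_model.MergeStyleMerge
	}
}
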
diff --git a/models/repo/issue.go b/models/repo/issue.go
new file mode 100644
index 0000000..0dd4fd5
--- /dev/null
+++ b/models/repo/issue.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// ___________.__ ___________ __
+// \__ ___/|__| _____ ___\__ ___/___________ ____ | | __ ___________
+// | | | |/ \_/ __ \| | \_ __ \__ \ _/ ___\| |/ // __ \_ __ \
+// | | | | Y Y \ ___/| | | | \// __ \\ \___| <\ ___/| | \/
+// |____| |__|__|_| /\___ >____| |__| (____ /\___ >__|_ \\___ >__|
+// \/ \/ \/ \/ \/ \/
+
+// CanEnableTimetracker returns true when the server admin enabled time tracking
+// This overrules IsTimetrackerEnabled
+func (repo *Repository) CanEnableTimetracker() bool {
+ return setting.Service.EnableTimetracking
+}
+
+// IsTimetrackerEnabled returns whether or not the timetracker is enabled. It returns the default value from config if an error occurs.
+func (repo *Repository) IsTimetrackerEnabled(ctx context.Context) bool {
+ if !setting.Service.EnableTimetracking {
+ return false
+ }
+
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ return setting.Service.DefaultEnableTimetracking
+ }
+ return u.IssuesConfig().EnableTimetracker
+}
+
+// AllowOnlyContributorsToTrackTime returns value of IssuesConfig or the default value
+func (repo *Repository) AllowOnlyContributorsToTrackTime(ctx context.Context) bool {
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ return setting.Service.DefaultAllowOnlyContributorsToTrackTime
+ }
+ return u.IssuesConfig().AllowOnlyContributorsToTrackTime
+}
+
+// IsDependenciesEnabled returns whether dependencies are enabled; it returns the default setting if not set.
+func (repo *Repository) IsDependenciesEnabled(ctx context.Context) bool {
+ var u *RepoUnit
+ var err error
+ if u, err = repo.GetUnit(ctx, unit.TypeIssues); err != nil {
+ log.Trace("IsDependenciesEnabled: %v", err)
+ return setting.Service.DefaultEnableDependencies
+ }
+ return u.IssuesConfig().EnableDependencies
+}
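
These unit-config helpers all follow the same pattern: read the issues unit and fall back to the instance default when the unit cannot be loaded. A sketch of how a caller might combine them (isContributor is assumed to be computed elsewhere):

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// canTrackTimeSketch: time tracking must be enabled on the repository, and the
// contributor-only restriction may further limit who is allowed to track time.
func canTrackTimeSketch(ctx context.Context, repo *repo_model.Repository, isContributor bool) bool {
	if !repo.IsTimetrackerEnabled(ctx) {
		return false
	}
	return isContributor || !repo.AllowOnlyContributorsToTrackTime(ctx)
}
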
diff --git a/models/repo/language_stats.go b/models/repo/language_stats.go
new file mode 100644
index 0000000..0bc0f1f
--- /dev/null
+++ b/models/repo/language_stats.go
@@ -0,0 +1,242 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "math"
+ "sort"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/go-enry/go-enry/v2"
+)
+
+// LanguageStat describes language statistics of a repository
+type LanguageStat struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ CommitID string
+ IsPrimary bool
+ Language string `xorm:"VARCHAR(50) UNIQUE(s) INDEX NOT NULL"`
+ Percentage float32 `xorm:"-"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ Color string `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+}
+
+func init() {
+ db.RegisterModel(new(LanguageStat))
+}
+
+// LanguageStatList defines a list of language statistics
+type LanguageStatList []*LanguageStat
+
+// LoadAttributes loads attributes
+func (stats LanguageStatList) LoadAttributes() {
+ for i := range stats {
+ stats[i].Color = enry.GetColor(stats[i].Language)
+ }
+}
+
+func (stats LanguageStatList) getLanguagePercentages() map[string]float32 {
+ langPerc := make(map[string]float32)
+ var otherPerc float32
+ var total int64
+
+ for _, stat := range stats {
+ total += stat.Size
+ }
+ if total > 0 {
+ for _, stat := range stats {
+ perc := float32(float64(stat.Size) / float64(total) * 100)
+ if perc <= 0.1 {
+ otherPerc += perc
+ continue
+ }
+ langPerc[stat.Language] = perc
+ }
+ }
+ if otherPerc > 0 {
+ langPerc["other"] = otherPerc
+ }
+ roundByLargestRemainder(langPerc, 100)
+ return langPerc
+}
+
+// Rounds to 1 decimal place; target should be the expected sum of percs
+func roundByLargestRemainder(percs map[string]float32, target float32) {
+ leftToDistribute := int(target * 10)
+
+ keys := make([]string, 0, len(percs))
+
+ for k, v := range percs {
+ percs[k] = v * 10
+ floored := math.Floor(float64(percs[k]))
+ leftToDistribute -= int(floored)
+ keys = append(keys, k)
+ }
+
+ // Sort the keys by the largest remainder
+ sort.SliceStable(keys, func(i, j int) bool {
+ _, remainderI := math.Modf(float64(percs[keys[i]]))
+ _, remainderJ := math.Modf(float64(percs[keys[j]]))
+ return remainderI > remainderJ
+ })
+
+ // Increment the values in order of largest remainder
+ for _, k := range keys {
+ percs[k] = float32(math.Floor(float64(percs[k])))
+ if leftToDistribute > 0 {
+ percs[k]++
+ leftToDistribute--
+ }
+ percs[k] /= 10
+ }
+}
+
+// GetLanguageStats returns the language statistics for a repository
+func GetLanguageStats(ctx context.Context, repo *Repository) (LanguageStatList, error) {
+ stats := make(LanguageStatList, 0, 6)
+ if err := db.GetEngine(ctx).Where("`repo_id` = ?", repo.ID).Desc("`size`").Find(&stats); err != nil {
+ return nil, err
+ }
+ return stats, nil
+}
+
+// GetTopLanguageStats returns the top language statistics for a repository
+func GetTopLanguageStats(ctx context.Context, repo *Repository, limit int) (LanguageStatList, error) {
+ stats, err := GetLanguageStats(ctx, repo)
+ if err != nil {
+ return nil, err
+ }
+ perc := stats.getLanguagePercentages()
+ topstats := make(LanguageStatList, 0, limit)
+ var other float32
+ for i := range stats {
+ if _, ok := perc[stats[i].Language]; !ok {
+ continue
+ }
+ if stats[i].Language == "other" || len(topstats) >= limit {
+ other += perc[stats[i].Language]
+ continue
+ }
+ stats[i].Percentage = perc[stats[i].Language]
+ topstats = append(topstats, stats[i])
+ }
+ if other > 0 {
+ topstats = append(topstats, &LanguageStat{
+ RepoID: repo.ID,
+ Language: "other",
+ Color: "#cccccc",
+ Percentage: float32(math.Round(float64(other)*10) / 10),
+ })
+ }
+ topstats.LoadAttributes()
+ return topstats, nil
+}
+
+// UpdateLanguageStats updates the language statistics for a repository
+func UpdateLanguageStats(ctx context.Context, repo *Repository, commitID string, stats map[string]int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ oldstats, err := GetLanguageStats(ctx, repo)
+ if err != nil {
+ return err
+ }
+ var topLang string
+ var s int64
+ for lang, size := range stats {
+ if size > s {
+ s = size
+ topLang = strings.ToLower(lang)
+ }
+ }
+
+ for lang, size := range stats {
+ upd := false
+ llang := strings.ToLower(lang)
+ for _, s := range oldstats {
+ // Update already existing language
+ if strings.ToLower(s.Language) == llang {
+ s.CommitID = commitID
+ s.IsPrimary = llang == topLang
+ s.Size = size
+ if _, err := sess.ID(s.ID).Cols("`commit_id`", "`size`", "`is_primary`").Update(s); err != nil {
+ return err
+ }
+ upd = true
+ break
+ }
+ }
+ // Insert new language
+ if !upd {
+ if err := db.Insert(ctx, &LanguageStat{
+ RepoID: repo.ID,
+ CommitID: commitID,
+ IsPrimary: llang == topLang,
+ Language: lang,
+ Size: size,
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ // Delete old languages
+ statsToDelete := make([]int64, 0, len(oldstats))
+ for _, s := range oldstats {
+ if s.CommitID != commitID {
+ statsToDelete = append(statsToDelete, s.ID)
+ }
+ }
+ if len(statsToDelete) > 0 {
+ if _, err := sess.In("`id`", statsToDelete).Delete(&LanguageStat{}); err != nil {
+ return err
+ }
+ }
+
+ // Update indexer status
+ if err = UpdateIndexerStatus(ctx, repo, RepoIndexerTypeStats, commitID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// CopyLanguageStat copies originalRepo's language stat information to destRepo (used for forked repositories)
+func CopyLanguageStat(ctx context.Context, originalRepo, destRepo *Repository) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ RepoLang := make(LanguageStatList, 0, 6)
+ if err := db.GetEngine(ctx).Where("`repo_id` = ?", originalRepo.ID).Desc("`size`").Find(&RepoLang); err != nil {
+ return err
+ }
+ if len(RepoLang) > 0 {
+ for i := range RepoLang {
+ RepoLang[i].ID = 0
+ RepoLang[i].RepoID = destRepo.ID
+ RepoLang[i].CreatedUnix = timeutil.TimeStampNow()
+ }
+ // update destRepo's indexer status
+ tmpCommitID := RepoLang[0].CommitID
+ if err := UpdateIndexerStatus(ctx, destRepo, RepoIndexerTypeStats, tmpCommitID); err != nil {
+ return err
+ }
+ if err := db.Insert(ctx, &RepoLang); err != nil {
+ return err
+ }
+ }
+ return committer.Commit()
+}
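
The largest-remainder rounding above keeps the one-decimal percentages summing to exactly 100.0: for example, three languages at 33.333 % each floor to 33.3 + 33.3 + 33.3 = 99.9, and the remaining 0.1 goes to the entry with the largest fractional remainder, yielding 33.4/33.3/33.3. A read-path sketch (the limit of 6 mirrors the slice capacity used above, but any value works):

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// topLanguagesSketch returns the top six languages plus an aggregated "other"
// entry, with Percentage and Color already filled in by GetTopLanguageStats.
func topLanguagesSketch(ctx context.Context, repo *repo_model.Repository) (repo_model.LanguageStatList, error) {
	return repo_model.GetTopLanguageStats(ctx, repo, 6)
}
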
diff --git a/models/repo/main_test.go b/models/repo/main_test.go
new file mode 100644
index 0000000..b49855f
--- /dev/null
+++ b/models/repo/main_test.go
@@ -0,0 +1,21 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models" // register table model
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+ _ "code.gitea.io/gitea/models/perm/access" // register table model
+ _ "code.gitea.io/gitea/models/repo" // register table model
+ _ "code.gitea.io/gitea/models/user" // register table model
+)
+
+func TestMain(m *testing.M) {
+ unittest.MainTest(m)
+}
diff --git a/models/repo/mirror.go b/models/repo/mirror.go
new file mode 100644
index 0000000..be7b785
--- /dev/null
+++ b/models/repo/mirror.go
@@ -0,0 +1,123 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrMirrorNotExist mirror does not exist error
+var ErrMirrorNotExist = util.NewNotExistErrorf("Mirror does not exist")
+
+// Mirror represents mirror information of a repository.
+type Mirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *Repository `xorm:"-"`
+ Interval time.Duration
+ EnablePrune bool `xorm:"NOT NULL DEFAULT true"`
+
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`
+
+ LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+ LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+}
+
+func init() {
+ db.RegisterModel(new(Mirror))
+}
+
+// BeforeInsert will be invoked by XORM before inserting a record
+func (m *Mirror) BeforeInsert() {
+ if m != nil {
+ m.UpdatedUnix = timeutil.TimeStampNow()
+ m.NextUpdateUnix = timeutil.TimeStampNow()
+ }
+}
+
+// GetRepository returns the repository.
+func (m *Mirror) GetRepository(ctx context.Context) *Repository {
+ if m.Repo != nil {
+ return m.Repo
+ }
+ var err error
+ m.Repo, err = GetRepositoryByID(ctx, m.RepoID)
+ if err != nil {
+ log.Error("getRepositoryByID[%d]: %v", m.ID, err)
+ }
+ return m.Repo
+}
+
+// GetRemoteName returns the name of the remote.
+func (m *Mirror) GetRemoteName() string {
+ return "origin"
+}
+
+// ScheduleNextUpdate calculates and sets next update time.
+func (m *Mirror) ScheduleNextUpdate() {
+ if m.Interval != 0 {
+ m.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(m.Interval)
+ } else {
+ m.NextUpdateUnix = 0
+ }
+}
+
+// GetMirrorByRepoID returns mirror information of a repository.
+func GetMirrorByRepoID(ctx context.Context, repoID int64) (*Mirror, error) {
+ m := &Mirror{RepoID: repoID}
+ has, err := db.GetEngine(ctx).Get(m)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrMirrorNotExist
+ }
+ return m, nil
+}
+
+// UpdateMirror updates the mirror
+func UpdateMirror(ctx context.Context, m *Mirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
+ return err
+}
+
+// TouchMirror updates the mirror's updated_unix timestamp
+func TouchMirror(ctx context.Context, m *Mirror) error {
+ m.UpdatedUnix = timeutil.TimeStampNow()
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("updated_unix").Update(m)
+ return err
+}
+
+// DeleteMirrorByRepoID deletes a mirror by repoID
+func DeleteMirrorByRepoID(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Delete(&Mirror{RepoID: repoID})
+ return err
+}
+
+// MirrorsIterate iterates all mirror repositories.
+func MirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) error) error {
+ sess := db.GetEngine(ctx).
+ Where("next_update_unix<=?", time.Now().Unix()).
+ And("next_update_unix!=0").
+ OrderBy("updated_unix ASC")
+ if limit > 0 {
+ sess = sess.Limit(limit)
+ }
+ return sess.Iterate(new(Mirror), f)
+}
+
+// InsertMirror inserts a mirror to database
+func InsertMirror(ctx context.Context, mirror *Mirror) error {
+ _, err := db.GetEngine(ctx).Insert(mirror)
+ return err
+}
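
MirrorsIterate is the hook the mirror-sync service builds on: it walks mirrors whose next_update_unix is due. A hedged sketch of a caller (the real queueing logic lives elsewhere and is not part of this diff):

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// rescheduleDueMirrorsSketch visits up to limit due mirrors and pushes their
// next update time forward according to their configured interval.
func rescheduleDueMirrorsSketch(ctx context.Context, limit int) error {
	return repo_model.MirrorsIterate(ctx, limit, func(idx int, bean any) error {
		m := bean.(*repo_model.Mirror)
		m.ScheduleNextUpdate()
		return repo_model.UpdateMirror(ctx, m)
	})
}
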
diff --git a/models/repo/pushmirror.go b/models/repo/pushmirror.go
new file mode 100644
index 0000000..68fb504
--- /dev/null
+++ b/models/repo/pushmirror.go
@@ -0,0 +1,188 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ giturl "code.gitea.io/gitea/modules/git/url"
+ "code.gitea.io/gitea/modules/keying"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrPushMirrorNotExist mirror does not exist error
+var ErrPushMirrorNotExist = util.NewNotExistErrorf("PushMirror does not exist")
+
+// PushMirror represents mirror information of a repository.
+type PushMirror struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX"`
+ Repo *Repository `xorm:"-"`
+ RemoteName string
+ RemoteAddress string `xorm:"VARCHAR(2048)"`
+
+ // A keypair formatted in OpenSSH format.
+ PublicKey string `xorm:"VARCHAR(100)"`
+ PrivateKey []byte `xorm:"BLOB"`
+
+ SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"`
+ Interval time.Duration
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+ LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"`
+ LastError string `xorm:"text"`
+}
+
+type PushMirrorOptions struct {
+ db.ListOptions
+ ID int64
+ RepoID int64
+ RemoteName string
+}
+
+func (opts PushMirrorOptions) ToConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+ }
+ if opts.RemoteName != "" {
+ cond = cond.And(builder.Eq{"remote_name": opts.RemoteName})
+ }
+ if opts.ID > 0 {
+ cond = cond.And(builder.Eq{"id": opts.ID})
+ }
+ return cond
+}
+
+func init() {
+ db.RegisterModel(new(PushMirror))
+}
+
+// GetRepository returns the repository.
+func (m *PushMirror) GetRepository(ctx context.Context) *Repository {
+ if m.Repo != nil {
+ return m.Repo
+ }
+ var err error
+ m.Repo, err = GetRepositoryByID(ctx, m.RepoID)
+ if err != nil {
+ log.Error("getRepositoryByID[%d]: %v", m.ID, err)
+ }
+ return m.Repo
+}
+
+// GetRemoteName returns the name of the remote.
+func (m *PushMirror) GetRemoteName() string {
+ return m.RemoteName
+}
+
+// GetPublicKey returns a sanitized version of the public key.
+// It should only be used when displaying the public key to the user, not for actual use of the key.
+func (m *PushMirror) GetPublicKey() string {
+ return strings.TrimSuffix(m.PublicKey, "\n")
+}
+
+// SetPrivatekey encrypts the given private key and stores it in the database.
+// The ID of the push mirror must be known, so this should be done after the
+// push mirror is inserted.
+func (m *PushMirror) SetPrivatekey(ctx context.Context, privateKey []byte) error {
+ key := keying.DeriveKey(keying.ContextPushMirror)
+ m.PrivateKey = key.Encrypt(privateKey, keying.ColumnAndID("private_key", m.ID))
+
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("private_key").Update(m)
+ return err
+}
+
+// Privatekey retrieves the encrypted private key and decrypts it.
+func (m *PushMirror) Privatekey() ([]byte, error) {
+ key := keying.DeriveKey(keying.ContextPushMirror)
+ return key.Decrypt(m.PrivateKey, keying.ColumnAndID("private_key", m.ID))
+}
+
+// UpdatePushMirror updates the push-mirror
+func UpdatePushMirror(ctx context.Context, m *PushMirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).AllCols().Update(m)
+ return err
+}
+
+// UpdatePushMirrorInterval updates the push-mirror's interval
+func UpdatePushMirrorInterval(ctx context.Context, m *PushMirror) error {
+ _, err := db.GetEngine(ctx).ID(m.ID).Cols("interval").Update(m)
+ return err
+}
+
+var DeletePushMirrors = deletePushMirrors
+
+func deletePushMirrors(ctx context.Context, opts PushMirrorOptions) error {
+ if opts.RepoID > 0 {
+ _, err := db.Delete[PushMirror](ctx, opts)
+ return err
+ }
+ return util.NewInvalidArgumentErrorf("repoID required and must be set")
+}
+
+// GetPushMirrorsByRepoID returns push-mirror information of a repository.
+func GetPushMirrorsByRepoID(ctx context.Context, repoID int64, listOptions db.ListOptions) ([]*PushMirror, int64, error) {
+ sess := db.GetEngine(ctx).Where("repo_id = ?", repoID)
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+ mirrors := make([]*PushMirror, 0, listOptions.PageSize)
+ count, err := sess.FindAndCount(&mirrors)
+ return mirrors, count, err
+ }
+ mirrors := make([]*PushMirror, 0, 10)
+ count, err := sess.FindAndCount(&mirrors)
+ return mirrors, count, err
+}
+
+// GetPushMirrorsSyncedOnCommit returns push-mirrors for this repo that should be updated by new commits
+func GetPushMirrorsSyncedOnCommit(ctx context.Context, repoID int64) ([]*PushMirror, error) {
+ mirrors := make([]*PushMirror, 0, 10)
+ return mirrors, db.GetEngine(ctx).
+ Where("repo_id = ? AND sync_on_commit = ?", repoID, true).
+ Find(&mirrors)
+}
+
+// PushMirrorsIterate iterates all push-mirror repositories.
+func PushMirrorsIterate(ctx context.Context, limit int, f func(idx int, bean any) error) error {
+ sess := db.GetEngine(ctx).
+ Table("push_mirror").
+ Join("INNER", "`repository`", "`repository`.id = `push_mirror`.repo_id").
+ Where("`push_mirror`.last_update + (`push_mirror`.`interval` / ?) <= ?", time.Second, time.Now().Unix()).
+ And("`push_mirror`.`interval` != 0").
+ And("`repository`.is_archived = ?", false).
+ OrderBy("last_update ASC")
+ if limit > 0 {
+ sess = sess.Limit(limit)
+ }
+ return sess.Iterate(new(PushMirror), f)
+}
+
+// GetPushMirrorRemoteAddress returns the address associated with a repository's given remote.
+func GetPushMirrorRemoteAddress(ownerName, repoName, remoteName string) (string, error) {
+ repoPath := filepath.Join(setting.RepoRootPath, strings.ToLower(ownerName), strings.ToLower(repoName)+".git")
+
+ remoteURL, err := git.GetRemoteAddress(context.Background(), repoPath, remoteName)
+ if err != nil {
+ return "", fmt.Errorf("get remote %s's address of %s/%s failed: %v", remoteName, ownerName, repoName, err)
+ }
+
+ u, err := giturl.Parse(remoteURL)
+ if err != nil {
+ return "", err
+ }
+ u.User = nil
+
+ return u.String(), nil
+}
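
SetPrivatekey binds the push-mirror ID into the ciphertext, so the key can only be set once the row exists. A sketch of the implied two-step flow (error handling trimmed to the essentials):

package example

import (
	"context"

	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
)

// addPushMirrorSketch inserts the mirror first so it has an ID, then encrypts
// and stores the private key; Privatekey() later reverses the encryption.
func addPushMirrorSketch(ctx context.Context, m *repo_model.PushMirror, privateKey []byte) error {
	if err := db.Insert(ctx, m); err != nil {
		return err
	}
	return m.SetPrivatekey(ctx, privateKey)
}
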
diff --git a/models/repo/pushmirror_test.go b/models/repo/pushmirror_test.go
new file mode 100644
index 0000000..c3368cc
--- /dev/null
+++ b/models/repo/pushmirror_test.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPushMirrorsIterate(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ now := timeutil.TimeStampNow()
+
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-1",
+ LastUpdateUnix: now,
+ Interval: 1,
+ })
+
+ long, _ := time.ParseDuration("24h")
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-2",
+ LastUpdateUnix: now,
+ Interval: long,
+ })
+
+ db.Insert(db.DefaultContext, &repo_model.PushMirror{
+ RemoteName: "test-3",
+ LastUpdateUnix: now,
+ Interval: 0,
+ })
+
+ time.Sleep(1 * time.Millisecond)
+
+ repo_model.PushMirrorsIterate(db.DefaultContext, 1, func(idx int, bean any) error {
+ m, ok := bean.(*repo_model.PushMirror)
+ assert.True(t, ok)
+ assert.Equal(t, "test-1", m.RemoteName)
+ assert.Equal(t, m.RemoteName, m.GetRemoteName())
+ return nil
+ })
+}
+
+func TestPushMirrorPrivatekey(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ m := &repo_model.PushMirror{
+ RemoteName: "test-privatekey",
+ }
+ require.NoError(t, db.Insert(db.DefaultContext, m))
+
+ privateKey := []byte{0x00, 0x01, 0x02, 0x04, 0x08, 0x10}
+ t.Run("Set privatekey", func(t *testing.T) {
+ require.NoError(t, m.SetPrivatekey(db.DefaultContext, privateKey))
+ })
+
+ t.Run("Normal retrieval", func(t *testing.T) {
+ actualPrivateKey, err := m.Privatekey()
+ require.NoError(t, err)
+ assert.EqualValues(t, privateKey, actualPrivateKey)
+ })
+
+ t.Run("Incorrect retrieval", func(t *testing.T) {
+ m.ID++
+ actualPrivateKey, err := m.Privatekey()
+ require.Error(t, err)
+ assert.Empty(t, actualPrivateKey)
+ })
+}
diff --git a/models/repo/redirect.go b/models/repo/redirect.go
new file mode 100644
index 0000000..61789eb
--- /dev/null
+++ b/models/repo/redirect.go
@@ -0,0 +1,86 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrRedirectNotExist represents a "RedirectNotExist" kind of error.
+type ErrRedirectNotExist struct {
+ OwnerID int64
+ RepoName string
+}
+
+// IsErrRedirectNotExist checks if an error is an ErrRedirectNotExist.
+func IsErrRedirectNotExist(err error) bool {
+ _, ok := err.(ErrRedirectNotExist)
+ return ok
+}
+
+func (err ErrRedirectNotExist) Error() string {
+ return fmt.Sprintf("repository redirect does not exist [uid: %d, name: %s]", err.OwnerID, err.RepoName)
+}
+
+func (err ErrRedirectNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Redirect represents that a repo name should be redirected to another
+type Redirect struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s)"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ RedirectRepoID int64 // repoID to redirect to
+}
+
+// TableName returns the real table name in the database
+func (Redirect) TableName() string {
+ return "repo_redirect"
+}
+
+func init() {
+ db.RegisterModel(new(Redirect))
+}
+
+// LookupRedirect looks up whether a repository name has a redirect and returns the target repository ID
+func LookupRedirect(ctx context.Context, ownerID int64, repoName string) (int64, error) {
+ repoName = strings.ToLower(repoName)
+ redirect := &Redirect{OwnerID: ownerID, LowerName: repoName}
+ if has, err := db.GetEngine(ctx).Get(redirect); err != nil {
+ return 0, err
+ } else if !has {
+ return 0, ErrRedirectNotExist{OwnerID: ownerID, RepoName: repoName}
+ }
+ return redirect.RedirectRepoID, nil
+}
+
+// NewRedirect creates a new repository redirect
+func NewRedirect(ctx context.Context, ownerID, repoID int64, oldRepoName, newRepoName string) error {
+ oldRepoName = strings.ToLower(oldRepoName)
+ newRepoName = strings.ToLower(newRepoName)
+
+ if err := DeleteRedirect(ctx, ownerID, newRepoName); err != nil {
+ return err
+ }
+
+ return db.Insert(ctx, &Redirect{
+ OwnerID: ownerID,
+ LowerName: oldRepoName,
+ RedirectRepoID: repoID,
+ })
+}
+
+// DeleteRedirect deletes any redirect from the specified repository name to
+// anything else
+func DeleteRedirect(ctx context.Context, ownerID int64, repoName string) error {
+ repoName = strings.ToLower(repoName)
+ _, err := db.GetEngine(ctx).Delete(&Redirect{OwnerID: ownerID, LowerName: repoName})
+ return err
+}
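
LookupRedirect reports a missing redirect through ErrRedirectNotExist rather than a nil result, so callers usually branch on IsErrRedirectNotExist. A sketch:

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// resolveRedirectSketch returns the redirected repository ID and whether a
// redirect exists for the given owner and (old) repository name.
func resolveRedirectSketch(ctx context.Context, ownerID int64, name string) (int64, bool, error) {
	repoID, err := repo_model.LookupRedirect(ctx, ownerID, name)
	if repo_model.IsErrRedirectNotExist(err) {
		return 0, false, nil
	}
	if err != nil {
		return 0, false, err
	}
	return repoID, true, nil
}
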
diff --git a/models/repo/redirect_test.go b/models/repo/redirect_test.go
new file mode 100644
index 0000000..2016784
--- /dev/null
+++ b/models/repo/redirect_test.go
@@ -0,0 +1,78 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLookupRedirect(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repoID, err := repo_model.LookupRedirect(db.DefaultContext, 2, "oldrepo1")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, repoID)
+
+ _, err = repo_model.LookupRedirect(db.DefaultContext, unittest.NonexistentID, "doesnotexist")
+ assert.True(t, repo_model.IsErrRedirectNotExist(err))
+}
+
+func TestNewRedirect(t *testing.T) {
+ // redirect to a completely new name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "newreponame"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: "oldrepo1",
+ RedirectRepoID: repo.ID,
+ })
+}
+
+func TestNewRedirect2(t *testing.T) {
+ // redirect to previously used name
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "oldrepo1"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+ unittest.AssertNotExistsBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: "oldrepo1",
+ RedirectRepoID: repo.ID,
+ })
+}
+
+func TestNewRedirect3(t *testing.T) {
+ // redirect for a previously-unredirected repo
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ require.NoError(t, repo_model.NewRedirect(db.DefaultContext, repo.OwnerID, repo.ID, repo.Name, "newreponame"))
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Redirect{
+ OwnerID: repo.OwnerID,
+ LowerName: repo.LowerName,
+ RedirectRepoID: repo.ID,
+ })
+}
diff --git a/models/repo/release.go b/models/repo/release.go
new file mode 100644
index 0000000..e2cd7d7
--- /dev/null
+++ b/models/repo/release.go
@@ -0,0 +1,566 @@
+// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrReleaseAlreadyExist represents a "ReleaseAlreadyExist" kind of error.
+type ErrReleaseAlreadyExist struct {
+ TagName string
+}
+
+// IsErrReleaseAlreadyExist checks if an error is an ErrReleaseAlreadyExist.
+func IsErrReleaseAlreadyExist(err error) bool {
+ _, ok := err.(ErrReleaseAlreadyExist)
+ return ok
+}
+
+func (err ErrReleaseAlreadyExist) Error() string {
+ return fmt.Sprintf("release tag already exist [tag_name: %s]", err.TagName)
+}
+
+func (err ErrReleaseAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrReleaseNotExist represents a "ReleaseNotExist" kind of error.
+type ErrReleaseNotExist struct {
+ ID int64
+ TagName string
+}
+
+// IsErrReleaseNotExist checks if an error is an ErrReleaseNotExist.
+func IsErrReleaseNotExist(err error) bool {
+ _, ok := err.(ErrReleaseNotExist)
+ return ok
+}
+
+func (err ErrReleaseNotExist) Error() string {
+ return fmt.Sprintf("release tag does not exist [id: %d, tag_name: %s]", err.ID, err.TagName)
+}
+
+func (err ErrReleaseNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Release represents a release of repository.
+type Release struct {
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX UNIQUE(n)"`
+ Repo *Repository `xorm:"-"`
+ PublisherID int64 `xorm:"INDEX"`
+ Publisher *user_model.User `xorm:"-"`
+ TagName string `xorm:"INDEX UNIQUE(n)"`
+ OriginalAuthor string
+ OriginalAuthorID int64 `xorm:"index"`
+ LowerTagName string
+ Target string
+ TargetBehind string `xorm:"-"` // to handle non-existing or empty target
+ Title string
+ Sha1 string `xorm:"VARCHAR(64)"`
+ HideArchiveLinks bool `xorm:"NOT NULL DEFAULT false"`
+ NumCommits int64
+ NumCommitsBehind int64 `xorm:"-"`
+ Note string `xorm:"TEXT"`
+ RenderedNote template.HTML `xorm:"-"`
+ IsDraft bool `xorm:"NOT NULL DEFAULT false"`
+ IsPrerelease bool `xorm:"NOT NULL DEFAULT false"`
+ IsTag bool `xorm:"NOT NULL DEFAULT false"` // will be true only if the record is a tag and has no related releases
+ Attachments []*Attachment `xorm:"-"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX"`
+ ArchiveDownloadCount *structs.TagArchiveDownloadCount `xorm:"-"`
+}
+
+func init() {
+ db.RegisterModel(new(Release))
+}
+
+// LoadAttributes load repo and publisher attributes for a release
+func (r *Release) LoadAttributes(ctx context.Context) error {
+ var err error
+ if r.Repo == nil {
+ r.Repo, err = GetRepositoryByID(ctx, r.RepoID)
+ if err != nil {
+ return err
+ }
+ }
+ if r.Publisher == nil {
+ r.Publisher, err = user_model.GetUserByID(ctx, r.PublisherID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ r.Publisher = user_model.NewGhostUser()
+ } else {
+ return err
+ }
+ }
+ }
+
+ err = r.LoadArchiveDownloadCount(ctx)
+ if err != nil {
+ return err
+ }
+
+ return GetReleaseAttachments(ctx, r)
+}
+
+// LoadArchiveDownloadCount loads the download count for the source archives
+func (r *Release) LoadArchiveDownloadCount(ctx context.Context) error {
+ var err error
+ r.ArchiveDownloadCount, err = GetArchiveDownloadCount(ctx, r.RepoID, r.ID)
+ return err
+}
+
+// APIURL returns the API URL for a release; the release must have its attributes loaded
+func (r *Release) APIURL() string {
+ return r.Repo.APIURL() + "/releases/" + strconv.FormatInt(r.ID, 10)
+}
+
+// ZipURL returns the zip URL for a release; the release must have its attributes loaded
+func (r *Release) ZipURL() string {
+ return r.Repo.HTMLURL() + "/archive/" + util.PathEscapeSegments(r.TagName) + ".zip"
+}
+
+// TarURL returns the tar.gz URL for a release; the release must have its attributes loaded
+func (r *Release) TarURL() string {
+ return r.Repo.HTMLURL() + "/archive/" + util.PathEscapeSegments(r.TagName) + ".tar.gz"
+}
+
+// HTMLURL returns the URL for a release on the web UI; the release must have its attributes loaded
+func (r *Release) HTMLURL() string {
+ return r.Repo.HTMLURL() + "/releases/tag/" + util.PathEscapeSegments(r.TagName)
+}
+
+// APIUploadURL returns the API URL to upload assets to a release; the release must have its attributes loaded
+func (r *Release) APIUploadURL() string {
+ return r.APIURL() + "/assets"
+}
+
+// Link returns the relative URL for a release on the web UI; the release must have its attributes loaded
+func (r *Release) Link() string {
+ return r.Repo.Link() + "/releases/tag/" + util.PathEscapeSegments(r.TagName)
+}
+
+// IsReleaseExist returns true if release with given tag name already exists.
+func IsReleaseExist(ctx context.Context, repoID int64, tagName string) (bool, error) {
+ if len(tagName) == 0 {
+ return false, nil
+ }
+
+ return db.GetEngine(ctx).Exist(&Release{RepoID: repoID, LowerTagName: strings.ToLower(tagName)})
+}
+
+// UpdateRelease updates all columns of a release
+func UpdateRelease(ctx context.Context, rel *Release) error {
+ _, err := db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel)
+ return err
+}
+
+// AddReleaseAttachments adds release attachments
+func AddReleaseAttachments(ctx context.Context, releaseID int64, attachmentUUIDs []string) (err error) {
+ // Check attachments
+ attachments, err := GetAttachmentsByUUIDs(ctx, attachmentUUIDs)
+ if err != nil {
+ return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %w", attachmentUUIDs, err)
+ }
+
+ for i := range attachments {
+ if attachments[i].ReleaseID != 0 {
+ return util.NewPermissionDeniedErrorf("release permission denied")
+ }
+ attachments[i].ReleaseID = releaseID
+ // None of the assigned values can be zero, so the plain Update (which skips zero-value columns) is sufficient; AllCols() is not needed.
+ if _, err = db.GetEngine(ctx).ID(attachments[i].ID).Update(attachments[i]); err != nil {
+ return fmt.Errorf("update attachment [%d]: %w", attachments[i].ID, err)
+ }
+ }
+
+ return err
+}
+
+// GetRelease returns the release with the given tag name in the repository.
+func GetRelease(ctx context.Context, repoID int64, tagName string) (*Release, error) {
+ rel := &Release{RepoID: repoID, LowerTagName: strings.ToLower(tagName)}
+ has, err := db.GetEngine(ctx).Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{0, tagName}
+ }
+ return rel, nil
+}
+
+// GetReleaseByID returns release with given ID.
+func GetReleaseByID(ctx context.Context, id int64) (*Release, error) {
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ ID(id).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{id, ""}
+ }
+
+ return rel, nil
+}
+
+// GetReleaseForRepoByID returns the release of the given repository with the given ID.
+func GetReleaseForRepoByID(ctx context.Context, repoID, id int64) (*Release, error) {
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ Where("id=? AND repo_id=?", id, repoID).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{id, ""}
+ }
+
+ return rel, nil
+}
+
+// FindReleasesOptions describes the conditions to Find releases
+type FindReleasesOptions struct {
+ db.ListOptions
+ RepoID int64
+ IncludeDrafts bool
+ IncludeTags bool
+ IsPreRelease optional.Option[bool]
+ IsDraft optional.Option[bool]
+ TagNames []string
+ HasSha1 optional.Option[bool] // useful to find draft releases which are created with existing tags
+}
+
+func (opts FindReleasesOptions) ToConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{"repo_id": opts.RepoID}
+
+ if !opts.IncludeDrafts {
+ cond = cond.And(builder.Eq{"is_draft": false})
+ }
+ if !opts.IncludeTags {
+ cond = cond.And(builder.Eq{"is_tag": false})
+ }
+ if len(opts.TagNames) > 0 {
+ cond = cond.And(builder.In("tag_name", opts.TagNames))
+ }
+ if opts.IsPreRelease.Has() {
+ cond = cond.And(builder.Eq{"is_prerelease": opts.IsPreRelease.Value()})
+ }
+ if opts.IsDraft.Has() {
+ cond = cond.And(builder.Eq{"is_draft": opts.IsDraft.Value()})
+ }
+ if opts.HasSha1.Has() {
+ if opts.HasSha1.Value() {
+ cond = cond.And(builder.Neq{"sha1": ""})
+ } else {
+ cond = cond.And(builder.Eq{"sha1": ""})
+ }
+ }
+ return cond
+}
+
+func (opts FindReleasesOptions) ToOrders() string {
+ return "created_unix DESC, id DESC"
+}
+
+// GetTagNamesByRepoID returns a list of release tag names of repository.
+func GetTagNamesByRepoID(ctx context.Context, repoID int64) ([]string, error) {
+ listOptions := db.ListOptions{
+ ListAll: true,
+ }
+ opts := FindReleasesOptions{
+ ListOptions: listOptions,
+ IncludeDrafts: true,
+ IncludeTags: true,
+ HasSha1: optional.Some(true),
+ RepoID: repoID,
+ }
+
+ tags := make([]string, 0)
+ sess := db.GetEngine(ctx).
+ Table("release").
+ Desc("created_unix", "id").
+ Where(opts.ToConds()).
+ Cols("tag_name")
+
+ return tags, sess.Find(&tags)
+}
+
+// GetLatestReleaseByRepoID returns the latest release for a repository
+func GetLatestReleaseByRepoID(ctx context.Context, repoID int64) (*Release, error) {
+ cond := builder.NewCond().
+ And(builder.Eq{"repo_id": repoID}).
+ And(builder.Eq{"is_draft": false}).
+ And(builder.Eq{"is_prerelease": false}).
+ And(builder.Eq{"is_tag": false})
+
+ rel := new(Release)
+ has, err := db.GetEngine(ctx).
+ Desc("created_unix", "id").
+ Where(cond).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrReleaseNotExist{0, "latest"}
+ }
+
+ return rel, nil
+}
+
+type releaseMetaSearch struct {
+ ID []int64
+ Rel []*Release
+}
+
+func (s releaseMetaSearch) Len() int {
+ return len(s.ID)
+}
+
+func (s releaseMetaSearch) Swap(i, j int) {
+ s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
+ s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
+}
+
+func (s releaseMetaSearch) Less(i, j int) bool {
+ return s.ID[i] < s.ID[j]
+}
+
+func hasDuplicateName(attaches []*Attachment) bool {
+ attachSet := container.Set[string]{}
+ for _, attachment := range attaches {
+ if attachSet.Contains(attachment.Name) {
+ return true
+ }
+ attachSet.Add(attachment.Name)
+ }
+ return false
+}
+
+// GetReleaseAttachments retrieves the attachments for releases
+func GetReleaseAttachments(ctx context.Context, rels ...*Release) (err error) {
+ if len(rels) == 0 {
+ return nil
+ }
+
+ // To keep this as efficient as possible, sort all releases by ID,
+ // select attachments by release ID,
+ // then merge-join them
+
+ // Sort
+ sortedRels := releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
+ var attachments []*Attachment
+ for index, element := range rels {
+ element.Attachments = []*Attachment{}
+ sortedRels.ID[index] = element.ID
+ sortedRels.Rel[index] = element
+ }
+ sort.Sort(sortedRels)
+
+ // Select attachments
+ err = db.GetEngine(ctx).
+ Asc("release_id", "name").
+ In("release_id", sortedRels.ID).
+ Find(&attachments)
+ if err != nil {
+ return err
+ }
+
+ // merge join
+ currentIndex := 0
+ for _, attachment := range attachments {
+ for sortedRels.ID[currentIndex] < attachment.ReleaseID {
+ currentIndex++
+ }
+ sortedRels.Rel[currentIndex].Attachments = append(sortedRels.Rel[currentIndex].Attachments, attachment)
+ }
+
+ // Make URLs predictable
+ for _, release := range rels {
+ // If we have no Repo, we don't need to execute this loop
+ if release.Repo == nil {
+ continue
+ }
+
+ // If the names are unique, use the URL with the name instead of the UUID
+ if !hasDuplicateName(release.Attachments) {
+ for _, attachment := range release.Attachments {
+ attachment.CustomDownloadURL = release.Repo.HTMLURL() + "/releases/download/" + url.PathEscape(release.TagName) + "/" + url.PathEscape(attachment.Name)
+ }
+ }
+ }
+
+ return err
+}
+
+// UpdateReleasesMigrationsByType updates all migrated repositories' releases from gitServiceType to replace originalAuthorID with posterID
+func UpdateReleasesMigrationsByType(ctx context.Context, gitServiceType structs.GitServiceType, originalAuthorID string, posterID int64) error {
+ _, err := db.GetEngine(ctx).Table("release").
+ Where("repo_id IN (SELECT id FROM repository WHERE original_service_type = ?)", gitServiceType).
+ And("original_author_id = ?", originalAuthorID).
+ Update(map[string]any{
+ "publisher_id": posterID,
+ "original_author": "",
+ "original_author_id": 0,
+ })
+ return err
+}
+
+// PushUpdateDeleteTagsContext deletes multiple tags for a push update within the given context
+func PushUpdateDeleteTagsContext(ctx context.Context, repo *Repository, tags []string) error {
+ if len(tags) == 0 {
+ return nil
+ }
+ lowerTags := make([]string, 0, len(tags))
+ for _, tag := range tags {
+ lowerTags = append(lowerTags, strings.ToLower(tag))
+ }
+
+ for _, tag := range tags {
+ release, err := GetRelease(ctx, repo.ID, tag)
+ if err != nil {
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+
+ err = DeleteArchiveDownloadCountForRelease(ctx, release.ID)
+ if err != nil {
+ return fmt.Errorf("DeleteTagArchiveDownloadCount: %w", err)
+ }
+ }
+
+ if _, err := db.GetEngine(ctx).
+ Where("repo_id = ? AND is_tag = ?", repo.ID, true).
+ In("lower_tag_name", lowerTags).
+ Delete(new(Release)); err != nil {
+ return fmt.Errorf("Delete: %w", err)
+ }
+
+ if _, err := db.GetEngine(ctx).
+ Where("repo_id = ? AND is_tag = ?", repo.ID, false).
+ In("lower_tag_name", lowerTags).
+ Cols("is_draft", "num_commits", "sha1").
+ Update(&Release{
+ IsDraft: true,
+ }); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+
+ return nil
+}
+
+// PushUpdateDeleteTag must be called for any push action that deletes a tag
+func PushUpdateDeleteTag(ctx context.Context, repo *Repository, tagName string) error {
+ rel, err := GetRelease(ctx, repo.ID, tagName)
+ if err != nil {
+ if IsErrReleaseNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+ if rel.IsTag {
+ if _, err = db.DeleteByID[Release](ctx, rel.ID); err != nil {
+ return fmt.Errorf("Delete: %w", err)
+ }
+ } else {
+ rel.IsDraft = true
+ rel.NumCommits = 0
+ rel.Sha1 = ""
+ if _, err = db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// SaveOrUpdateTag must be called for any push action that adds a tag
+func SaveOrUpdateTag(ctx context.Context, repo *Repository, newRel *Release) error {
+ rel, err := GetRelease(ctx, repo.ID, newRel.TagName)
+ if err != nil && !IsErrReleaseNotExist(err) {
+ return fmt.Errorf("GetRelease: %w", err)
+ }
+
+ if rel == nil {
+ rel = newRel
+ if _, err = db.GetEngine(ctx).Insert(rel); err != nil {
+ return fmt.Errorf("InsertOne: %w", err)
+ }
+ } else {
+ rel.Sha1 = newRel.Sha1
+ rel.CreatedUnix = newRel.CreatedUnix
+ rel.NumCommits = newRel.NumCommits
+ rel.IsDraft = false
+ if rel.IsTag && newRel.PublisherID > 0 {
+ rel.PublisherID = newRel.PublisherID
+ }
+ if _, err = db.GetEngine(ctx).ID(rel.ID).AllCols().Update(rel); err != nil {
+ return fmt.Errorf("Update: %w", err)
+ }
+ }
+ return nil
+}
+
+// RemapExternalUser implements the ExternalUserRemappable interface
+func (r *Release) RemapExternalUser(externalName string, externalID, userID int64) error {
+ r.OriginalAuthor = externalName
+ r.OriginalAuthorID = externalID
+ r.PublisherID = userID
+ return nil
+}
+
+// GetUserID implements the ExternalUserRemappable interface
+func (r *Release) GetUserID() int64 { return r.PublisherID }
+
+// GetExternalName implements the ExternalUserRemappable interface
+func (r *Release) GetExternalName() string { return r.OriginalAuthor }
+
+// GetExternalID implements the ExternalUserRemappable interface
+func (r *Release) GetExternalID() int64 { return r.OriginalAuthorID }
+
+// InsertReleases inserts releases and their attachments (used by migrations)
+func InsertReleases(ctx context.Context, rels ...*Release) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ for _, rel := range rels {
+ if _, err := sess.NoAutoTime().Insert(rel); err != nil {
+ return err
+ }
+
+ if len(rel.Attachments) > 0 {
+ for i := range rel.Attachments {
+ rel.Attachments[i].ReleaseID = rel.ID
+ }
+
+ if _, err := sess.NoAutoTime().Insert(rel.Attachments); err != nil {
+ return err
+ }
+ }
+ }
+
+ return committer.Commit()
+}
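
A short read-path sketch tying the release helpers together; GetLatestReleaseByRepoID skips drafts, prereleases and bare tags, and LoadAttributes fills in Repo, Publisher, attachments and archive download counts:

package example

import (
	"context"

	repo_model "code.gitea.io/gitea/models/repo"
)

// latestReleaseSketch loads the newest published release of a repository with
// its attributes resolved, or an ErrReleaseNotExist error when there is none.
func latestReleaseSketch(ctx context.Context, repoID int64) (*repo_model.Release, error) {
	rel, err := repo_model.GetLatestReleaseByRepoID(ctx, repoID)
	if err != nil {
		return nil, err
	}
	if err := rel.LoadAttributes(ctx); err != nil {
		return nil, err
	}
	return rel, nil
}
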
diff --git a/models/repo/release_test.go b/models/repo/release_test.go
new file mode 100644
index 0000000..4e61a28
--- /dev/null
+++ b/models/repo/release_test.go
@@ -0,0 +1,27 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestMigrate_InsertReleases(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ a := &Attachment{
+ UUID: "a0eebc91-9c0c-4ef7-bb6e-6bb9bd380a12",
+ }
+ r := &Release{
+ Attachments: []*Attachment{a},
+ }
+
+ err := InsertReleases(db.DefaultContext, r)
+ require.NoError(t, err)
+}
diff --git a/models/repo/repo.go b/models/repo/repo.go
new file mode 100644
index 0000000..cd6be48
--- /dev/null
+++ b/models/repo/repo.go
@@ -0,0 +1,951 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "net"
+ "net/url"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/translation"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrUserDoesNotHaveAccessToRepo represents an error where the user doesn't have access to a given repo.
+type ErrUserDoesNotHaveAccessToRepo struct {
+ UserID int64
+ RepoName string
+}
+
+// IsErrUserDoesNotHaveAccessToRepo checks if an error is an ErrUserDoesNotHaveAccessToRepo.
+func IsErrUserDoesNotHaveAccessToRepo(err error) bool {
+ _, ok := err.(ErrUserDoesNotHaveAccessToRepo)
+ return ok
+}
+
+func (err ErrUserDoesNotHaveAccessToRepo) Error() string {
+ return fmt.Sprintf("user doesn't have access to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
+}
+
+func (err ErrUserDoesNotHaveAccessToRepo) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+type ErrRepoIsArchived struct {
+ Repo *Repository
+}
+
+func (err ErrRepoIsArchived) Error() string {
+ return fmt.Sprintf("%s is archived", err.Repo.LogString())
+}
+
+var (
+ reservedRepoNames = []string{".", "..", "-"}
+ reservedRepoPatterns = []string{"*.git", "*.wiki", "*.rss", "*.atom"}
+)
+
+// IsUsableRepoName returns nil when the repository name is usable; otherwise it returns an error describing why it is not
+func IsUsableRepoName(name string) error {
+ if db.AlphaDashDotPattern.MatchString(name) {
+ // Note: this error is normally caught earlier in the UI
+ return db.ErrNameCharsNotAllowed{Name: name}
+ }
+ return db.IsUsableName(reservedRepoNames, reservedRepoPatterns, name)
+}
+
+// TrustModelType defines the types of trust model for this repository
+type TrustModelType int
+
+// kinds of TrustModel
+const (
+ DefaultTrustModel TrustModelType = iota // default trust model
+ CommitterTrustModel
+ CollaboratorTrustModel
+ CollaboratorCommitterTrustModel
+)
+
+// String converts a TrustModelType to a string
+func (t TrustModelType) String() string {
+ switch t {
+ case DefaultTrustModel:
+ return "default"
+ case CommitterTrustModel:
+ return "committer"
+ case CollaboratorTrustModel:
+ return "collaborator"
+ case CollaboratorCommitterTrustModel:
+ return "collaboratorcommitter"
+ }
+ return "default"
+}
+
+// ToTrustModel converts a string to a TrustModelType
+func ToTrustModel(model string) TrustModelType {
+ switch strings.ToLower(strings.TrimSpace(model)) {
+ case "default":
+ return DefaultTrustModel
+ case "collaborator":
+ return CollaboratorTrustModel
+ case "committer":
+ return CommitterTrustModel
+ case "collaboratorcommitter":
+ return CollaboratorCommitterTrustModel
+ }
+ return DefaultTrustModel
+}
+
+// RepositoryStatus defines the status of repository
+type RepositoryStatus int
+
+// all kinds of RepositoryStatus
+const (
+ RepositoryReady RepositoryStatus = iota // a normal repository
+ RepositoryBeingMigrated // repository is migrating
+ RepositoryPendingTransfer // repository pending in ownership transfer state
+ RepositoryBroken // repository is in a permanently broken state
+)
+
+// Repository represents a git repository.
+type Repository struct {
+ ID int64 `xorm:"pk autoincr"`
+ OwnerID int64 `xorm:"UNIQUE(s) index"`
+ OwnerName string
+ Owner *user_model.User `xorm:"-"`
+ LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Name string `xorm:"INDEX NOT NULL"`
+ Description string `xorm:"TEXT"`
+ Website string `xorm:"VARCHAR(2048)"`
+ OriginalServiceType api.GitServiceType `xorm:"index"`
+ OriginalURL string `xorm:"VARCHAR(2048)"`
+ DefaultBranch string
+ WikiBranch string
+
+ NumWatches int
+ NumStars int
+ NumForks int
+ NumIssues int
+ NumClosedIssues int
+ NumOpenIssues int `xorm:"-"`
+ NumPulls int
+ NumClosedPulls int
+ NumOpenPulls int `xorm:"-"`
+ NumMilestones int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenMilestones int `xorm:"-"`
+ NumProjects int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenProjects int `xorm:"-"`
+ NumActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"`
+ NumOpenActionRuns int `xorm:"-"`
+
+ IsPrivate bool `xorm:"INDEX"`
+ IsEmpty bool `xorm:"INDEX"`
+ IsArchived bool `xorm:"INDEX"`
+ IsMirror bool `xorm:"INDEX"`
+
+ Status RepositoryStatus `xorm:"NOT NULL DEFAULT 0"`
+
+ RenderingMetas map[string]string `xorm:"-"`
+ DocumentRenderingMetas map[string]string `xorm:"-"`
+ Units []*RepoUnit `xorm:"-"`
+ PrimaryLanguage *LanguageStat `xorm:"-"`
+
+ IsFork bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ ForkID int64 `xorm:"INDEX"`
+ BaseRepo *Repository `xorm:"-"`
+ IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
+ TemplateID int64 `xorm:"INDEX"`
+ Size int64 `xorm:"NOT NULL DEFAULT 0"`
+ GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
+ CodeIndexerStatus *RepoIndexerStatus `xorm:"-"`
+ StatsIndexerStatus *RepoIndexerStatus `xorm:"-"`
+ IsFsckEnabled bool `xorm:"NOT NULL DEFAULT true"`
+ CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"`
+ Topics []string `xorm:"TEXT JSON"`
+ ObjectFormatName string `xorm:"VARCHAR(6) NOT NULL DEFAULT 'sha1'"`
+
+ TrustModel TrustModelType
+
+ // Avatar: ID(10-20)-md5(32) - must fit into 64 symbols
+ Avatar string `xorm:"VARCHAR(64)"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+ ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(Repository))
+}
+
+func (repo *Repository) GetName() string {
+ return repo.Name
+}
+
+func (repo *Repository) GetOwnerName() string {
+ return repo.OwnerName
+}
+
+func (repo *Repository) GetWikiBranchName() string {
+ if repo.WikiBranch == "" {
+ return setting.Repository.DefaultBranch
+ }
+ return repo.WikiBranch
+}
+
+// SanitizedOriginalURL returns a sanitized OriginalURL
+func (repo *Repository) SanitizedOriginalURL() string {
+ if repo.OriginalURL == "" {
+ return ""
+ }
+ u, _ := util.SanitizeURL(repo.OriginalURL)
+ return u
+}
+
+// text representations to be returned in SizeDetail.Name
+const (
+ SizeDetailNameGit = "git"
+ SizeDetailNameLFS = "lfs"
+)
+
+type SizeDetail struct {
+ Name string
+ Size int64
+}
+
+// SizeDetails returns a slice of repository size details
+// Note: SizeDetailsString below expects it to have exactly 2 entries
+func (repo *Repository) SizeDetails() []SizeDetail {
+ sizeDetails := []SizeDetail{
+ {
+ Name: SizeDetailNameGit,
+ Size: repo.GitSize,
+ },
+ {
+ Name: SizeDetailNameLFS,
+ Size: repo.LFSSize,
+ },
+ }
+ return sizeDetails
+}
+
+// SizeDetailsString returns a concatenation of all repository size details as a string
+func (repo *Repository) SizeDetailsString(locale translation.Locale) string {
+ sizeDetails := repo.SizeDetails()
+ return locale.TrString("repo.size_format", sizeDetails[0].Name, locale.TrSize(sizeDetails[0].Size), sizeDetails[1].Name, locale.TrSize(sizeDetails[1].Size))
+}
+
+func (repo *Repository) LogString() string {
+ if repo == nil {
+ return "<Repository nil>"
+ }
+ return fmt.Sprintf("<Repository %d:%s/%s>", repo.ID, repo.OwnerName, repo.Name)
+}
+
+// IsBeingMigrated indicates that repository is being migrated
+func (repo *Repository) IsBeingMigrated() bool {
+ return repo.Status == RepositoryBeingMigrated
+}
+
+// IsBeingCreated indicates that repository is being migrated or forked
+func (repo *Repository) IsBeingCreated() bool {
+ return repo.IsBeingMigrated()
+}
+
+// IsBroken indicates that repository is broken
+func (repo *Repository) IsBroken() bool {
+ return repo.Status == RepositoryBroken
+}
+
+// MarkAsBrokenEmpty marks the repo as broken and empty
+func (repo *Repository) MarkAsBrokenEmpty() {
+ repo.Status = RepositoryBroken
+ repo.IsEmpty = true
+}
+
+// AfterLoad is invoked from XORM after setting the values of all fields of this object.
+func (repo *Repository) AfterLoad() {
+ repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
+ repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
+ repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
+ repo.NumOpenProjects = repo.NumProjects - repo.NumClosedProjects
+ repo.NumOpenActionRuns = repo.NumActionRuns - repo.NumClosedActionRuns
+}
+
+// LoadAttributes loads attributes of the repository.
+func (repo *Repository) LoadAttributes(ctx context.Context) error {
+ // Load owner
+ if err := repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ // Load primary language
+ stats := make(LanguageStatList, 0, 1)
+ if err := db.GetEngine(ctx).
+ Where("`repo_id` = ? AND `is_primary` = ? AND `language` != ?", repo.ID, true, "other").
+ Find(&stats); err != nil {
+ return fmt.Errorf("find primary languages: %w", err)
+ }
+ stats.LoadAttributes()
+ for _, st := range stats {
+ if st.RepoID == repo.ID {
+ repo.PrimaryLanguage = st
+ break
+ }
+ }
+ return nil
+}
+
+// FullName returns the repository full name
+func (repo *Repository) FullName() string {
+ return repo.OwnerName + "/" + repo.Name
+}
+
+// HTMLURL returns the repository HTML URL
+func (repo *Repository) HTMLURL() string {
+ return setting.AppURL + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// CommitLink makes a link to the commit by its full ID
+// note: it does not check whether the ID is valid
+func (repo *Repository) CommitLink(commitID string) (result string) {
+ if git.IsEmptyCommitID(commitID, nil) {
+ result = ""
+ } else {
+ result = repo.Link() + "/commit/" + url.PathEscape(commitID)
+ }
+ return result
+}
+
+// APIURL returns the repository API URL
+func (repo *Repository) APIURL() string {
+ return setting.AppURL + "api/v1/repos/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// APActorID returns the activitypub repository API URL
+func (repo *Repository) APActorID() string {
+ return fmt.Sprintf("%vapi/v1/activitypub/repository-id/%v", setting.AppURL, url.PathEscape(fmt.Sprint(repo.ID)))
+}
+
+// GetCommitsCountCacheKey returns cache key used for commits count caching.
+func (repo *Repository) GetCommitsCountCacheKey(contextName string, isRef bool) string {
+ var prefix string
+ if isRef {
+ prefix = "ref"
+ } else {
+ prefix = "commit"
+ }
+ return fmt.Sprintf("commits-count-%d-%s-%s", repo.ID, prefix, contextName)
+}
+
+// LoadUnits loads repo units into repo.Units
+func (repo *Repository) LoadUnits(ctx context.Context) (err error) {
+ if repo.Units != nil {
+ return nil
+ }
+
+ repo.Units, err = getUnitsByRepoID(ctx, repo.ID)
+ if log.IsTrace() {
+ unitTypeStrings := make([]string, len(repo.Units))
+ for i, unit := range repo.Units {
+ unitTypeStrings[i] = unit.Type.String()
+ }
+ log.Trace("repo.Units, ID=%d, Types: [%s]", repo.ID, strings.Join(unitTypeStrings, ", "))
+ }
+
+ return err
+}
+
+// UnitEnabled returns true if this repository has the given unit enabled
+func (repo *Repository) UnitEnabled(ctx context.Context, tp unit.Type) bool {
+ if err := repo.LoadUnits(ctx); err != nil {
+ log.Warn("Error loading repository (ID: %d) units: %s", repo.ID, err.Error())
+ }
+ for _, unit := range repo.Units {
+ if unit.Type == tp {
+ return true
+ }
+ }
+ return false
+}
+
+// MustGetUnit always returns a RepoUnit object
+func (repo *Repository) MustGetUnit(ctx context.Context, tp unit.Type) *RepoUnit {
+ ru, err := repo.GetUnit(ctx, tp)
+ if err == nil {
+ return ru
+ }
+
+ if tp == unit.TypeExternalWiki {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ExternalWikiConfig),
+ }
+ } else if tp == unit.TypeExternalTracker {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ExternalTrackerConfig),
+ }
+ } else if tp == unit.TypePullRequests {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(PullRequestsConfig),
+ }
+ } else if tp == unit.TypeIssues {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(IssuesConfig),
+ }
+ } else if tp == unit.TypeActions {
+ return &RepoUnit{
+ Type: tp,
+ Config: new(ActionsConfig),
+ }
+ }
+
+ return &RepoUnit{
+ Type: tp,
+ Config: new(UnitConfig),
+ }
+}
+
+// GetUnit returns a RepoUnit object
+func (repo *Repository) GetUnit(ctx context.Context, tp unit.Type) (*RepoUnit, error) {
+ if err := repo.LoadUnits(ctx); err != nil {
+ return nil, err
+ }
+ for _, unit := range repo.Units {
+ if unit.Type == tp {
+ return unit, nil
+ }
+ }
+ return nil, ErrUnitTypeNotExist{tp}
+}
+
+// AllUnitsEnabled returns true if all units are enabled for the repo.
+func (repo *Repository) AllUnitsEnabled(ctx context.Context) bool {
+ hasAnyUnitEnabled := func(unitGroup []unit.Type) bool {
+ // Loop over the group of units
+ for _, unit := range unitGroup {
+ // If *any* of them is enabled, return true.
+ if repo.UnitEnabled(ctx, unit) {
+ return true
+ }
+ }
+
+ // If none are enabled, return false.
+ return false
+ }
+
+ for _, unitGroup := range unit.AllowedRepoUnitGroups {
+ // If any disabled unit is found, return false immediately.
+ if !hasAnyUnitEnabled(unitGroup) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// LoadOwner loads owner user
+func (repo *Repository) LoadOwner(ctx context.Context) (err error) {
+ if repo.Owner != nil {
+ return nil
+ }
+
+ repo.Owner, err = user_model.GetUserByID(ctx, repo.OwnerID)
+ return err
+}
+
+// MustOwner always returns a valid *user_model.User object to avoid
+// conceptually impossible error handling.
+// It creates a fake object that contains error details
+// when an error occurs.
+func (repo *Repository) MustOwner(ctx context.Context) *user_model.User {
+ if err := repo.LoadOwner(ctx); err != nil {
+ return &user_model.User{
+ Name: "error",
+ FullName: err.Error(),
+ }
+ }
+
+ return repo.Owner
+}
+
+// ComposeMetas composes a map of metas for properly rendering issue links and external issue trackers.
+func (repo *Repository) ComposeMetas(ctx context.Context) map[string]string {
+ if len(repo.RenderingMetas) == 0 {
+ metas := map[string]string{
+ "user": repo.OwnerName,
+ "repo": repo.Name,
+ "repoPath": repo.RepoPath(),
+ "mode": "comment",
+ }
+
+ unit, err := repo.GetUnit(ctx, unit.TypeExternalTracker)
+ if err == nil {
+ metas["format"] = unit.ExternalTrackerConfig().ExternalTrackerFormat
+ switch unit.ExternalTrackerConfig().ExternalTrackerStyle {
+ case markup.IssueNameStyleAlphanumeric:
+ metas["style"] = markup.IssueNameStyleAlphanumeric
+ case markup.IssueNameStyleRegexp:
+ metas["style"] = markup.IssueNameStyleRegexp
+ metas["regexp"] = unit.ExternalTrackerConfig().ExternalTrackerRegexpPattern
+ default:
+ metas["style"] = markup.IssueNameStyleNumeric
+ }
+ }
+
+ repo.MustOwner(ctx)
+ if repo.Owner.IsOrganization() {
+ teams := make([]string, 0, 5)
+ _ = db.GetEngine(ctx).Table("team_repo").
+ Join("INNER", "team", "team.id = team_repo.team_id").
+ Where("team_repo.repo_id = ?", repo.ID).
+ Select("team.lower_name").
+ OrderBy("team.lower_name").
+ Find(&teams)
+ metas["teams"] = "," + strings.Join(teams, ",") + ","
+ metas["org"] = strings.ToLower(repo.OwnerName)
+ }
+
+ repo.RenderingMetas = metas
+ }
+ return repo.RenderingMetas
+}
+
+// ComposeDocumentMetas composes a map of metas for properly rendering documents
+func (repo *Repository) ComposeDocumentMetas(ctx context.Context) map[string]string {
+ if len(repo.DocumentRenderingMetas) == 0 {
+ metas := map[string]string{}
+ for k, v := range repo.ComposeMetas(ctx) {
+ metas[k] = v
+ }
+ metas["mode"] = "document"
+ repo.DocumentRenderingMetas = metas
+ }
+ return repo.DocumentRenderingMetas
+}
+
+// GetBaseRepo populates repo.BaseRepo for a fork repository and
+// returns an error on failure (NOTE: no error is returned for
+// non-fork repositories, and BaseRepo will be left untouched)
+func (repo *Repository) GetBaseRepo(ctx context.Context) (err error) {
+ if !repo.IsFork {
+ return nil
+ }
+
+ if repo.BaseRepo != nil {
+ return nil
+ }
+ repo.BaseRepo, err = GetRepositoryByID(ctx, repo.ForkID)
+ return err
+}
+
+// IsGenerated returns whether _this_ repository was generated from a template
+func (repo *Repository) IsGenerated() bool {
+ return repo.TemplateID != 0
+}
+
+// RepoPath returns repository path by given user and repository name.
+func RepoPath(userName, repoName string) string { //revive:disable-line:exported
+ return filepath.Join(user_model.UserPath(userName), strings.ToLower(repoName)+".git")
+}
+
+// RepoPath returns the repository path
+func (repo *Repository) RepoPath() string {
+ return RepoPath(repo.OwnerName, repo.Name)
+}
+
+// Link returns the repository relative url
+func (repo *Repository) Link() string {
+ return setting.AppSubURL + "/" + url.PathEscape(repo.OwnerName) + "/" + url.PathEscape(repo.Name)
+}
+
+// ComposeCompareURL returns the repository comparison URL
+func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
+ return fmt.Sprintf("%s/%s/compare/%s...%s", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name), util.PathEscapeSegments(oldCommitID), util.PathEscapeSegments(newCommitID))
+}
+
+func (repo *Repository) ComposeBranchCompareURL(baseRepo *Repository, branchName string) string {
+ if baseRepo == nil {
+ baseRepo = repo
+ }
+ var cmpBranchEscaped string
+ if repo.ID != baseRepo.ID {
+ cmpBranchEscaped = fmt.Sprintf("%s/%s:", url.PathEscape(repo.OwnerName), url.PathEscape(repo.Name))
+ }
+ cmpBranchEscaped = fmt.Sprintf("%s%s", cmpBranchEscaped, util.PathEscapeSegments(branchName))
+ return fmt.Sprintf("%s/compare/%s...%s", baseRepo.Link(), util.PathEscapeSegments(baseRepo.DefaultBranch), cmpBranchEscaped)
+}
+
+// IsOwnedBy returns true when user owns this repository
+func (repo *Repository) IsOwnedBy(userID int64) bool {
+ return repo.OwnerID == userID
+}
+
+// CanCreateBranch returns true if repository meets the requirements for creating new branches.
+func (repo *Repository) CanCreateBranch() bool {
+ return !repo.IsMirror
+}
+
+// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
+func (repo *Repository) CanEnablePulls() bool {
+ return !repo.IsMirror && !repo.IsEmpty
+}
+
+// AllowsPulls returns true if repository meets the requirements of accepting pulls and has them enabled.
+func (repo *Repository) AllowsPulls(ctx context.Context) bool {
+ return repo.CanEnablePulls() && repo.UnitEnabled(ctx, unit.TypePullRequests)
+}
+
+// CanEnableEditor returns true if repository meets the requirements of web editor.
+func (repo *Repository) CanEnableEditor() bool {
+ return !repo.IsMirror
+}
+
+// DescriptionHTML applies special handling to the description and returns an HTML string.
+func (repo *Repository) DescriptionHTML(ctx context.Context) template.HTML {
+ desc, err := markup.RenderDescriptionHTML(&markup.RenderContext{
+ Ctx: ctx,
+ // Don't use Metas to speedup requests
+ }, repo.Description)
+ if err != nil {
+ log.Error("Failed to render description for %s (ID: %d): %v", repo.Name, repo.ID, err)
+ return template.HTML(markup.SanitizeDescription(repo.Description))
+ }
+ return template.HTML(markup.SanitizeDescription(desc))
+}
+
+// CloneLink represents different types of clone URLs of repository.
+type CloneLink struct {
+ SSH string
+ HTTPS string
+}
+
+// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
+func ComposeHTTPSCloneURL(owner, repo string) string {
+ return fmt.Sprintf("%s%s/%s.git", setting.AppURL, url.PathEscape(owner), url.PathEscape(repo))
+}
+
+func ComposeSSHCloneURL(ownerName, repoName string) string {
+ sshUser := setting.SSH.User
+ sshDomain := setting.SSH.Domain
+
+ // non-standard port, it must use full URI
+ if setting.SSH.Port != 22 {
+ sshHost := net.JoinHostPort(sshDomain, strconv.Itoa(setting.SSH.Port))
+ return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+ }
+
+ // for standard port, it can use a shorter URI (without the port)
+ sshHost := sshDomain
+ if ip := net.ParseIP(sshHost); ip != nil && ip.To4() == nil {
+ sshHost = "[" + sshHost + "]" // for IPv6 address, wrap it with brackets
+ }
+ if setting.Repository.UseCompatSSHURI {
+ return fmt.Sprintf("ssh://%s@%s/%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+ }
+ return fmt.Sprintf("%s@%s:%s/%s.git", sshUser, sshHost, url.PathEscape(ownerName), url.PathEscape(repoName))
+}
+
+func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
+ repoName := repo.Name
+ if isWiki {
+ repoName += ".wiki"
+ }
+
+ cl := new(CloneLink)
+ cl.SSH = ComposeSSHCloneURL(repo.OwnerName, repoName)
+ cl.HTTPS = ComposeHTTPSCloneURL(repo.OwnerName, repoName)
+ return cl
+}
+
+// CloneLink returns clone URLs of repository.
+func (repo *Repository) CloneLink() (cl *CloneLink) {
+ return repo.cloneLink(false)
+}
+
+// GetOriginalURLHostname returns the hostname of the original URL, or the URL itself if it cannot be parsed
+func (repo *Repository) GetOriginalURLHostname() string {
+ u, err := url.Parse(repo.OriginalURL)
+ if err != nil {
+ return repo.OriginalURL
+ }
+
+ return u.Host
+}
+
+// GetTrustModel will get the TrustModel for the repo or the default trust model
+func (repo *Repository) GetTrustModel() TrustModelType {
+ trustModel := repo.TrustModel
+ if trustModel == DefaultTrustModel {
+ trustModel = ToTrustModel(setting.Repository.Signing.DefaultTrustModel)
+ if trustModel == DefaultTrustModel {
+ return CollaboratorTrustModel
+ }
+ }
+ return trustModel
+}
+
+// MustNotBeArchived returns ErrRepoIsArchived if the repo is archived
+func (repo *Repository) MustNotBeArchived() error {
+ if repo.IsArchived {
+ return ErrRepoIsArchived{Repo: repo}
+ }
+ return nil
+}
+
+// __________ .__ __
+// \______ \ ____ ______ ____ _____|__|/ |_ ___________ ___.__.
+// | _// __ \\____ \ / _ \/ ___/ \ __\/ _ \_ __ < | |
+// | | \ ___/| |_> > <_> )___ \| || | ( <_> ) | \/\___ |
+// |____|_ /\___ > __/ \____/____ >__||__| \____/|__| / ____|
+// \/ \/|__| \/ \/
+
+// ErrRepoNotExist represents a "RepoNotExist" kind of error.
+type ErrRepoNotExist struct {
+ ID int64
+ UID int64
+ OwnerName string
+ Name string
+}
+
+// IsErrRepoNotExist checks if an error is a ErrRepoNotExist.
+func IsErrRepoNotExist(err error) bool {
+ _, ok := err.(ErrRepoNotExist)
+ return ok
+}
+
+func (err ErrRepoNotExist) Error() string {
+ return fmt.Sprintf("repository does not exist [id: %d, uid: %d, owner_name: %s, name: %s]",
+ err.ID, err.UID, err.OwnerName, err.Name)
+}
+
+// Unwrap unwraps this error as an ErrNotExist error
+func (err ErrRepoNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// GetRepositoryByOwnerAndName returns the repository by given owner name and repo name
+func GetRepositoryByOwnerAndName(ctx context.Context, ownerName, repoName string) (*Repository, error) {
+ var repo Repository
+ has, err := db.GetEngine(ctx).Table("repository").Select("repository.*").
+ Join("INNER", "`user`", "`user`.id = repository.owner_id").
+ Where("repository.lower_name = ?", strings.ToLower(repoName)).
+ And("`user`.lower_name = ?", strings.ToLower(ownerName)).
+ Get(&repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{0, 0, ownerName, repoName}
+ }
+ return &repo, nil
+}
+
+// GetRepositoryByName returns the repository by given name under user if exists.
+func GetRepositoryByName(ctx context.Context, ownerID int64, name string) (*Repository, error) {
+ var repo Repository
+ has, err := db.GetEngine(ctx).
+ Where("`owner_id`=?", ownerID).
+ And("`lower_name`=?", strings.ToLower(name)).
+ NoAutoCondition().
+ Get(&repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{0, ownerID, "", name}
+ }
+ return &repo, err
+}
+
+// getRepositoryURLPathSegments returns segments (owner, reponame) extracted from a url
+func getRepositoryURLPathSegments(repoURL string) []string {
+ if strings.HasPrefix(repoURL, setting.AppURL) {
+ return strings.Split(strings.TrimPrefix(repoURL, setting.AppURL), "/")
+ }
+
+ sshURLVariants := [4]string{
+ setting.SSH.Domain + ":",
+ setting.SSH.User + "@" + setting.SSH.Domain + ":",
+ "git+ssh://" + setting.SSH.Domain + "/",
+ "git+ssh://" + setting.SSH.User + "@" + setting.SSH.Domain + "/",
+ }
+
+ for _, sshURL := range sshURLVariants {
+ if strings.HasPrefix(repoURL, sshURL) {
+ return strings.Split(strings.TrimPrefix(repoURL, sshURL), "/")
+ }
+ }
+
+ return nil
+}
+
+// GetRepositoryByURL returns the repository by given url
+func GetRepositoryByURL(ctx context.Context, repoURL string) (*Repository, error) {
+ // possible urls for git:
+ // https://my.domain/sub-path/<owner>/<repo>.git
+ // https://my.domain/sub-path/<owner>/<repo>
+ // git+ssh://user@my.domain/<owner>/<repo>.git
+ // git+ssh://user@my.domain/<owner>/<repo>
+ // user@my.domain:<owner>/<repo>.git
+ // user@my.domain:<owner>/<repo>
+
+ pathSegments := getRepositoryURLPathSegments(repoURL)
+
+ if len(pathSegments) != 2 {
+ return nil, fmt.Errorf("unknown or malformed repository URL")
+ }
+
+ ownerName := pathSegments[0]
+ repoName := strings.TrimSuffix(pathSegments[1], ".git")
+ return GetRepositoryByOwnerAndName(ctx, ownerName, repoName)
+}
+
+// GetRepositoryByID returns the repository by given id if exists.
+func GetRepositoryByID(ctx context.Context, id int64) (*Repository, error) {
+ repo := new(Repository)
+ has, err := db.GetEngine(ctx).ID(id).Get(repo)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrRepoNotExist{id, 0, "", ""}
+ }
+ return repo, nil
+}
+
+// GetRepositoriesMapByIDs returns the repositories by given id slice.
+func GetRepositoriesMapByIDs(ctx context.Context, ids []int64) (map[int64]*Repository, error) {
+ repos := make(map[int64]*Repository, len(ids))
+ return repos, db.GetEngine(ctx).In("id", ids).Find(&repos)
+}
+
+// IsRepositoryModelOrDirExist returns true if a repository with the given name under the user already exists, either as a database record or as a directory on disk.
+func IsRepositoryModelOrDirExist(ctx context.Context, u *user_model.User, repoName string) (bool, error) {
+ has, err := IsRepositoryModelExist(ctx, u, repoName)
+ if err != nil {
+ return false, err
+ }
+ isDir, err := util.IsDir(RepoPath(u.Name, repoName))
+ return has || isDir, err
+}
+
+func IsRepositoryModelExist(ctx context.Context, u *user_model.User, repoName string) (bool, error) {
+ return db.GetEngine(ctx).Get(&Repository{
+ OwnerID: u.ID,
+ LowerName: strings.ToLower(repoName),
+ })
+}
+
+// GetTemplateRepo populates repo.TemplateRepo for a generated repository and
+// returns an error on failure (NOTE: no error is returned for
+// non-generated repositories, and TemplateRepo will be left untouched)
+func GetTemplateRepo(ctx context.Context, repo *Repository) (*Repository, error) {
+ if !repo.IsGenerated() {
+ return nil, nil
+ }
+
+ return GetRepositoryByID(ctx, repo.TemplateID)
+}
+
+// TemplateRepo returns the repository, which is template of this repository
+func (repo *Repository) TemplateRepo(ctx context.Context) *Repository {
+ repo, err := GetTemplateRepo(ctx, repo)
+ if err != nil {
+ log.Error("TemplateRepo: %v", err)
+ return nil
+ }
+ return repo
+}
+
+type CountRepositoryOptions struct {
+ OwnerID int64
+ Private optional.Option[bool]
+}
+
+// CountRepositories returns the number of repositories matching the given options.
+// When opts.Private is unset, both private and public repositories are counted.
+func CountRepositories(ctx context.Context, opts CountRepositoryOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where("id > 0")
+
+ if opts.OwnerID > 0 {
+ sess.And("owner_id = ?", opts.OwnerID)
+ }
+ if opts.Private.Has() {
+ sess.And("is_private=?", opts.Private.Value())
+ }
+
+ count, err := sess.Count(new(Repository))
+ if err != nil {
+ return 0, fmt.Errorf("countRepositories: %w", err)
+ }
+ return count, nil
+}
+
+// UpdateRepoIssueNumbers updates one of a repository's counters of (open|closed) (issues|PRs) with the current count
+func UpdateRepoIssueNumbers(ctx context.Context, repoID int64, isPull, isClosed bool) error {
+ field := "num_"
+ if isClosed {
+ field += "closed_"
+ }
+ if isPull {
+ field += "pulls"
+ } else {
+ field += "issues"
+ }
+
+ subQuery := builder.Select("count(*)").
+ From("issue").Where(builder.Eq{
+ "repo_id": repoID,
+ "is_pull": isPull,
+ }.And(builder.If(isClosed, builder.Eq{"is_closed": isClosed})))
+
+ // builder.Update(cond) will generate SQL like UPDATE ... SET cond
+ query := builder.Update(builder.Eq{field: subQuery}).
+ From("repository").
+ Where(builder.Eq{"id": repoID})
+ _, err := db.Exec(ctx, query)
+ return err
+}
+
+// CountNullArchivedRepository counts the number of repositories where is_archived is null
+func CountNullArchivedRepository(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.IsNull{"is_archived"}).Count(new(Repository))
+}
+
+// FixNullArchivedRepository sets is_archived to false where it is null
+func FixNullArchivedRepository(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where(builder.IsNull{"is_archived"}).Cols("is_archived").NoAutoTime().Update(&Repository{
+ IsArchived: false,
+ })
+}
+
+// UpdateRepositoryOwnerName updates the owner name of all repositories owned by the user
+func UpdateRepositoryOwnerName(ctx context.Context, oldUserName, newUserName string) error {
+ if _, err := db.GetEngine(ctx).Exec("UPDATE `repository` SET owner_name=? WHERE owner_name=?", newUserName, oldUserName); err != nil {
+ return fmt.Errorf("change repo owner name: %w", err)
+ }
+ return nil
+}
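As an illustration only (not part of the patch): a minimal sketch of how the clone-link and lookup helpers above could be combined. It assumes the models/repo package introduced by this patch is importable and that settings and the database have already been initialized by the caller; printCloneURLs is a hypothetical helper name.

// clone_link_sketch.go — hypothetical example, not part of this diff.
package main

import (
	"context"
	"fmt"

	repo_model "code.gitea.io/gitea/models/repo"
)

// printCloneURLs prints both clone URLs for a repository and then resolves
// the HTTPS URL back to the same repository via GetRepositoryByURL.
func printCloneURLs(ctx context.Context, repo *repo_model.Repository) error {
	cl := repo.CloneLink()
	fmt.Println("SSH:  ", cl.SSH)   // e.g. git@example.com:owner/repo.git when SSH listens on port 22
	fmt.Println("HTTPS:", cl.HTTPS) // e.g. https://example.com/owner/repo.git

	// The HTTPS clone URL starts with setting.AppURL, so it can be parsed back
	// into (owner, repo) and looked up again.
	same, err := repo_model.GetRepositoryByURL(ctx, cl.HTTPS)
	if err != nil {
		return err
	}
	fmt.Println("resolved:", same.FullName())
	return nil
}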
diff --git a/models/repo/repo_flags.go b/models/repo/repo_flags.go
new file mode 100644
index 0000000..de76ed2
--- /dev/null
+++ b/models/repo/repo_flags.go
@@ -0,0 +1,102 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+// RepoFlag represents a single flag against a repository
+type RepoFlag struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"UNIQUE(s) INDEX"`
+ Name string `xorm:"UNIQUE(s) INDEX"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoFlag))
+}
+
+// TableName provides the real table name
+func (RepoFlag) TableName() string {
+ return "forgejo_repo_flag"
+}
+
+// ListFlags returns the array of flags on the repo.
+func (repo *Repository) ListFlags(ctx context.Context) ([]RepoFlag, error) {
+ var flags []RepoFlag
+ err := db.GetEngine(ctx).Table(&RepoFlag{}).Where("repo_id = ?", repo.ID).Find(&flags)
+ if err != nil {
+ return nil, err
+ }
+ return flags, nil
+}
+
+// IsFlagged returns whether a repo has any flags or not
+func (repo *Repository) IsFlagged(ctx context.Context) bool {
+ has, _ := db.Exist[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID})
+ return has
+}
+
+// GetFlag returns a single RepoFlag based on its name, along with whether it exists
+func (repo *Repository) GetFlag(ctx context.Context, flagName string) (bool, *RepoFlag, error) {
+ flag, has, err := db.Get[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID, "name": flagName})
+ if err != nil {
+ return false, nil, err
+ }
+ return has, flag, nil
+}
+
+// HasFlag returns true if a repo has a given flag, false otherwise
+func (repo *Repository) HasFlag(ctx context.Context, flagName string) bool {
+ has, _ := db.Exist[RepoFlag](ctx, builder.Eq{"repo_id": repo.ID, "name": flagName})
+ return has
+}
+
+// AddFlag adds a new flag to the repo
+func (repo *Repository) AddFlag(ctx context.Context, flagName string) error {
+ return db.Insert(ctx, RepoFlag{
+ RepoID: repo.ID,
+ Name: flagName,
+ })
+}
+
+// DeleteFlag removes a flag from the repo
+func (repo *Repository) DeleteFlag(ctx context.Context, flagName string) (int64, error) {
+ return db.DeleteByBean(ctx, &RepoFlag{RepoID: repo.ID, Name: flagName})
+}
+
+// ReplaceAllFlags replaces all flags of a repo with a new set
+func (repo *Repository) ReplaceAllFlags(ctx context.Context, flagNames []string) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if err := db.DeleteBeans(ctx, &RepoFlag{RepoID: repo.ID}); err != nil {
+ return err
+ }
+
+ if len(flagNames) == 0 {
+ return committer.Commit()
+ }
+
+ var flags []RepoFlag
+ for _, name := range flagNames {
+ flags = append(flags, RepoFlag{
+ RepoID: repo.ID,
+ Name: name,
+ })
+ }
+ if err := db.Insert(ctx, &flags); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
diff --git a/models/repo/repo_flags_test.go b/models/repo/repo_flags_test.go
new file mode 100644
index 0000000..bccefcf
--- /dev/null
+++ b/models/repo/repo_flags_test.go
@@ -0,0 +1,115 @@
+// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepositoryFlags(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 10})
+
+ // ********************
+ // ** NEGATIVE TESTS **
+ // ********************
+
+ // Unless we add flags, the repo has none
+ flags, err := repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Empty(t, flags)
+
+ // If the repo has no flags, it is not flagged
+ flagged := repo.IsFlagged(db.DefaultContext)
+ assert.False(t, flagged)
+
+ // Trying to find a flag when there is none
+ has := repo.HasFlag(db.DefaultContext, "foo")
+ assert.False(t, has)
+
+ // Trying to retrieve a non-existent flag indicates not found
+ has, _, err = repo.GetFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.False(t, has)
+
+ // Deleting a non-existent flag is a no-op: no error, nothing deleted
+ deleted, err := repo.DeleteFlag(db.DefaultContext, "no-such-flag")
+ require.NoError(t, err)
+ assert.Equal(t, int64(0), deleted)
+
+ // ********************
+ // ** POSITIVE TESTS **
+ // ********************
+
+ // Adding a flag works
+ err = repo.AddFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+
+ // Adding it again fails
+ err = repo.AddFlag(db.DefaultContext, "foo")
+ require.Error(t, err)
+
+ // Listing flags includes the one we added
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, flags, 1)
+ assert.Equal(t, "foo", flags[0].Name)
+
+ // With a flag added, the repo is flagged
+ flagged = repo.IsFlagged(db.DefaultContext)
+ assert.True(t, flagged)
+
+ // The flag can be found
+ has = repo.HasFlag(db.DefaultContext, "foo")
+ assert.True(t, has)
+
+ // Added flag can be retrieved
+ _, flag, err := repo.GetFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.Equal(t, "foo", flag.Name)
+
+ // Deleting a flag works
+ deleted, err = repo.DeleteFlag(db.DefaultContext, "foo")
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), deleted)
+
+ // The list is now empty
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Empty(t, flags)
+
+ // Replacing the (currently empty) flag list with a new one works
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{"bar"})
+ require.NoError(t, err)
+
+ // The repo is now flagged with "bar"
+ has = repo.HasFlag(db.DefaultContext, "bar")
+ assert.True(t, has)
+
+ // Replacing one flag set with another works
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{"baz", "quux"})
+ require.NoError(t, err)
+
+ // The repo now has two flags
+ flags, err = repo.ListFlags(db.DefaultContext)
+ require.NoError(t, err)
+ assert.Len(t, flags, 2)
+ assert.Equal(t, "baz", flags[0].Name)
+ assert.Equal(t, "quux", flags[1].Name)
+
+ // Replacing flags with an empty set deletes all flags
+ err = repo.ReplaceAllFlags(db.DefaultContext, []string{})
+ require.NoError(t, err)
+
+ // The repo is now unflagged
+ flagged = repo.IsFlagged(db.DefaultContext)
+ assert.False(t, flagged)
+}
diff --git a/models/repo/repo_indexer.go b/models/repo/repo_indexer.go
new file mode 100644
index 0000000..6e19d8f
--- /dev/null
+++ b/models/repo/repo_indexer.go
@@ -0,0 +1,114 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+
+ "xorm.io/builder"
+)
+
+// RepoIndexerType specifies the repository indexer type
+type RepoIndexerType int //revive:disable-line:exported
+
+const (
+ // RepoIndexerTypeCode code indexer
+ RepoIndexerTypeCode RepoIndexerType = iota // 0
+ // RepoIndexerTypeStats repository stats indexer
+ RepoIndexerTypeStats // 1
+)
+
+// RepoIndexerStatus status of a repo's entry in the repo indexer
+// For now, implicitly refers to default branch
+type RepoIndexerStatus struct { //revive:disable-line:exported
+ ID int64 `xorm:"pk autoincr"`
+ RepoID int64 `xorm:"INDEX(s)"`
+ CommitSha string `xorm:"VARCHAR(64)"`
+ IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoIndexerStatus))
+}
+
+// GetUnindexedRepos returns repos which do not have an indexer status
+func GetUnindexedRepos(ctx context.Context, indexerType RepoIndexerType, maxRepoID int64, page, pageSize int) ([]int64, error) {
+ ids := make([]int64, 0, 50)
+ cond := builder.Cond(builder.IsNull{
+ "repo_indexer_status.id",
+ }).And(builder.Eq{
+ "repository.is_empty": false,
+ })
+ sess := db.GetEngine(ctx).Table("repository").Join("LEFT OUTER", "repo_indexer_status", "repository.id = repo_indexer_status.repo_id AND repo_indexer_status.indexer_type = ?", indexerType)
+ if maxRepoID > 0 {
+ cond = builder.And(cond, builder.Lte{
+ "repository.id": maxRepoID,
+ })
+ }
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ sess.Limit(pageSize, start)
+ }
+
+ sess.Where(cond).Cols("repository.id").Desc("repository.id")
+ err := sess.Find(&ids)
+ return ids, err
+}
+
+// GetIndexerStatus loads the repo's indexer status for the given indexer type
+func GetIndexerStatus(ctx context.Context, repo *Repository, indexerType RepoIndexerType) (*RepoIndexerStatus, error) {
+ switch indexerType {
+ case RepoIndexerTypeCode:
+ if repo.CodeIndexerStatus != nil {
+ return repo.CodeIndexerStatus, nil
+ }
+ case RepoIndexerTypeStats:
+ if repo.StatsIndexerStatus != nil {
+ return repo.StatsIndexerStatus, nil
+ }
+ }
+ status := &RepoIndexerStatus{RepoID: repo.ID}
+ if has, err := db.GetEngine(ctx).Where("`indexer_type` = ?", indexerType).Get(status); err != nil {
+ return nil, err
+ } else if !has {
+ status.IndexerType = indexerType
+ status.CommitSha = ""
+ }
+ switch indexerType {
+ case RepoIndexerTypeCode:
+ repo.CodeIndexerStatus = status
+ case RepoIndexerTypeStats:
+ repo.StatsIndexerStatus = status
+ }
+ return status, nil
+}
+
+// UpdateIndexerStatus updates indexer status
+func UpdateIndexerStatus(ctx context.Context, repo *Repository, indexerType RepoIndexerType, sha string) error {
+ status, err := GetIndexerStatus(ctx, repo, indexerType)
+ if err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to getIndexerStatus for repo: %s Error: %w", repo.FullName(), err)
+ }
+
+ if len(status.CommitSha) == 0 {
+ status.CommitSha = sha
+ if err := db.Insert(ctx, status); err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to insert repoIndexerStatus for repo: %s Sha: %s Error: %w", repo.FullName(), sha, err)
+ }
+ return nil
+ }
+ status.CommitSha = sha
+ _, err = db.GetEngine(ctx).ID(status.ID).Cols("commit_sha").
+ Update(status)
+ if err != nil {
+ return fmt.Errorf("UpdateIndexerStatus: Unable to update repoIndexerStatus for repo: %s Sha: %s Error: %w", repo.FullName(), sha, err)
+ }
+ return nil
+}
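As an illustration only (not part of the patch): a minimal sketch of how the indexer-status helpers above could be driven from an indexing loop. The actual indexer lives elsewhere in the code base; indexMissing and the indexOne callback are hypothetical names, and database/settings initialization is assumed.

// indexer_sketch.go — hypothetical example, not part of this diff.
package main

import (
	"context"
	"log"

	repo_model "code.gitea.io/gitea/models/repo"
)

// indexMissing walks repositories that have no code-indexer status yet and,
// after (hypothetically) indexing them, records the indexed commit SHA.
func indexMissing(ctx context.Context, indexOne func(*repo_model.Repository) (string, error)) error {
	// maxRepoID=0 means no upper bound; page=0 with pageSize=50 fetches the first 50 IDs.
	ids, err := repo_model.GetUnindexedRepos(ctx, repo_model.RepoIndexerTypeCode, 0, 0, 50)
	if err != nil {
		return err
	}
	for _, id := range ids {
		repo, err := repo_model.GetRepositoryByID(ctx, id)
		if err != nil {
			return err
		}
		sha, err := indexOne(repo) // placeholder for the real indexing work
		if err != nil {
			log.Printf("indexing %s failed: %v", repo.FullName(), err)
			continue
		}
		if err := repo_model.UpdateIndexerStatus(ctx, repo, repo_model.RepoIndexerTypeCode, sha); err != nil {
			return err
		}
	}
	return nil
}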
diff --git a/models/repo/repo_list.go b/models/repo/repo_list.go
new file mode 100644
index 0000000..fc51f64
--- /dev/null
+++ b/models/repo/repo_list.go
@@ -0,0 +1,757 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// FindReposMapByIDs finds repositories by the given IDs and fills the given map
+func FindReposMapByIDs(ctx context.Context, repoIDs []int64, res map[int64]*Repository) error {
+ return db.GetEngine(ctx).In("id", repoIDs).Find(&res)
+}
+
+// RepositoryListDefaultPageSize is the default number of repositories
+// to load in memory when running administrative tasks on all (or almost
+// all) of them.
+// The number should be low enough to avoid filling up all RAM with
+// repository data...
+const RepositoryListDefaultPageSize = 64
+
+// RepositoryList contains a list of repositories
+type RepositoryList []*Repository
+
+func (repos RepositoryList) Len() int {
+ return len(repos)
+}
+
+func (repos RepositoryList) Less(i, j int) bool {
+ return repos[i].FullName() < repos[j].FullName()
+}
+
+func (repos RepositoryList) Swap(i, j int) {
+ repos[i], repos[j] = repos[j], repos[i]
+}
+
+// ValuesRepository converts a repository map to a list
+// FIXME: Remove in favor of maps.values when MIN_GO_VERSION >= 1.18
+func ValuesRepository(m map[int64]*Repository) []*Repository {
+ values := make([]*Repository, 0, len(m))
+ for _, v := range m {
+ values = append(values, v)
+ }
+ return values
+}
+
+// RepositoryListOfMap makes a list from the values of a map
+func RepositoryListOfMap(repoMap map[int64]*Repository) RepositoryList {
+ return RepositoryList(ValuesRepository(repoMap))
+}
+
+func (repos RepositoryList) LoadUnits(ctx context.Context) error {
+ if len(repos) == 0 {
+ return nil
+ }
+
+ // Load units.
+ units := make([]*RepoUnit, 0, len(repos)*6)
+ if err := db.GetEngine(ctx).
+ In("repo_id", repos.IDs()).
+ Find(&units); err != nil {
+ return fmt.Errorf("find units: %w", err)
+ }
+
+ unitsMap := make(map[int64][]*RepoUnit, len(repos))
+ for _, unit := range units {
+ if !unit.Type.UnitGlobalDisabled() {
+ unitsMap[unit.RepoID] = append(unitsMap[unit.RepoID], unit)
+ }
+ }
+
+ for _, repo := range repos {
+ repo.Units = unitsMap[repo.ID]
+ }
+
+ return nil
+}
+
+func (repos RepositoryList) IDs() []int64 {
+ repoIDs := make([]int64, len(repos))
+ for i := range repos {
+ repoIDs[i] = repos[i].ID
+ }
+ return repoIDs
+}
+
+// LoadAttributes loads the attributes for the given RepositoryList
+func (repos RepositoryList) LoadAttributes(ctx context.Context) error {
+ if len(repos) == 0 {
+ return nil
+ }
+
+ userIDs := container.FilterSlice(repos, func(repo *Repository) (int64, bool) {
+ return repo.OwnerID, true
+ })
+ repoIDs := make([]int64, len(repos))
+ for i := range repos {
+ repoIDs[i] = repos[i].ID
+ }
+
+ // Load owners.
+ users := make(map[int64]*user_model.User, len(userIDs))
+ if err := db.GetEngine(ctx).
+ Where("id > 0").
+ In("id", userIDs).
+ Find(&users); err != nil {
+ return fmt.Errorf("find users: %w", err)
+ }
+ for i := range repos {
+ repos[i].Owner = users[repos[i].OwnerID]
+ }
+
+ // Load primary language.
+ stats := make(LanguageStatList, 0, len(repos))
+ if err := db.GetEngine(ctx).
+ Where("`is_primary` = ? AND `language` != ?", true, "other").
+ In("`repo_id`", repoIDs).
+ Find(&stats); err != nil {
+ return fmt.Errorf("find primary languages: %w", err)
+ }
+ stats.LoadAttributes()
+ for i := range repos {
+ for _, st := range stats {
+ if st.RepoID == repos[i].ID {
+ repos[i].PrimaryLanguage = st
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+// SearchRepoOptions holds the search options
+type SearchRepoOptions struct {
+ db.ListOptions
+ Actor *user_model.User
+ Keyword string
+ OwnerID int64
+ PriorityOwnerID int64
+ TeamID int64
+ OrderBy db.SearchOrderBy
+ Private bool // Include private repositories in results
+ StarredByID int64
+ WatchedByID int64
+ AllPublic bool // Include also all public repositories of users and public organisations
+ AllLimited bool // Include also all public repositories of limited organisations
+ // None -> include public and private
+ // True -> include just private
+ // False -> include just public
+ IsPrivate optional.Option[bool]
+ // None -> include collaborative AND non-collaborative
+ // True -> include just collaborative
+ // False -> include just non-collaborative
+ Collaborate optional.Option[bool]
+ // What type of unit the user can be collaborative in,
+ // it is ignored if Collaborate is False.
+ // TypeInvalid means any unit type.
+ UnitType unit.Type
+ // None -> include forks AND non-forks
+ // True -> include just forks
+ // False -> include just non-forks
+ Fork optional.Option[bool]
+ // None -> include templates AND non-templates
+ // True -> include just templates
+ // False -> include just non-templates
+ Template optional.Option[bool]
+ // None -> include mirrors AND non-mirrors
+ // True -> include just mirrors
+ // False -> include just non-mirrors
+ Mirror optional.Option[bool]
+ // None -> include archived AND non-archived
+ // True -> include just archived
+ // False -> include just non-archived
+ Archived optional.Option[bool]
+ // only search topic name
+ TopicOnly bool
+ // only search repositories with specified primary language
+ Language string
+ // include description in keyword search
+ IncludeDescription bool
+ // None -> include has milestones AND has no milestone
+ // True -> include just has milestones
+ // False -> include just has no milestone
+ HasMilestones optional.Option[bool]
+ // LowerNames represents valid lower names to restrict to
+ LowerNames []string
+ // When set to true, apply some extra filters to the conditions:
+ // - Don't show forks when opts.Fork is unset.
+ // - Don't show repositories that have neither a description, an avatar, nor topics.
+ OnlyShowRelevant bool
+}
+
+// UserOwnedRepoCond returns a condition for repositories owned by the given user
+func UserOwnedRepoCond(userID int64) builder.Cond {
+ return builder.Eq{
+ "repository.owner_id": userID,
+ }
+}
+
+// UserAssignedRepoCond returns a condition for public repositories containing issues assigned to the user
+func UserAssignedRepoCond(id string, userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue_assignees").
+ InnerJoin("issue", "issue.id = issue_assignees.issue_id").
+ Where(builder.Eq{
+ "issue_assignees.assignee_id": userID,
+ }),
+ ),
+ )
+}
+
+// UserCreateIssueRepoCond returns a condition for public repositories containing issues (or pulls, depending on isPull) created by the user
+func UserCreateIssueRepoCond(id string, userID int64, isPull bool) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue").
+ Where(builder.Eq{
+ "issue.poster_id": userID,
+ "issue.is_pull": isPull,
+ }),
+ ),
+ )
+}
+
+// UserMentionedRepoCond returns a condition for public repositories containing issues in which the user is mentioned
+func UserMentionedRepoCond(id string, userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{
+ "repository.is_private": false,
+ },
+ builder.In(id,
+ builder.Select("issue.repo_id").From("issue_user").
+ InnerJoin("issue", "issue.id = issue_user.issue_id").
+ Where(builder.Eq{
+ "issue_user.is_mentioned": true,
+ "issue_user.uid": userID,
+ }),
+ ),
+ )
+}
+
+// UserAccessRepoCond returns a condition for selecting all repositories a user has unit independent access to
+func UserAccessRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, builder.Select("repo_id").
+ From("`access`").
+ Where(builder.And(
+ builder.Eq{"`access`.user_id": userID},
+ builder.Gt{"`access`.mode": int(perm.AccessModeNone)},
+ )),
+ )
+}
+
+// UserCollaborationRepoCond returns a condition for selecting all repositories a user is a collaborator in
+func UserCollaborationRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, builder.Select("repo_id").
+ From("`collaboration`").
+ Where(builder.And(
+ builder.Eq{"`collaboration`.user_id": userID},
+ )),
+ )
+}
+
+// UserOrgTeamRepoCond selects repos that the given user has access to through team membership
+func UserOrgTeamRepoCond(idStr string, userID int64) builder.Cond {
+ return builder.In(idStr, userOrgTeamRepoBuilder(userID))
+}
+
+// userOrgTeamRepoBuilder returns repo ids where user's teams can access.
+func userOrgTeamRepoBuilder(userID int64) *builder.Builder {
+ return builder.Select("`team_repo`.repo_id").
+ From("team_repo").
+ Join("INNER", "team_user", "`team_user`.team_id = `team_repo`.team_id").
+ Where(builder.Eq{"`team_user`.uid": userID})
+}
+
+// userOrgTeamUnitRepoBuilder returns repo ids where user's teams can access the special unit.
+func userOrgTeamUnitRepoBuilder(userID int64, unitType unit.Type) *builder.Builder {
+ return userOrgTeamRepoBuilder(userID).
+ Join("INNER", "team_unit", "`team_unit`.team_id = `team_repo`.team_id").
+ Where(builder.Eq{"`team_unit`.`type`": unitType}).
+ And(builder.Gt{"`team_unit`.`access_mode`": int(perm.AccessModeNone)})
+}
+
+// userOrgTeamUnitRepoCond returns a condition to select repo ids where user's teams can access the special unit.
+func userOrgTeamUnitRepoCond(idStr string, userID int64, unitType unit.Type) builder.Cond {
+ return builder.In(idStr, userOrgTeamUnitRepoBuilder(userID, unitType))
+}
+
+// UserOrgUnitRepoCond selects repos that the given user has access to through org and the special unit
+func UserOrgUnitRepoCond(idStr string, userID, orgID int64, unitType unit.Type) builder.Cond {
+ return builder.In(idStr,
+ userOrgTeamUnitRepoBuilder(userID, unitType).
+ And(builder.Eq{"`team_unit`.org_id": orgID}),
+ )
+}
+
+// userOrgPublicRepoCond returns the condition that one user could access all public repositories in organizations
+func userOrgPublicRepoCond(userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ builder.In("`repository`.owner_id",
+ builder.Select("`org_user`.org_id").
+ From("org_user").
+ Where(builder.Eq{"`org_user`.uid": userID}),
+ ),
+ )
+}
+
+// userOrgPublicRepoCondPrivate returns the condition that one user could access all public repositories in private organizations
+func userOrgPublicRepoCondPrivate(userID int64) builder.Cond {
+ return builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ builder.In("`repository`.owner_id",
+ builder.Select("`org_user`.org_id").
+ From("org_user").
+ Join("INNER", "`user`", "`user`.id = `org_user`.org_id").
+ Where(builder.Eq{
+ "`org_user`.uid": userID,
+ "`user`.`type`": user_model.UserTypeOrganization,
+ "`user`.visibility": structs.VisibleTypePrivate,
+ }),
+ ),
+ )
+}
+
+// UserOrgPublicUnitRepoCond returns the condition that one user could access all public repositories in the special organization
+func UserOrgPublicUnitRepoCond(userID, orgID int64) builder.Cond {
+ return userOrgPublicRepoCond(userID).
+ And(builder.Eq{"`repository`.owner_id": orgID})
+}
+
+// SearchRepositoryCondition creates a query condition according to the search repository options
+func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
+ cond := builder.NewCond()
+
+ if opts.Private {
+ if opts.Actor != nil && !opts.Actor.IsAdmin && opts.Actor.ID != opts.OwnerID {
+ // OK we're in the context of a User
+ cond = cond.And(AccessibleRepositoryCondition(opts.Actor, unit.TypeInvalid))
+ }
+ } else {
+ // Not looking at private organisations and users
+ // We should be able to see all non-private repositories that
+ // aren't in a private or limited organisation.
+ cond = cond.And(
+ builder.Eq{"is_private": false},
+ builder.NotIn("owner_id", builder.Select("id").From("`user`").Where(
+ builder.Or(builder.Eq{"visibility": structs.VisibleTypeLimited}, builder.Eq{"visibility": structs.VisibleTypePrivate}),
+ )))
+ }
+
+ if opts.IsPrivate.Has() {
+ cond = cond.And(builder.Eq{"is_private": opts.IsPrivate.Value()})
+ }
+
+ if opts.Template.Has() {
+ cond = cond.And(builder.Eq{"is_template": opts.Template.Value()})
+ }
+
+ // Restrict to starred repositories
+ if opts.StarredByID > 0 {
+ cond = cond.And(builder.In("id", builder.Select("repo_id").From("star").Where(builder.Eq{"uid": opts.StarredByID})))
+ }
+
+ // Restrict to watched repositories
+ if opts.WatchedByID > 0 {
+ cond = cond.And(builder.In("id", builder.Select("repo_id").From("watch").Where(builder.Eq{"user_id": opts.WatchedByID})))
+ }
+
+ // Restrict repositories to those the OwnerID owns or contributes to as per opts.Collaborate
+ if opts.OwnerID > 0 {
+ accessCond := builder.NewCond()
+ if !opts.Collaborate.Value() {
+ accessCond = builder.Eq{"owner_id": opts.OwnerID}
+ }
+
+ if opts.Collaborate.ValueOrDefault(true) {
+ // A Collaboration is:
+
+ collaborateCond := builder.NewCond()
+ // 1. Repository we don't own
+ collaborateCond = collaborateCond.And(builder.Neq{"owner_id": opts.OwnerID})
+ // 2. But we can see because of:
+ {
+ userAccessCond := builder.NewCond()
+ // A. We have unit independent access
+ userAccessCond = userAccessCond.Or(UserAccessRepoCond("`repository`.id", opts.OwnerID))
+ // B. We are in a team for
+ if opts.UnitType == unit.TypeInvalid {
+ userAccessCond = userAccessCond.Or(UserOrgTeamRepoCond("`repository`.id", opts.OwnerID))
+ } else {
+ userAccessCond = userAccessCond.Or(userOrgTeamUnitRepoCond("`repository`.id", opts.OwnerID, opts.UnitType))
+ }
+ // C. Public repositories in organizations that we are member of
+ userAccessCond = userAccessCond.Or(userOrgPublicRepoCondPrivate(opts.OwnerID))
+ collaborateCond = collaborateCond.And(userAccessCond)
+ }
+ if !opts.Private {
+ collaborateCond = collaborateCond.And(builder.Expr("owner_id NOT IN (SELECT org_id FROM org_user WHERE org_user.uid = ? AND org_user.is_public = ?)", opts.OwnerID, false))
+ }
+
+ accessCond = accessCond.Or(collaborateCond)
+ }
+
+ if opts.AllPublic {
+ accessCond = accessCond.Or(builder.Eq{"is_private": false}.And(builder.In("owner_id", builder.Select("`user`.id").From("`user`").Where(builder.Eq{"`user`.visibility": structs.VisibleTypePublic}))))
+ }
+
+ if opts.AllLimited {
+ accessCond = accessCond.Or(builder.Eq{"is_private": false}.And(builder.In("owner_id", builder.Select("`user`.id").From("`user`").Where(builder.Eq{"`user`.visibility": structs.VisibleTypeLimited}))))
+ }
+
+ cond = cond.And(accessCond)
+ }
+
+ if opts.TeamID > 0 {
+ cond = cond.And(builder.In("`repository`.id", builder.Select("`team_repo`.repo_id").From("team_repo").Where(builder.Eq{"`team_repo`.team_id": opts.TeamID})))
+ }
+
+ if opts.Keyword != "" {
+ // separate keyword
+ subQueryCond := builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+ if opts.TopicOnly {
+ subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
+ } else {
+ subQueryCond = subQueryCond.Or(builder.Like{"topic.name", strings.ToLower(v)})
+ }
+ }
+ subQuery := builder.Select("repo_topic.repo_id").From("repo_topic").
+ Join("INNER", "topic", "topic.id = repo_topic.topic_id").
+ Where(subQueryCond).
+ GroupBy("repo_topic.repo_id")
+
+ keywordCond := builder.In("id", subQuery)
+ if !opts.TopicOnly {
+ likes := builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+ likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
+
+ // If the string looks like "org/repo", match against that pattern too
+ if opts.TeamID == 0 && strings.Count(opts.Keyword, "/") == 1 {
+ pieces := strings.Split(opts.Keyword, "/")
+ ownerName := pieces[0]
+ repoName := pieces[1]
+ likes = likes.Or(builder.And(builder.Like{"owner_name", strings.ToLower(ownerName)}, builder.Like{"lower_name", strings.ToLower(repoName)}))
+ }
+
+ if opts.IncludeDescription {
+ likes = likes.Or(builder.Like{"LOWER(description)", strings.ToLower(v)})
+ }
+ }
+ keywordCond = keywordCond.Or(likes)
+ }
+ cond = cond.And(keywordCond)
+ }
+
+ if opts.Language != "" {
+ cond = cond.And(builder.In("id", builder.
+ Select("repo_id").
+ From("language_stat").
+ Where(builder.Eq{"language": opts.Language}).And(builder.Eq{"is_primary": true})))
+ }
+
+ if opts.Fork.Has() || opts.OnlyShowRelevant {
+ if opts.OnlyShowRelevant && !opts.Fork.Has() {
+ cond = cond.And(builder.Eq{"is_fork": false})
+ } else {
+ cond = cond.And(builder.Eq{"is_fork": opts.Fork.Value()})
+ }
+ }
+
+ if opts.Mirror.Has() {
+ cond = cond.And(builder.Eq{"is_mirror": opts.Mirror.Value()})
+ }
+
+ if opts.Actor != nil && opts.Actor.IsRestricted {
+ cond = cond.And(AccessibleRepositoryCondition(opts.Actor, unit.TypeInvalid))
+ }
+
+ if opts.Archived.Has() {
+ cond = cond.And(builder.Eq{"is_archived": opts.Archived.Value()})
+ }
+
+ if opts.HasMilestones.Has() {
+ if opts.HasMilestones.Value() {
+ cond = cond.And(builder.Gt{"num_milestones": 0})
+ } else {
+ cond = cond.And(builder.Eq{"num_milestones": 0}.Or(builder.IsNull{"num_milestones"}))
+ }
+ }
+
+ if opts.OnlyShowRelevant {
+ // Only show a repo that has at least a topic, an icon, or a description
+ subQueryCond := builder.NewCond()
+
+ // Topic checking. Topics are present.
+ if setting.Database.Type.IsPostgreSQL() { // postgres stores the topics as json and not as text
+ subQueryCond = subQueryCond.Or(builder.And(builder.NotNull{"topics"}, builder.Neq{"(topics)::text": "[]"}))
+ } else {
+ subQueryCond = subQueryCond.Or(builder.And(builder.Neq{"topics": "null"}, builder.Neq{"topics": "[]"}))
+ }
+
+ // Description checking. Description not empty
+ subQueryCond = subQueryCond.Or(builder.Neq{"description": ""})
+
+ // Repo has an avatar
+ subQueryCond = subQueryCond.Or(builder.Neq{"avatar": ""})
+
+ // Always hide repos that are empty
+ subQueryCond = subQueryCond.And(builder.Eq{"is_empty": false})
+
+ cond = cond.And(subQueryCond)
+ }
+
+ return cond
+}
+
+// SearchRepository returns repositories based on search options,
+// it returns results in given range and number of total results.
+func SearchRepository(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ cond := SearchRepositoryCondition(opts)
+ return SearchRepositoryByCondition(ctx, opts, cond, true)
+}
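As an illustration only (not part of the patch): a minimal sketch of building SearchRepoOptions and calling SearchRepository, assuming the models/db, models/repo, and modules/optional packages are importable and the database has been initialized; listPublicGoMirrors is a hypothetical helper name.

// search_sketch.go — hypothetical example, not part of this diff.
package main

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
	"code.gitea.io/gitea/modules/optional"
)

// listPublicGoMirrors searches for public, non-archived mirror repositories
// whose primary language is Go and whose name matches the keyword.
func listPublicGoMirrors(ctx context.Context, keyword string) error {
	repos, count, err := repo_model.SearchRepository(ctx, &repo_model.SearchRepoOptions{
		ListOptions: db.ListOptions{Page: 1, PageSize: 20},
		Keyword:     keyword,
		Language:    "Go",
		Mirror:      optional.Some(true),  // only mirrors
		Archived:    optional.Some(false), // skip archived repos
		OrderBy:     db.SearchOrderByAlphabetically,
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d of %d matching repositories:\n", len(repos), count)
	for _, r := range repos {
		fmt.Println(" -", r.FullName())
	}
	return nil
}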
+
+// CountRepository counts repositories based on search options.
+func CountRepository(ctx context.Context, opts *SearchRepoOptions) (int64, error) {
+ return db.GetEngine(ctx).Where(SearchRepositoryCondition(opts)).Count(new(Repository))
+}
+
+// SearchRepositoryByCondition search repositories by condition
+func SearchRepositoryByCondition(ctx context.Context, opts *SearchRepoOptions, cond builder.Cond, loadAttributes bool) (RepositoryList, int64, error) {
+ sess, count, err := searchRepositoryByCondition(ctx, opts, cond)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ defaultSize := 50
+ if opts.PageSize > 0 {
+ defaultSize = opts.PageSize
+ }
+ repos := make(RepositoryList, 0, defaultSize)
+ if err := sess.Find(&repos); err != nil {
+ return nil, 0, fmt.Errorf("Repo: %w", err)
+ }
+
+ if opts.PageSize <= 0 {
+ count = int64(len(repos))
+ }
+
+ if loadAttributes {
+ if err := repos.LoadAttributes(ctx); err != nil {
+ return nil, 0, fmt.Errorf("LoadAttributes: %w", err)
+ }
+ }
+
+ return repos, count, nil
+}
+
+func searchRepositoryByCondition(ctx context.Context, opts *SearchRepoOptions, cond builder.Cond) (db.Engine, int64, error) {
+ if opts.Page <= 0 {
+ opts.Page = 1
+ }
+
+ if len(opts.OrderBy) == 0 {
+ opts.OrderBy = db.SearchOrderByAlphabetically
+ }
+
+ args := make([]any, 0)
+ if opts.PriorityOwnerID > 0 {
+ opts.OrderBy = db.SearchOrderBy(fmt.Sprintf("CASE WHEN owner_id = ? THEN 0 ELSE owner_id END, %s", opts.OrderBy))
+ args = append(args, opts.PriorityOwnerID)
+ } else if strings.Count(opts.Keyword, "/") == 1 {
+ // With "owner/repo" search times, prioritise results which match the owner field
+ orgName := strings.Split(opts.Keyword, "/")[0]
+ opts.OrderBy = db.SearchOrderBy(fmt.Sprintf("CASE WHEN owner_name LIKE ? THEN 0 ELSE 1 END, %s", opts.OrderBy))
+ args = append(args, orgName)
+ }
+
+ sess := db.GetEngine(ctx)
+
+ var count int64
+ if opts.PageSize > 0 {
+ var err error
+ count, err = sess.
+ Where(cond).
+ Count(new(Repository))
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %w", err)
+ }
+ }
+
+ sess = sess.Where(cond).OrderBy(opts.OrderBy.String(), args...)
+ if opts.PageSize > 0 {
+ sess = sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ return sess, count, nil
+}
+
+// SearchRepositoryIDsByCondition search repository IDs by given condition.
+func SearchRepositoryIDsByCondition(ctx context.Context, cond builder.Cond) ([]int64, error) {
+ repoIDs := make([]int64, 0, 10)
+ return repoIDs, db.GetEngine(ctx).
+ Table("repository").
+ Cols("id").
+ Where(cond).
+ Find(&repoIDs)
+}
+
+// AccessibleRepositoryCondition takes a user and returns a condition for checking if a repository is accessible
+func AccessibleRepositoryCondition(user *user_model.User, unitType unit.Type) builder.Cond {
+ cond := builder.NewCond()
+
+ if user == nil || !user.IsRestricted || user.ID <= 0 {
+ orgVisibilityLimit := []structs.VisibleType{structs.VisibleTypePrivate}
+ if user == nil || user.ID <= 0 {
+ orgVisibilityLimit = append(orgVisibilityLimit, structs.VisibleTypeLimited)
+ }
+ // 1. Be able to see all non-private repositories that either:
+ cond = cond.Or(builder.And(
+ builder.Eq{"`repository`.is_private": false},
+ // 2. Aren't in a private organisation or user account, or in a limited one if the doer is not logged in.
+ builder.NotIn("`repository`.owner_id", builder.Select("id").From("`user`").Where(
+ builder.In("visibility", orgVisibilityLimit)))))
+ }
+
+ if user != nil {
+ // 2. Be able to see all repositories that we have unit independent access to
+ // 3. Be able to see all repositories through team membership(s)
+ if unitType == unit.TypeInvalid {
+ // Regardless of UnitType
+ cond = cond.Or(
+ UserAccessRepoCond("`repository`.id", user.ID),
+ UserOrgTeamRepoCond("`repository`.id", user.ID),
+ )
+ } else {
+ // For a specific UnitType
+ cond = cond.Or(
+ UserCollaborationRepoCond("`repository`.id", user.ID),
+ userOrgTeamUnitRepoCond("`repository`.id", user.ID, unitType),
+ )
+ }
+ // 4. Repositories that we directly own
+ cond = cond.Or(builder.Eq{"`repository`.owner_id": user.ID})
+ if !user.IsRestricted {
+ // 5. Be able to see all public repos in private organizations that we are an org_user of
+ cond = cond.Or(userOrgPublicRepoCond(user.ID))
+ }
+ }
+
+ return cond
+}
+
+// SearchRepositoryByName takes a keyword and part of a repository name to search;
+// it returns results in the given range and the total number of results.
+func SearchRepositoryByName(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ opts.IncludeDescription = false
+ return SearchRepository(ctx, opts)
+}
+
+// SearchRepositoryIDs takes a keyword and part of a repository name to search;
+// it returns the matching repository IDs in the given range and the total number of results.
+func SearchRepositoryIDs(ctx context.Context, opts *SearchRepoOptions) ([]int64, int64, error) {
+ opts.IncludeDescription = false
+
+ cond := SearchRepositoryCondition(opts)
+
+ sess, count, err := searchRepositoryByCondition(ctx, opts, cond)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ defaultSize := 50
+ if opts.PageSize > 0 {
+ defaultSize = opts.PageSize
+ }
+
+ ids := make([]int64, 0, defaultSize)
+ err = sess.Select("id").Table("repository").Find(&ids)
+ if opts.PageSize <= 0 {
+ count = int64(len(ids))
+ }
+
+ return ids, count, err
+}
+
+// AccessibleRepoIDsQuery queries accessible repository ids. Usable as a subquery wherever repo ids need to be filtered.
+func AccessibleRepoIDsQuery(user *user_model.User) *builder.Builder {
+ // NB: Please note this code needs to still work if user is nil
+ return builder.Select("id").From("repository").Where(AccessibleRepositoryCondition(user, unit.TypeInvalid))
+}
+
+// FindUserCodeAccessibleRepoIDs finds the IDs of all repositories whose code is accessible to the given user
+func FindUserCodeAccessibleRepoIDs(ctx context.Context, user *user_model.User) ([]int64, error) {
+ return SearchRepositoryIDsByCondition(ctx, AccessibleRepositoryCondition(user, unit.TypeCode))
+}
+
+// FindUserCodeAccessibleOwnerRepoIDs finds all repository IDs for the given owner whose code the user can see.
+func FindUserCodeAccessibleOwnerRepoIDs(ctx context.Context, ownerID int64, user *user_model.User) ([]int64, error) {
+ return SearchRepositoryIDsByCondition(ctx, builder.NewCond().And(
+ builder.Eq{"owner_id": ownerID},
+ AccessibleRepositoryCondition(user, unit.TypeCode),
+ ))
+}
+
+// GetUserRepositories returns a list of repositories of given user.
+func GetUserRepositories(ctx context.Context, opts *SearchRepoOptions) (RepositoryList, int64, error) {
+ if len(opts.OrderBy) == 0 {
+ opts.OrderBy = "updated_unix DESC"
+ }
+
+ cond := builder.NewCond()
+ if opts.Actor == nil {
+ return nil, 0, util.NewInvalidArgumentErrorf("GetUserRepositories: Actor is needed but not given")
+ }
+ cond = cond.And(builder.Eq{"owner_id": opts.Actor.ID})
+ if !opts.Private {
+ cond = cond.And(builder.Eq{"is_private": false})
+ }
+
+ if len(opts.LowerNames) > 0 {
+ cond = cond.And(builder.In("lower_name", opts.LowerNames))
+ }
+
+ sess := db.GetEngine(ctx)
+
+ count, err := sess.Where(cond).Count(new(Repository))
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %w", err)
+ }
+
+ sess = sess.Where(cond).OrderBy(opts.OrderBy.String())
+ repos := make(RepositoryList, 0, opts.PageSize)
+ return repos, count, db.SetSessionPagination(sess, opts).Find(&repos)
+}
diff --git a/models/repo/repo_list_test.go b/models/repo/repo_list_test.go
new file mode 100644
index 0000000..8c13f38
--- /dev/null
+++ b/models/repo/repo_list_test.go
@@ -0,0 +1,450 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "path/filepath"
+ "slices"
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func getTestCases() []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+} {
+ testCases := []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+ }{
+ {
+ name: "PublicRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{PageSize: 10}, Collaborate: optional.Some(false)},
+ count: 7,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 2, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "PublicRepositoriesOfUser",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "PublicRepositoriesOfUser2",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Collaborate: optional.Some(false)},
+ count: 0,
+ },
+ {
+ name: "PublicRepositoriesOfOrg3",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, Collaborate: optional.Some(false)},
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, Collaborate: optional.Some(false)},
+ count: 0,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrg3",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true, Collaborate: optional.Some(false)},
+ count: 4,
+ },
+ {
+ name: "PublicRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15},
+ count: 5,
+ },
+ {
+ name: "PublicRepositoriesOfUser2IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18},
+ count: 1,
+ },
+ {
+ name: "PublicRepositoriesOfOrg3IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20},
+ count: 3,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true},
+ count: 9,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true},
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrg3IncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true},
+ count: 7,
+ },
+ {
+ name: "PublicRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Collaborate: optional.Some(false)},
+ count: 1,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Private: true, Collaborate: optional.Some(false)},
+ count: 2,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{PageSize: 10}, AllPublic: true, Collaborate: optional.Some(false)},
+ count: 7,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "big_test_", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, AllPublic: true, Collaborate: optional.Some(false)},
+ count: 14,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, AllPublic: true, Template: optional.Some(false)},
+ count: 35,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true, AllLimited: true, Template: optional.Some(false)},
+ count: 40,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "test", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true},
+ count: 16,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
+ opts: &repo_model.SearchRepoOptions{Keyword: "test", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, AllPublic: true},
+ count: 14,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfOrganization",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, AllPublic: true, Collaborate: optional.Some(false), Template: optional.Some(false)},
+ count: 35,
+ },
+ {
+ name: "AllTemplates",
+ opts: &repo_model.SearchRepoOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Template: optional.Some(true)},
+ count: 2,
+ },
+ {
+ name: "OwnerSlashRepoSearch",
+ opts: &repo_model.SearchRepoOptions{Keyword: "user/repo2", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, OwnerID: 0},
+ count: 2,
+ },
+ {
+ name: "OwnerSlashSearch",
+ opts: &repo_model.SearchRepoOptions{Keyword: "user20/", ListOptions: db.ListOptions{Page: 1, PageSize: 10}, Private: true, OwnerID: 0},
+ count: 4,
+ },
+ }
+
+ return testCases
+}
+
+func TestSearchRepository(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // test search public repository on explore page
+ repos, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "repo_12",
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_12", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "test_repo",
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), count)
+ assert.Len(t, repos, 2)
+
+ // test search private repository on explore page
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "repo_13",
+ Private: true,
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_13", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "test_repo",
+ Private: true,
+ Collaborate: optional.Some(false),
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(3), count)
+ assert.Len(t, repos, 3)
+
+ // Test non existing owner
+ repos, count, err = repo_model.SearchRepositoryByName(db.DefaultContext, &repo_model.SearchRepoOptions{OwnerID: unittest.NonexistentID})
+
+ require.NoError(t, err)
+ assert.Empty(t, repos)
+ assert.Equal(t, int64(0), count)
+
+ // Test search within description
+ repos, count, err = repo_model.SearchRepository(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "description_14",
+ Collaborate: optional.Some(false),
+ IncludeDescription: true,
+ })
+
+ require.NoError(t, err)
+ if assert.Len(t, repos, 1) {
+ assert.Equal(t, "test_repo_14", repos[0].Name)
+ }
+ assert.Equal(t, int64(1), count)
+
+ // Test NOT search within description
+ repos, count, err = repo_model.SearchRepository(db.DefaultContext, &repo_model.SearchRepoOptions{
+ ListOptions: db.ListOptions{
+ Page: 1,
+ PageSize: 10,
+ },
+ Keyword: "description_14",
+ Collaborate: optional.Some(false),
+ IncludeDescription: false,
+ })
+
+ require.NoError(t, err)
+ assert.Empty(t, repos)
+ assert.Equal(t, int64(0), count)
+
+ testCases := getTestCases()
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ repos, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, testCase.opts)
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+
+ page := testCase.opts.Page
+ if page <= 0 {
+ page = 1
+ }
+ expectedLen := testCase.opts.PageSize
+ if testCase.opts.PageSize*page > testCase.count+testCase.opts.PageSize {
+ expectedLen = 0
+ } else if testCase.opts.PageSize*page > testCase.count {
+ expectedLen = testCase.count % testCase.opts.PageSize
+ }
+ if assert.Len(t, repos, expectedLen) {
+ for _, repo := range repos {
+ assert.NotEmpty(t, repo.Name)
+
+ if len(testCase.opts.Keyword) > 0 {
+ // Keyword match condition is different for search terms of form "owner/repo"
+ if strings.Count(testCase.opts.Keyword, "/") == 1 {
+ // May still match as a whole...
+ wholeMatch := strings.Contains(repo.Name, testCase.opts.Keyword)
+
+ pieces := strings.Split(testCase.opts.Keyword, "/")
+ ownerName := pieces[0]
+ repoName := pieces[1]
+ // ... or match in parts
+ splitMatch := strings.Contains(repo.OwnerName, ownerName) && strings.Contains(repo.Name, repoName)
+
+ assert.True(t, wholeMatch || splitMatch, "Keyword '%s' does not match repo '%s/%s'", testCase.opts.Keyword, repo.Owner.Name, repo.Name)
+ } else {
+ assert.Contains(t, repo.Name, testCase.opts.Keyword)
+ }
+ }
+
+ if !testCase.opts.Private {
+ assert.False(t, repo.IsPrivate)
+ }
+
+ if testCase.opts.Fork.Value() && testCase.opts.Mirror.Value() {
+ assert.True(t, repo.IsFork && repo.IsMirror)
+ } else {
+ if testCase.opts.Fork.Has() {
+ assert.Equal(t, testCase.opts.Fork.Value(), repo.IsFork)
+ }
+
+ if testCase.opts.Mirror.Has() {
+ assert.Equal(t, testCase.opts.Mirror.Value(), repo.IsMirror)
+ }
+ }
+
+ if testCase.opts.OwnerID > 0 && !testCase.opts.AllPublic {
+ if testCase.opts.Collaborate.Has() {
+ if testCase.opts.Collaborate.Value() {
+ assert.NotEqual(t, testCase.opts.OwnerID, repo.Owner.ID)
+ } else {
+ assert.Equal(t, testCase.opts.OwnerID, repo.Owner.ID)
+ }
+ }
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestCountRepository(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testCases := getTestCases()
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ count, err := repo_model.CountRepository(db.DefaultContext, testCase.opts)
+
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+ })
+ }
+}
+
+func TestSearchRepositoryByTopicName(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ testCases := []struct {
+ name string
+ opts *repo_model.SearchRepoOptions
+ count int
+ }{
+ {
+ name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql"},
+ count: 2,
+ },
+ {
+ name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql", TopicOnly: true},
+ count: 1,
+ },
+ {
+ name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
+ opts: &repo_model.SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql,golang", TopicOnly: true},
+ count: 2,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ _, count, err := repo_model.SearchRepositoryByName(db.DefaultContext, testCase.opts)
+ require.NoError(t, err)
+ assert.Equal(t, int64(testCase.count), count)
+ })
+ }
+}
+
+func TestSearchRepositoryIDsByCondition(t *testing.T) {
+ defer unittest.OverrideFixtures(
+ unittest.FixturesOptions{
+ Dir: filepath.Join(setting.AppWorkPath, "models/fixtures/"),
+ Base: setting.AppWorkPath,
+ Dirs: []string{"models/repo/TestSearchRepositoryIDsByCondition/"},
+ },
+ )()
+ require.NoError(t, unittest.PrepareTestDatabase())
+ // Sanity check of the database
+ limitedUser := unittest.AssertExistsAndLoadBean(t, &user.User{ID: 33, Visibility: structs.VisibleTypeLimited})
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1001, OwnerID: limitedUser.ID})
+
+ testCases := []struct {
+ user *user.User
+ repoIDs []int64
+ }{
+ {
+ user: nil,
+ repoIDs: []int64{1, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1059},
+ },
+ {
+ user: unittest.AssertExistsAndLoadBean(t, &user.User{ID: 4}),
+ repoIDs: []int64{1, 3, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 38, 40, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1001, 1059},
+ },
+ {
+ user: unittest.AssertExistsAndLoadBean(t, &user.User{ID: 5}),
+ repoIDs: []int64{1, 4, 8, 9, 10, 11, 12, 14, 17, 18, 21, 23, 25, 27, 29, 32, 33, 34, 35, 36, 37, 38, 40, 42, 44, 45, 46, 47, 48, 49, 50, 51, 53, 57, 58, 60, 61, 62, 1001, 1059},
+ },
+ }
+
+ for _, testCase := range testCases {
+ repoIDs, err := repo_model.FindUserCodeAccessibleRepoIDs(db.DefaultContext, testCase.user)
+ require.NoError(t, err)
+
+ slices.Sort(repoIDs)
+ assert.EqualValues(t, testCase.repoIDs, repoIDs)
+ }
+}
diff --git a/models/repo/repo_repository.go b/models/repo/repo_repository.go
new file mode 100644
index 0000000..6780165
--- /dev/null
+++ b/models/repo/repo_repository.go
@@ -0,0 +1,60 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/validation"
+)
+
+func init() {
+ db.RegisterModel(new(FollowingRepo))
+}
+
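+// FindFollowingReposByRepoID returns the following repositories recorded for the given
+// repository ID (at most ten), validating each entry before returning the list.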
+func FindFollowingReposByRepoID(ctx context.Context, repoID int64) ([]*FollowingRepo, error) {
+ maxFollowingRepos := 10
+ sess := db.GetEngine(ctx).Where("repo_id=?", repoID)
+ sess = sess.Limit(maxFollowingRepos, 0)
+ followingRepoList := make([]*FollowingRepo, 0, maxFollowingRepos)
+ err := sess.Find(&followingRepoList)
+ if err != nil {
+ return make([]*FollowingRepo, 0, maxFollowingRepos), err
+ }
+ for _, followingRepo := range followingRepoList {
+ if res, err := validation.IsValid(*followingRepo); !res {
+ return make([]*FollowingRepo, 0, maxFollowingRepos), err
+ }
+ }
+ return followingRepoList, nil
+}
+
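+// StoreFollowingRepos validates the given following repositories and then, inside a single
+// transaction, replaces the entries stored for localRepoID with them.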
+func StoreFollowingRepos(ctx context.Context, localRepoID int64, followingRepoList []*FollowingRepo) error {
+ for _, followingRepo := range followingRepoList {
+ if res, err := validation.IsValid(*followingRepo); !res {
+ return err
+ }
+ }
+
+ // Begin transaction
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ _, err = db.GetEngine(ctx).Where("repo_id=?", localRepoID).Delete(FollowingRepo{})
+ if err != nil {
+ return err
+ }
+ for _, followingRepo := range followingRepoList {
+ _, err = db.GetEngine(ctx).Insert(followingRepo)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Commit transaction
+ return committer.Commit()
+}
diff --git a/models/repo/repo_test.go b/models/repo/repo_test.go
new file mode 100644
index 0000000..56b8479
--- /dev/null
+++ b/models/repo/repo_test.go
@@ -0,0 +1,230 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/optional"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ countRepospts = repo_model.CountRepositoryOptions{OwnerID: 10}
+ countReposptsPublic = repo_model.CountRepositoryOptions{OwnerID: 10, Private: optional.Some(false)}
+ countReposptsPrivate = repo_model.CountRepositoryOptions{OwnerID: 10, Private: optional.Some(true)}
+)
+
+func TestGetRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ ctx := db.DefaultContext
+ count, err1 := repo_model.CountRepositories(ctx, countRepospts)
+ privateCount, err2 := repo_model.CountRepositories(ctx, countReposptsPrivate)
+ publicCount, err3 := repo_model.CountRepositories(ctx, countReposptsPublic)
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ require.NoError(t, err3)
+ assert.Equal(t, int64(3), count)
+ assert.Equal(t, privateCount+publicCount, count)
+}
+
+func TestGetPublicRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.CountRepositories(db.DefaultContext, countReposptsPublic)
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), count)
+}
+
+func TestGetPrivateRepositoryCount(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ count, err := repo_model.CountRepositories(db.DefaultContext, countReposptsPrivate)
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), count)
+}
+
+func TestRepoAPIURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 10})
+
+ assert.Equal(t, "https://try.gitea.io/api/v1/repos/user12/repo10", repo.APIURL())
+}
+
+func TestWatchRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const repoID = 3
+ const userID = 2
+
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{RepoID: repoID, UserID: userID})
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repoID})
+
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{RepoID: repoID, UserID: userID})
+ unittest.CheckConsistencyFor(t, &repo_model.Repository{ID: repoID})
+}
+
+func TestMetas(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := &repo_model.Repository{Name: "testRepo"}
+ repo.Owner = &user_model.User{Name: "testOwner"}
+ repo.OwnerName = repo.Owner.Name
+
+ repo.Units = nil
+
+ metas := repo.ComposeMetas(db.DefaultContext)
+ assert.Equal(t, "testRepo", metas["repo"])
+ assert.Equal(t, "testOwner", metas["user"])
+
+ externalTracker := repo_model.RepoUnit{
+ Type: unit.TypeExternalTracker,
+ Config: &repo_model.ExternalTrackerConfig{
+ ExternalTrackerFormat: "https://someurl.com/{user}/{repo}/{issue}",
+ },
+ }
+
+ testSuccess := func(expectedStyle string) {
+ repo.Units = []*repo_model.RepoUnit{&externalTracker}
+ repo.RenderingMetas = nil
+ metas := repo.ComposeMetas(db.DefaultContext)
+ assert.Equal(t, expectedStyle, metas["style"])
+ assert.Equal(t, "testRepo", metas["repo"])
+ assert.Equal(t, "testOwner", metas["user"])
+ assert.Equal(t, "https://someurl.com/{user}/{repo}/{issue}", metas["format"])
+ }
+
+ testSuccess(markup.IssueNameStyleNumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleAlphanumeric
+ testSuccess(markup.IssueNameStyleAlphanumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleNumeric
+ testSuccess(markup.IssueNameStyleNumeric)
+
+ externalTracker.ExternalTrackerConfig().ExternalTrackerStyle = markup.IssueNameStyleRegexp
+ testSuccess(markup.IssueNameStyleRegexp)
+
+ repo, err := repo_model.GetRepositoryByID(db.DefaultContext, 3)
+ require.NoError(t, err)
+
+ metas = repo.ComposeMetas(db.DefaultContext)
+ assert.Contains(t, metas, "org")
+ assert.Contains(t, metas, "teams")
+ assert.Equal(t, "org3", metas["org"])
+ assert.Equal(t, ",owners,team1,", metas["teams"])
+}
+
+func TestGetRepositoryByURL(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ t.Run("InvalidPath", func(t *testing.T) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, "something")
+
+ assert.Nil(t, repo)
+ require.Error(t, err)
+ })
+
+ t.Run("ValidHttpURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "https://try.gitea.io/user2/repo2")
+ test(t, "https://try.gitea.io/user2/repo2.git")
+ })
+
+ t.Run("ValidGitSshURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "git+ssh://sshuser@try.gitea.io/user2/repo2")
+ test(t, "git+ssh://sshuser@try.gitea.io/user2/repo2.git")
+
+ test(t, "git+ssh://try.gitea.io/user2/repo2")
+ test(t, "git+ssh://try.gitea.io/user2/repo2.git")
+ })
+
+ t.Run("ValidImplicitSshURL", func(t *testing.T) {
+ test := func(t *testing.T, url string) {
+ repo, err := repo_model.GetRepositoryByURL(db.DefaultContext, url)
+
+ assert.NotNil(t, repo)
+ require.NoError(t, err)
+
+ assert.Equal(t, int64(2), repo.ID)
+ assert.Equal(t, int64(2), repo.OwnerID)
+ }
+
+ test(t, "sshuser@try.gitea.io:user2/repo2")
+ test(t, "sshuser@try.gitea.io:user2/repo2.git")
+
+ test(t, "try.gitea.io:user2/repo2")
+ test(t, "try.gitea.io:user2/repo2.git")
+ })
+}
+
+func TestComposeSSHCloneURL(t *testing.T) {
+ defer test.MockVariableValue(&setting.SSH, setting.SSH)()
+ defer test.MockVariableValue(&setting.Repository, setting.Repository)()
+
+ setting.SSH.User = "git"
+
+ // test SSH_DOMAIN
+ setting.SSH.Domain = "domain"
+ setting.SSH.Port = 22
+ setting.Repository.UseCompatSSHURI = false
+ assert.Equal(t, "git@domain:user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.Repository.UseCompatSSHURI = true
+ assert.Equal(t, "ssh://git@domain/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ // test SSH_DOMAIN while use non-standard SSH port
+ setting.SSH.Port = 123
+ setting.Repository.UseCompatSSHURI = false
+ assert.Equal(t, "ssh://git@domain:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.Repository.UseCompatSSHURI = true
+ assert.Equal(t, "ssh://git@domain:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+
+ // test IPv6 SSH_DOMAIN
+ setting.Repository.UseCompatSSHURI = false
+ setting.SSH.Domain = "::1"
+ setting.SSH.Port = 22
+ assert.Equal(t, "git@[::1]:user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+ setting.SSH.Port = 123
+ assert.Equal(t, "ssh://git@[::1]:123/user/repo.git", repo_model.ComposeSSHCloneURL("user", "repo"))
+}
+
+func TestAPActorID(t *testing.T) {
+ repo := repo_model.Repository{ID: 1}
+ url := repo.APActorID()
+ expected := "https://try.gitea.io/api/v1/activitypub/repository-id/1"
+ if url != expected {
+ t.Errorf("unexpected APActorID, expected: %q, actual: %q", expected, url)
+ }
+}
diff --git a/models/repo/repo_unit.go b/models/repo/repo_unit.go
new file mode 100644
index 0000000..ed55384
--- /dev/null
+++ b/models/repo/repo_unit.go
@@ -0,0 +1,317 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/xorm"
+ "xorm.io/xorm/convert"
+)
+
+// ErrUnitTypeNotExist represents a "UnitTypeNotExist" kind of error.
+type ErrUnitTypeNotExist struct {
+ UT unit.Type
+}
+
+// IsErrUnitTypeNotExist checks if an error is an ErrUnitTypeNotExist.
+func IsErrUnitTypeNotExist(err error) bool {
+ _, ok := err.(ErrUnitTypeNotExist)
+ return ok
+}
+
+func (err ErrUnitTypeNotExist) Error() string {
+ return fmt.Sprintf("Unit type does not exist: %s", err.UT.String())
+}
+
+func (err ErrUnitTypeNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// UnitAccessMode specifies the user's access mode to a repo unit
+type UnitAccessMode int
+
+const (
+ // UnitAccessModeUnset - no unit mode set
+ UnitAccessModeUnset UnitAccessMode = iota // 0
+ // UnitAccessModeNone no access
+ UnitAccessModeNone // 1
+ // UnitAccessModeRead read access
+ UnitAccessModeRead // 2
+ // UnitAccessModeWrite write access
+ UnitAccessModeWrite // 3
+)
+
+func (mode UnitAccessMode) ToAccessMode(modeIfUnset perm.AccessMode) perm.AccessMode {
+ switch mode {
+ case UnitAccessModeUnset:
+ return modeIfUnset
+ case UnitAccessModeNone:
+ return perm.AccessModeNone
+ case UnitAccessModeRead:
+ return perm.AccessModeRead
+ case UnitAccessModeWrite:
+ return perm.AccessModeWrite
+ default:
+ return perm.AccessModeNone
+ }
+}
+
+// RepoUnit describes all units of a repository
+type RepoUnit struct { //revive:disable-line:exported
+ ID int64
+ RepoID int64 `xorm:"INDEX(s)"`
+ Type unit.Type `xorm:"INDEX(s)"`
+ Config convert.Conversion `xorm:"TEXT"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+ DefaultPermissions UnitAccessMode `xorm:"NOT NULL DEFAULT 0"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoUnit))
+}
+
+// UnitConfig describes common unit config
+type UnitConfig struct{}
+
+// FromDB fills up a UnitConfig from serialized format.
+func (cfg *UnitConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports a UnitConfig to a serialized format.
+func (cfg *UnitConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// ExternalWikiConfig describes external wiki config
+type ExternalWikiConfig struct {
+ ExternalWikiURL string
+}
+
+// FromDB fills up an ExternalWikiConfig from serialized format.
+func (cfg *ExternalWikiConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ExternalWikiConfig to a serialized format.
+func (cfg *ExternalWikiConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// ExternalTrackerConfig describes external tracker config
+type ExternalTrackerConfig struct {
+ ExternalTrackerURL string
+ ExternalTrackerFormat string
+ ExternalTrackerStyle string
+ ExternalTrackerRegexpPattern string
+}
+
+// FromDB fills up an ExternalTrackerConfig from serialized format.
+func (cfg *ExternalTrackerConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ExternalTrackerConfig to a serialized format.
+func (cfg *ExternalTrackerConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// IssuesConfig describes issues config
+type IssuesConfig struct {
+ EnableTimetracker bool
+ AllowOnlyContributorsToTrackTime bool
+ EnableDependencies bool
+}
+
+// FromDB fills up an IssuesConfig from serialized format.
+func (cfg *IssuesConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an IssuesConfig to a serialized format.
+func (cfg *IssuesConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// PullRequestsConfig describes pull requests config
+type PullRequestsConfig struct {
+ IgnoreWhitespaceConflicts bool
+ AllowMerge bool
+ AllowRebase bool
+ AllowRebaseMerge bool
+ AllowSquash bool
+ AllowFastForwardOnly bool
+ AllowManualMerge bool
+ AutodetectManualMerge bool
+ AllowRebaseUpdate bool
+ DefaultDeleteBranchAfterMerge bool
+ DefaultMergeStyle MergeStyle
+ DefaultAllowMaintainerEdit bool
+}
+
+// FromDB fills up a PullRequestsConfig from serialized format.
+func (cfg *PullRequestsConfig) FromDB(bs []byte) error {
+ // AllowRebaseUpdate defaults to true for existing PullRequestsConfig rows in the DB
+ cfg.AllowRebaseUpdate = true
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports a PullRequestsConfig to a serialized format.
+func (cfg *PullRequestsConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// IsMergeStyleAllowed reports whether the given merge style is allowed
+func (cfg *PullRequestsConfig) IsMergeStyleAllowed(mergeStyle MergeStyle) bool {
+ return mergeStyle == MergeStyleMerge && cfg.AllowMerge ||
+ mergeStyle == MergeStyleRebase && cfg.AllowRebase ||
+ mergeStyle == MergeStyleRebaseMerge && cfg.AllowRebaseMerge ||
+ mergeStyle == MergeStyleSquash && cfg.AllowSquash ||
+ mergeStyle == MergeStyleFastForwardOnly && cfg.AllowFastForwardOnly ||
+ mergeStyle == MergeStyleManuallyMerged && cfg.AllowManualMerge
+}
+
+// GetDefaultMergeStyle returns the default merge style for this pull request
+func (cfg *PullRequestsConfig) GetDefaultMergeStyle() MergeStyle {
+ if len(cfg.DefaultMergeStyle) != 0 {
+ return cfg.DefaultMergeStyle
+ }
+
+ if setting.Repository.PullRequest.DefaultMergeStyle != "" {
+ return MergeStyle(setting.Repository.PullRequest.DefaultMergeStyle)
+ }
+
+ return MergeStyleMerge
+}
+
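+// ActionsConfig holds the list of workflow files that are disabled for a repository.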
+type ActionsConfig struct {
+ DisabledWorkflows []string
+}
+
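+// EnableWorkflow removes the given workflow file from the disabled list.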
+func (cfg *ActionsConfig) EnableWorkflow(file string) {
+ cfg.DisabledWorkflows = util.SliceRemoveAll(cfg.DisabledWorkflows, file)
+}
+
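+// ToString joins the disabled workflow file names into a comma-separated string.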
+func (cfg *ActionsConfig) ToString() string {
+ return strings.Join(cfg.DisabledWorkflows, ",")
+}
+
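+// IsWorkflowDisabled reports whether the given workflow file is currently disabled.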
+func (cfg *ActionsConfig) IsWorkflowDisabled(file string) bool {
+ return slices.Contains(cfg.DisabledWorkflows, file)
+}
+
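+// DisableWorkflow adds the given workflow file to the disabled list if it is not already there.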
+func (cfg *ActionsConfig) DisableWorkflow(file string) {
+ for _, workflow := range cfg.DisabledWorkflows {
+ if file == workflow {
+ return
+ }
+ }
+
+ cfg.DisabledWorkflows = append(cfg.DisabledWorkflows, file)
+}
+
+// FromDB fills up an ActionsConfig from serialized format.
+func (cfg *ActionsConfig) FromDB(bs []byte) error {
+ return json.UnmarshalHandleDoubleEncode(bs, &cfg)
+}
+
+// ToDB exports an ActionsConfig to a serialized format.
+func (cfg *ActionsConfig) ToDB() ([]byte, error) {
+ return json.Marshal(cfg)
+}
+
+// BeforeSet is invoked from XORM before setting the value of a field of this object.
+func (r *RepoUnit) BeforeSet(colName string, val xorm.Cell) {
+ if colName == "type" {
+ switch unit.Type(db.Cell2Int64(val)) {
+ case unit.TypeExternalWiki:
+ r.Config = new(ExternalWikiConfig)
+ case unit.TypeExternalTracker:
+ r.Config = new(ExternalTrackerConfig)
+ case unit.TypePullRequests:
+ r.Config = new(PullRequestsConfig)
+ case unit.TypeIssues:
+ r.Config = new(IssuesConfig)
+ case unit.TypeActions:
+ r.Config = new(ActionsConfig)
+ case unit.TypeCode, unit.TypeReleases, unit.TypeWiki, unit.TypeProjects, unit.TypePackages:
+ fallthrough
+ default:
+ r.Config = new(UnitConfig)
+ }
+ }
+}
+
+// Unit returns Unit
+func (r *RepoUnit) Unit() unit.Unit {
+ return unit.Units[r.Type]
+}
+
+// CodeConfig returns config for unit.TypeCode
+func (r *RepoUnit) CodeConfig() *UnitConfig {
+ return r.Config.(*UnitConfig)
+}
+
+// PullRequestsConfig returns config for unit.TypePullRequests
+func (r *RepoUnit) PullRequestsConfig() *PullRequestsConfig {
+ return r.Config.(*PullRequestsConfig)
+}
+
+// ReleasesConfig returns config for unit.TypeReleases
+func (r *RepoUnit) ReleasesConfig() *UnitConfig {
+ return r.Config.(*UnitConfig)
+}
+
+// ExternalWikiConfig returns config for unit.TypeExternalWiki
+func (r *RepoUnit) ExternalWikiConfig() *ExternalWikiConfig {
+ return r.Config.(*ExternalWikiConfig)
+}
+
+// IssuesConfig returns config for unit.TypeIssues
+func (r *RepoUnit) IssuesConfig() *IssuesConfig {
+ return r.Config.(*IssuesConfig)
+}
+
+// ExternalTrackerConfig returns config for unit.TypeExternalTracker
+func (r *RepoUnit) ExternalTrackerConfig() *ExternalTrackerConfig {
+ return r.Config.(*ExternalTrackerConfig)
+}
+
+// ActionsConfig returns config for unit.TypeActions
+func (r *RepoUnit) ActionsConfig() *ActionsConfig {
+ return r.Config.(*ActionsConfig)
+}
+
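+// getUnitsByRepoID returns the units of a repository, skipping any unit type that is globally disabled.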
+func getUnitsByRepoID(ctx context.Context, repoID int64) (units []*RepoUnit, err error) {
+ var tmpUnits []*RepoUnit
+ if err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Find(&tmpUnits); err != nil {
+ return nil, err
+ }
+
+ for _, u := range tmpUnits {
+ if !u.Type.UnitGlobalDisabled() {
+ units = append(units, u)
+ }
+ }
+
+ return units, nil
+}
+
+// UpdateRepoUnit updates the provided repo unit
+func UpdateRepoUnit(ctx context.Context, unit *RepoUnit) error {
+ _, err := db.GetEngine(ctx).ID(unit.ID).Update(unit)
+ return err
+}
diff --git a/models/repo/repo_unit_test.go b/models/repo/repo_unit_test.go
new file mode 100644
index 0000000..deee1a7
--- /dev/null
+++ b/models/repo/repo_unit_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/perm"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestActionsConfig(t *testing.T) {
+ cfg := &ActionsConfig{}
+ cfg.DisableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{"test1.yaml"}, cfg.DisabledWorkflows)
+
+ cfg.DisableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{"test1.yaml"}, cfg.DisabledWorkflows)
+
+ cfg.EnableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{}, cfg.DisabledWorkflows)
+
+ cfg.EnableWorkflow("test1.yaml")
+ assert.EqualValues(t, []string{}, cfg.DisabledWorkflows)
+
+ cfg.DisableWorkflow("test1.yaml")
+ cfg.DisableWorkflow("test2.yaml")
+ cfg.DisableWorkflow("test3.yaml")
+ assert.EqualValues(t, "test1.yaml,test2.yaml,test3.yaml", cfg.ToString())
+}
+
+func TestRepoUnitAccessMode(t *testing.T) {
+ assert.Equal(t, perm.AccessModeNone, UnitAccessModeNone.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeRead, UnitAccessModeRead.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeWrite, UnitAccessModeWrite.ToAccessMode(perm.AccessModeAdmin))
+ assert.Equal(t, perm.AccessModeRead, UnitAccessModeUnset.ToAccessMode(perm.AccessModeRead))
+}
diff --git a/models/repo/search.go b/models/repo/search.go
new file mode 100644
index 0000000..ffb8e26
--- /dev/null
+++ b/models/repo/search.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import "code.gitea.io/gitea/models/db"
+
+// OrderByMap represents all possible search orders
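+// For example, OrderByMap["desc"]["stars"] yields the "most stars first" ordering.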
+var OrderByMap = map[string]map[string]db.SearchOrderBy{
+ "asc": {
+ "alpha": "owner_name ASC, name ASC",
+ "created": db.SearchOrderByOldest,
+ "updated": db.SearchOrderByLeastUpdated,
+ "size": "size ASC",
+ "git_size": "git_size ASC",
+ "lfs_size": "lfs_size ASC",
+ "id": db.SearchOrderByID,
+ "stars": db.SearchOrderByStars,
+ "forks": db.SearchOrderByForks,
+ },
+ "desc": {
+ "alpha": "owner_name DESC, name DESC",
+ "created": db.SearchOrderByNewest,
+ "updated": db.SearchOrderByRecentUpdated,
+ "size": "size DESC",
+ "git_size": "git_size DESC",
+ "lfs_size": "lfs_size DESC",
+ "id": db.SearchOrderByIDReverse,
+ "stars": db.SearchOrderByStarsReverse,
+ "forks": db.SearchOrderByForksReverse,
+ },
+}
+
+// OrderByFlatMap is similar to OrderByMap but uses human-language keywords
+// to decide between asc and desc
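+// For example, "moststars" maps to OrderByMap["desc"]["stars"] and "oldest" to OrderByMap["asc"]["created"].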
+var OrderByFlatMap = map[string]db.SearchOrderBy{
+ "newest": OrderByMap["desc"]["created"],
+ "oldest": OrderByMap["asc"]["created"],
+ "recentupdate": OrderByMap["desc"]["updated"],
+ "leastupdate": OrderByMap["asc"]["updated"],
+ "reversealphabetically": OrderByMap["desc"]["alpha"],
+ "alphabetically": OrderByMap["asc"]["alpha"],
+ "reversesize": OrderByMap["desc"]["size"],
+ "size": OrderByMap["asc"]["size"],
+ "reversegitsize": OrderByMap["desc"]["git_size"],
+ "gitsize": OrderByMap["asc"]["git_size"],
+ "reverselfssize": OrderByMap["desc"]["lfs_size"],
+ "lfssize": OrderByMap["asc"]["lfs_size"],
+ "moststars": OrderByMap["desc"]["stars"],
+ "feweststars": OrderByMap["asc"]["stars"],
+ "mostforks": OrderByMap["desc"]["forks"],
+ "fewestforks": OrderByMap["asc"]["forks"],
+}
diff --git a/models/repo/star.go b/models/repo/star.go
new file mode 100644
index 0000000..6073714
--- /dev/null
+++ b/models/repo/star.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// Star represents a repository starred by a user.
+type Star struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"UNIQUE(s)"`
+ RepoID int64 `xorm:"UNIQUE(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+}
+
+func init() {
+ db.RegisterModel(new(Star))
+}
+
+// StarRepo stars or unstars a repository.
+func StarRepo(ctx context.Context, userID, repoID int64, star bool) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ staring := IsStaring(ctx, userID, repoID)
+
+ if star {
+ if staring {
+ return nil
+ }
+
+ if err := db.Insert(ctx, &Star{UID: userID, RepoID: repoID}); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ } else {
+ if !staring {
+ return nil
+ }
+
+ if _, err := db.DeleteByBean(ctx, &Star{UID: userID, RepoID: repoID}); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ }
+
+ return committer.Commit()
+}
+
+// IsStaring checks if the user has starred the given repository.
+func IsStaring(ctx context.Context, userID, repoID int64) bool {
+ has, _ := db.GetEngine(ctx).Get(&Star{UID: userID, RepoID: repoID})
+ return has
+}
+
+// GetStargazers returns the users that starred the repo.
+func GetStargazers(ctx context.Context, repo *Repository, opts db.ListOptions) ([]*user_model.User, error) {
+ sess := db.GetEngine(ctx).Where("star.repo_id = ?", repo.ID).
+ Join("LEFT", "star", "`user`.id = star.uid")
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+
+ users := make([]*user_model.User, 0, opts.PageSize)
+ return users, sess.Find(&users)
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, sess.Find(&users)
+}
+
+// ClearRepoStars clears all stars for a repository and from the users that starred it.
+// Used when a repository is set to private.
+func ClearRepoStars(ctx context.Context, repoID int64) error {
+ if _, err := db.Exec(ctx, "UPDATE `user` SET num_stars=num_stars-1 WHERE id IN (SELECT `uid` FROM `star` WHERE repo_id = ?)", repoID); err != nil {
+ return err
+ }
+
+ if _, err := db.Exec(ctx, "UPDATE `repository` SET num_stars = 0 WHERE id = ?", repoID); err != nil {
+ return err
+ }
+
+ return db.DeleteBeans(ctx, Star{RepoID: repoID})
+}
diff --git a/models/repo/star_test.go b/models/repo/star_test.go
new file mode 100644
index 0000000..73b362c
--- /dev/null
+++ b/models/repo/star_test.go
@@ -0,0 +1,72 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStarRepo(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const userID = 2
+ const repoID = 1
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+}
+
+func TestIsStaring(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ assert.True(t, repo_model.IsStaring(db.DefaultContext, 2, 4))
+ assert.False(t, repo_model.IsStaring(db.DefaultContext, 3, 4))
+}
+
+func TestRepository_GetStargazers(t *testing.T) {
+ // repo with stargazers
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 4})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ if assert.Len(t, gazers, 1) {
+ assert.Equal(t, int64(2), gazers[0].ID)
+ }
+}
+
+func TestRepository_GetStargazers2(t *testing.T) {
+ // repo with stargazers
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ assert.Empty(t, gazers)
+}
+
+func TestClearRepoStars(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ const userID = 2
+ const repoID = 1
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, true))
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.StarRepo(db.DefaultContext, userID, repoID, false))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+ require.NoError(t, repo_model.ClearRepoStars(db.DefaultContext, repoID))
+ unittest.AssertNotExistsBean(t, &repo_model.Star{UID: userID, RepoID: repoID})
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ gazers, err := repo_model.GetStargazers(db.DefaultContext, repo, db.ListOptions{Page: 0})
+ require.NoError(t, err)
+ assert.Empty(t, gazers)
+}
diff --git a/models/repo/topic.go b/models/repo/topic.go
new file mode 100644
index 0000000..6db6c8a
--- /dev/null
+++ b/models/repo/topic.go
@@ -0,0 +1,389 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+func init() {
+ db.RegisterModel(new(Topic))
+ db.RegisterModel(new(RepoTopic))
+}
+
+var topicPattern = regexp.MustCompile(`^[a-z0-9][-.a-z0-9]*$`)
+
+// Topic represents a topic of repositories
+type Topic struct {
+ ID int64 `xorm:"pk autoincr"`
+ Name string `xorm:"UNIQUE VARCHAR(50)"`
+ RepoCount int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+// RepoTopic represents associated repositories and topics
+type RepoTopic struct { //revive:disable-line:exported
+ RepoID int64 `xorm:"pk"`
+ TopicID int64 `xorm:"pk"`
+}
+
+// ErrTopicNotExist represents an error where a topic does not exist
+type ErrTopicNotExist struct {
+ Name string
+}
+
+// IsErrTopicNotExist checks if an error is an ErrTopicNotExist.
+func IsErrTopicNotExist(err error) bool {
+ _, ok := err.(ErrTopicNotExist)
+ return ok
+}
+
+// Error implements error interface
+func (err ErrTopicNotExist) Error() string {
+ return fmt.Sprintf("topic is not exist [name: %s]", err.Name)
+}
+
+func (err ErrTopicNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// ValidateTopic checks a topic against the length and pattern rules
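+// For example, "machine-learning" is valid, while "Machine Learning" is not (uppercase letters and spaces are rejected).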
+func ValidateTopic(topic string) bool {
+ return len(topic) <= 35 && topicPattern.MatchString(topic)
+}
+
+// SanitizeAndValidateTopics sanitizes and checks an array of topics
+func SanitizeAndValidateTopics(topics []string) (validTopics, invalidTopics []string) {
+ validTopics = make([]string, 0)
+ mValidTopics := make(container.Set[string])
+ invalidTopics = make([]string, 0)
+
+ for _, topic := range topics {
+ topic = strings.TrimSpace(strings.ToLower(topic))
+ // ignore empty string
+ if len(topic) == 0 {
+ continue
+ }
+ // ignore same topic twice
+ if mValidTopics.Contains(topic) {
+ continue
+ }
+ if ValidateTopic(topic) {
+ validTopics = append(validTopics, topic)
+ mValidTopics.Add(topic)
+ } else {
+ invalidTopics = append(invalidTopics, topic)
+ }
+ }
+
+ return validTopics, invalidTopics
+}
+
+// GetTopicByName retrieves topic by name
+func GetTopicByName(ctx context.Context, name string) (*Topic, error) {
+ var topic Topic
+ if has, err := db.GetEngine(ctx).Where("name = ?", name).Get(&topic); err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrTopicNotExist{name}
+ }
+ return &topic, nil
+}
+
+// addTopicByNameToRepo adds a topic name to a repo and increments the topic count.
+// Returns topic after the addition
+func addTopicByNameToRepo(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ var topic Topic
+ e := db.GetEngine(ctx)
+ has, err := e.Where("name = ?", topicName).Get(&topic)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ topic.Name = topicName
+ topic.RepoCount = 1
+ if err := db.Insert(ctx, &topic); err != nil {
+ return nil, err
+ }
+ } else {
+ topic.RepoCount++
+ if _, err := e.ID(topic.ID).Cols("repo_count").Update(&topic); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := db.Insert(ctx, &RepoTopic{
+ RepoID: repoID,
+ TopicID: topic.ID,
+ }); err != nil {
+ return nil, err
+ }
+
+ return &topic, nil
+}
+
+// removeTopicFromRepo removes a topic from a repo and decrements the topic's repo count
+func removeTopicFromRepo(ctx context.Context, repoID int64, topic *Topic) error {
+ topic.RepoCount--
+ e := db.GetEngine(ctx)
+ if _, err := e.ID(topic.ID).Cols("repo_count").Update(topic); err != nil {
+ return err
+ }
+
+ if _, err := e.Delete(&RepoTopic{
+ RepoID: repoID,
+ TopicID: topic.ID,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RemoveTopicsFromRepo removes all topics from the repo and decrements the respective topics' repo counts
+func RemoveTopicsFromRepo(ctx context.Context, repoID int64) error {
+ e := db.GetEngine(ctx)
+ _, err := e.Where(
+ builder.In("id",
+ builder.Select("topic_id").From("repo_topic").Where(builder.Eq{"repo_id": repoID}),
+ ),
+ ).Cols("repo_count").SetExpr("repo_count", "repo_count-1").Update(&Topic{})
+ if err != nil {
+ return err
+ }
+
+ if _, err = e.Delete(&RepoTopic{RepoID: repoID}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// FindTopicOptions represents the options when finding topics
+type FindTopicOptions struct {
+ db.ListOptions
+ RepoID int64
+ Keyword string
+}
+
+func (opts *FindTopicOptions) toConds() builder.Cond {
+ cond := builder.NewCond()
+ if opts.RepoID > 0 {
+ cond = cond.And(builder.Eq{"repo_topic.repo_id": opts.RepoID})
+ }
+
+ if opts.Keyword != "" {
+ cond = cond.And(builder.Like{"topic.name", opts.Keyword})
+ }
+
+ return cond
+}
+
+// FindTopics retrieves the topics via FindTopicOptions
+func FindTopics(ctx context.Context, opts *FindTopicOptions) ([]*Topic, int64, error) {
+ sess := db.GetEngine(ctx).Select("topic.*").Where(opts.toConds())
+ orderBy := "topic.repo_count DESC"
+ if opts.RepoID > 0 {
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+ orderBy = "topic.name" // when render topics for a repo, it's better to sort them by name, to get consistent result
+ }
+ if opts.PageSize > 0 {
+ sess = db.SetSessionPagination(sess, opts)
+ }
+ topics := make([]*Topic, 0, 10)
+ total, err := sess.OrderBy(orderBy).FindAndCount(&topics)
+ return topics, total, err
+}
+
+// CountTopics counts the number of topics matching the FindTopicOptions
+func CountTopics(ctx context.Context, opts *FindTopicOptions) (int64, error) {
+ sess := db.GetEngine(ctx).Where(opts.toConds())
+ if opts.RepoID > 0 {
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+ }
+ return sess.Count(new(Topic))
+}
+
+// GetRepoTopicByName retrieves a topic by name for a repo if it exists
+func GetRepoTopicByName(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ cond := builder.NewCond()
+ var topic Topic
+ cond = cond.And(builder.Eq{"repo_topic.repo_id": repoID}).And(builder.Eq{"topic.name": topicName})
+ sess := db.GetEngine(ctx).Table("topic").Where(cond)
+ sess.Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id")
+ has, err := sess.Select("topic.*").Get(&topic)
+ if has {
+ return &topic, err
+ }
+ return nil, err
+}
+
+// AddTopic adds a topic name to a repository (if it does not already have it)
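+// A minimal usage sketch (repoID is assumed to refer to an existing repository):
+//
+//	if _, err := AddTopic(ctx, repoID, "golang"); err != nil { /* handle error */ }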
+func AddTopic(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ topic, err := GetRepoTopicByName(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+ if topic != nil {
+ // Repo already has the topic
+ return topic, nil
+ }
+
+ topic, err = addTopicByNameToRepo(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = syncTopicsInRepository(sess, repoID); err != nil {
+ return nil, err
+ }
+
+ return topic, committer.Commit()
+}
+
+// DeleteTopic removes a topic name from a repository (if it has it)
+func DeleteTopic(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
+ topic, err := GetRepoTopicByName(ctx, repoID, topicName)
+ if err != nil {
+ return nil, err
+ }
+ if topic == nil {
+ // Repo doesn't have the topic, so there is nothing to remove
+ return nil, nil
+ }
+
+ err = removeTopicFromRepo(ctx, repoID, topic)
+ if err != nil {
+ return nil, err
+ }
+
+ err = syncTopicsInRepository(db.GetEngine(ctx), repoID)
+
+ return topic, err
+}
+
+// SaveTopics saves topics to a repository
+func SaveTopics(ctx context.Context, repoID int64, topicNames ...string) error {
+ topics, _, err := FindTopics(ctx, &FindTopicOptions{
+ RepoID: repoID,
+ })
+ if err != nil {
+ return err
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+ sess := db.GetEngine(ctx)
+
+ var addedTopicNames []string
+ for _, topicName := range topicNames {
+ if strings.TrimSpace(topicName) == "" {
+ continue
+ }
+
+ var found bool
+ for _, t := range topics {
+ if strings.EqualFold(topicName, t.Name) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ addedTopicNames = append(addedTopicNames, topicName)
+ }
+ }
+
+ var removeTopics []*Topic
+ for _, t := range topics {
+ var found bool
+ for _, topicName := range topicNames {
+ if strings.EqualFold(topicName, t.Name) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ removeTopics = append(removeTopics, t)
+ }
+ }
+
+ for _, topicName := range addedTopicNames {
+ _, err := addTopicByNameToRepo(ctx, repoID, topicName)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, topic := range removeTopics {
+ err := removeTopicFromRepo(ctx, repoID, topic)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := syncTopicsInRepository(sess, repoID); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// GenerateTopics generates topics from a template repository
+func GenerateTopics(ctx context.Context, templateRepo, generateRepo *Repository) error {
+ for _, topic := range templateRepo.Topics {
+ if _, err := addTopicByNameToRepo(ctx, generateRepo.ID, topic); err != nil {
+ return err
+ }
+ }
+
+ return syncTopicsInRepository(db.GetEngine(ctx), generateRepo.ID)
+}
+
+// syncTopicsInRepository makes sure topics in the topics table are copied into the topics field of the repository
+func syncTopicsInRepository(sess db.Engine, repoID int64) error {
+ topicNames := make([]string, 0, 25)
+ if err := sess.Table("topic").Cols("name").
+ Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id").
+ Where("repo_topic.repo_id = ?", repoID).Asc("topic.name").Find(&topicNames); err != nil {
+ return err
+ }
+
+ if _, err := sess.ID(repoID).Cols("topics").Update(&Repository{
+ Topics: topicNames,
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CountOrphanedTopics returns the number of topics that don't belong to any repository.
+func CountOrphanedTopics(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_count = 0").Count(new(Topic))
+}
+
+// DeleteOrphanedTopics deletes all topics that don't belong to any repository.
+func DeleteOrphanedTopics(ctx context.Context) (int64, error) {
+ return db.GetEngine(ctx).Where("repo_count = 0").Delete(new(Topic))
+}
diff --git a/models/repo/topic_test.go b/models/repo/topic_test.go
new file mode 100644
index 0000000..45cee52
--- /dev/null
+++ b/models/repo/topic_test.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddTopic(t *testing.T) {
+ totalNrOfTopics := 6
+ repo1NrOfTopics := 3
+
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ topics, _, err := repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, total, err := repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ ListOptions: db.ListOptions{Page: 1, PageSize: 2},
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, 2)
+ assert.EqualValues(t, 6, total)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 1,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo1NrOfTopics)
+
+ require.NoError(t, repo_model.SaveTopics(db.DefaultContext, 2, "golang"))
+ repo2NrOfTopics := 1
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 2,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo2NrOfTopics)
+
+ require.NoError(t, repo_model.SaveTopics(db.DefaultContext, 2, "golang", "gitea"))
+ repo2NrOfTopics = 2
+ totalNrOfTopics++
+ topic, err := repo_model.GetTopicByName(db.DefaultContext, "gitea")
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, topic.RepoCount)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{})
+ require.NoError(t, err)
+ assert.Len(t, topics, totalNrOfTopics)
+
+ topics, _, err = repo_model.FindTopics(db.DefaultContext, &repo_model.FindTopicOptions{
+ RepoID: 2,
+ })
+ require.NoError(t, err)
+ assert.Len(t, topics, repo2NrOfTopics)
+}
+
+func TestTopicValidator(t *testing.T) {
+ assert.True(t, repo_model.ValidateTopic("12345"))
+ assert.True(t, repo_model.ValidateTopic("2-test"))
+ assert.True(t, repo_model.ValidateTopic("foo.bar"))
+ assert.True(t, repo_model.ValidateTopic("test-3"))
+ assert.True(t, repo_model.ValidateTopic("first"))
+ assert.True(t, repo_model.ValidateTopic("second-test-topic"))
+ assert.True(t, repo_model.ValidateTopic("third-project-topic-with-max-length"))
+
+ assert.False(t, repo_model.ValidateTopic("$fourth-test,topic"))
+ assert.False(t, repo_model.ValidateTopic("-fifth-test-topic"))
+ assert.False(t, repo_model.ValidateTopic("sixth-go-project-topic-with-excess-length"))
+ assert.False(t, repo_model.ValidateTopic(".foo"))
+}
diff --git a/models/repo/update.go b/models/repo/update.go
new file mode 100644
index 0000000..e7ca224
--- /dev/null
+++ b/models/repo/update.go
@@ -0,0 +1,145 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// UpdateRepositoryOwnerNames updates repository owner_names (this should only be used when the ownerName has changed case)
+func UpdateRepositoryOwnerNames(ctx context.Context, ownerID int64, ownerName string) error {
+ if ownerID == 0 {
+ return nil
+ }
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ if _, err := db.GetEngine(ctx).Where("owner_id = ?", ownerID).Cols("owner_name").Update(&Repository{
+ OwnerName: ownerName,
+ }); err != nil {
+ return err
+ }
+
+ return committer.Commit()
+}
+
+// UpdateRepositoryUpdatedTime updates a repository's updated time
+func UpdateRepositoryUpdatedTime(ctx context.Context, repoID int64, updateTime time.Time) error {
+ _, err := db.GetEngine(ctx).Exec("UPDATE repository SET updated_unix = ? WHERE id = ?", updateTime.Unix(), repoID)
+ return err
+}
+
+// UpdateRepositoryCols updates repository's columns
+func UpdateRepositoryCols(ctx context.Context, repo *Repository, cols ...string) error {
+ _, err := db.GetEngine(ctx).ID(repo.ID).Cols(cols...).Update(repo)
+ return err
+}
+
+// ErrReachLimitOfRepo represents a "ReachLimitOfRepo" kind of error.
+type ErrReachLimitOfRepo struct {
+ Limit int
+}
+
+// IsErrReachLimitOfRepo checks if an error is an ErrReachLimitOfRepo.
+func IsErrReachLimitOfRepo(err error) bool {
+ _, ok := err.(ErrReachLimitOfRepo)
+ return ok
+}
+
+func (err ErrReachLimitOfRepo) Error() string {
+ return fmt.Sprintf("user has reached maximum limit of repositories [limit: %d]", err.Limit)
+}
+
+func (err ErrReachLimitOfRepo) Unwrap() error {
+ return util.ErrPermissionDenied
+}
+
+// ErrRepoAlreadyExist represents a "RepoAlreadyExist" kind of error.
+type ErrRepoAlreadyExist struct {
+ Uname string
+ Name string
+}
+
+// IsErrRepoAlreadyExist checks if an error is an ErrRepoAlreadyExist.
+func IsErrRepoAlreadyExist(err error) bool {
+ _, ok := err.(ErrRepoAlreadyExist)
+ return ok
+}
+
+func (err ErrRepoAlreadyExist) Error() string {
+ return fmt.Sprintf("repository already exists [uname: %s, name: %s]", err.Uname, err.Name)
+}
+
+func (err ErrRepoAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrRepoFilesAlreadyExist represents a "RepoFilesAlreadyExist" kind of error.
+type ErrRepoFilesAlreadyExist struct {
+ Uname string
+ Name string
+}
+
+// IsErrRepoFilesAlreadyExist checks if an error is an ErrRepoFilesAlreadyExist.
+func IsErrRepoFilesAlreadyExist(err error) bool {
+ _, ok := err.(ErrRepoFilesAlreadyExist)
+ return ok
+}
+
+func (err ErrRepoFilesAlreadyExist) Error() string {
+ return fmt.Sprintf("repository files already exist [uname: %s, name: %s]", err.Uname, err.Name)
+}
+
+func (err ErrRepoFilesAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// CheckCreateRepository checks whether a repository with the given name can be created by doer for user u
+func CheckCreateRepository(ctx context.Context, doer, u *user_model.User, name string, overwriteOrAdopt bool) error {
+ if !doer.CanCreateRepo() {
+ return ErrReachLimitOfRepo{u.MaxRepoCreation}
+ }
+
+ if err := IsUsableRepoName(name); err != nil {
+ return err
+ }
+
+ has, err := IsRepositoryModelOrDirExist(ctx, u, name)
+ if err != nil {
+ return fmt.Errorf("IsRepositoryExist: %w", err)
+ } else if has {
+ return ErrRepoAlreadyExist{u.Name, name}
+ }
+
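+	// Even when no repository record exists, refuse creation if repository files are already on disk, unless overwrite/adoption is allowed.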
+ repoPath := RepoPath(u.Name, name)
+ isExist, err := util.IsExist(repoPath)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", repoPath, err)
+ return err
+ }
+ if !overwriteOrAdopt && isExist {
+ return ErrRepoFilesAlreadyExist{u.Name, name}
+ }
+ return nil
+}
+
+// UpdateRepoSize updates the repository size columns; the total size is the sum of the git and LFS sizes
+func UpdateRepoSize(ctx context.Context, repoID, gitSize, lfsSize int64) error {
+ _, err := db.GetEngine(ctx).ID(repoID).Cols("size", "git_size", "lfs_size").NoAutoTime().Update(&Repository{
+ Size: gitSize + lfsSize,
+ GitSize: gitSize,
+ LFSSize: lfsSize,
+ })
+ return err
+}
diff --git a/models/repo/upload.go b/models/repo/upload.go
new file mode 100644
index 0000000..18834f6
--- /dev/null
+++ b/models/repo/upload.go
@@ -0,0 +1,175 @@
+// Copyright 2016 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "os"
+ "path"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ gouuid "github.com/google/uuid"
+)
+
+// ErrUploadNotExist represents a "UploadNotExist" kind of error.
+type ErrUploadNotExist struct {
+ ID int64
+ UUID string
+}
+
+// IsErrUploadNotExist checks if an error is an ErrUploadNotExist.
+func IsErrUploadNotExist(err error) bool {
+ _, ok := err.(ErrUploadNotExist)
+ return ok
+}
+
+func (err ErrUploadNotExist) Error() string {
+ return fmt.Sprintf("attachment does not exist [id: %d, uuid: %s]", err.ID, err.UUID)
+}
+
+func (err ErrUploadNotExist) Unwrap() error {
+ return util.ErrNotExist
+}
+
+// Upload represents an uploaded file to a repo, to be deleted once it has been moved into the repository
+type Upload struct {
+ ID int64 `xorm:"pk autoincr"`
+ UUID string `xorm:"uuid UNIQUE"`
+ Name string
+}
+
+func init() {
+ db.RegisterModel(new(Upload))
+}
+
+// UploadLocalPath returns where an upload is stored in the local file system, based on the given UUID.
+func UploadLocalPath(uuid string) string {
+ return path.Join(setting.Repository.Upload.TempPath, uuid[0:1], uuid[1:2], uuid)
+}
+
+// LocalPath returns where the upload is temporarily stored in the local file system.
+func (upload *Upload) LocalPath() string {
+ return UploadLocalPath(upload.UUID)
+}
+
+// NewUpload creates a new upload object.
+func NewUpload(ctx context.Context, name string, buf []byte, file multipart.File) (_ *Upload, err error) {
+ upload := &Upload{
+ UUID: gouuid.New().String(),
+ Name: name,
+ }
+
+ localPath := upload.LocalPath()
+ if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
+ return nil, fmt.Errorf("MkdirAll: %w", err)
+ }
+
+ fw, err := os.Create(localPath)
+ if err != nil {
+ return nil, fmt.Errorf("Create: %w", err)
+ }
+ defer fw.Close()
+
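+	// Write the already-buffered bytes first, then stream the rest of the multipart file to disk.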
+ if _, err = fw.Write(buf); err != nil {
+ return nil, fmt.Errorf("Write: %w", err)
+ } else if _, err = io.Copy(fw, file); err != nil {
+ return nil, fmt.Errorf("Copy: %w", err)
+ }
+
+ if _, err := db.GetEngine(ctx).Insert(upload); err != nil {
+ return nil, err
+ }
+
+ return upload, nil
+}
+
+// GetUploadByUUID returns the Upload by UUID
+func GetUploadByUUID(ctx context.Context, uuid string) (*Upload, error) {
+ upload := &Upload{}
+ has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(upload)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrUploadNotExist{0, uuid}
+ }
+ return upload, nil
+}
+
+// GetUploadsByUUIDs returns multiple uploads by their UUIDs
+func GetUploadsByUUIDs(ctx context.Context, uuids []string) ([]*Upload, error) {
+ if len(uuids) == 0 {
+ return []*Upload{}, nil
+ }
+
+ // Silently drop invalid uuids.
+ uploads := make([]*Upload, 0, len(uuids))
+ return uploads, db.GetEngine(ctx).In("uuid", uuids).Find(&uploads)
+}
+
+// DeleteUploads deletes multiple uploads
+func DeleteUploads(ctx context.Context, uploads ...*Upload) (err error) {
+ if len(uploads) == 0 {
+ return nil
+ }
+
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ ids := make([]int64, len(uploads))
+ for i := 0; i < len(uploads); i++ {
+ ids[i] = uploads[i].ID
+ }
+ if err = db.DeleteByIDs[Upload](ctx, ids...); err != nil {
+ return fmt.Errorf("delete uploads: %w", err)
+ }
+
+ if err = committer.Commit(); err != nil {
+ return err
+ }
+
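+	// Remove the local files only after the database deletion has been committed; a failure here no longer rolls back the records.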
+ for _, upload := range uploads {
+ localPath := upload.LocalPath()
+ isFile, err := util.IsFile(localPath)
+ if err != nil {
+ log.Error("Unable to check if %s is a file. Error: %v", localPath, err)
+ }
+ if !isFile {
+ continue
+ }
+
+ if err := util.Remove(localPath); err != nil {
+ return fmt.Errorf("remove upload: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// DeleteUploadByUUID deletes an upload by UUID
+func DeleteUploadByUUID(ctx context.Context, uuid string) error {
+ upload, err := GetUploadByUUID(ctx, uuid)
+ if err != nil {
+ if IsErrUploadNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("GetUploadByUUID: %w", err)
+ }
+
+ if err := DeleteUploads(ctx, upload); err != nil {
+ return fmt.Errorf("DeleteUpload: %w", err)
+ }
+
+ return nil
+}
diff --git a/models/repo/user_repo.go b/models/repo/user_repo.go
new file mode 100644
index 0000000..6790ee1
--- /dev/null
+++ b/models/repo/user_repo.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+)
+
+// GetStarredRepos returns the repos starred by a particular user
+func GetStarredRepos(ctx context.Context, userID int64, private bool, listOptions db.ListOptions) ([]*Repository, error) {
+ sess := db.GetEngine(ctx).
+ Where("star.uid=?", userID).
+ Join("LEFT", "star", "`repository`.id=`star`.repo_id")
+ if !private {
+ sess = sess.And("is_private=?", false)
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ repos := make([]*Repository, 0, listOptions.PageSize)
+ return repos, sess.Find(&repos)
+ }
+
+ repos := make([]*Repository, 0, 10)
+ return repos, sess.Find(&repos)
+}
+
+// GetWatchedRepos returns the repos watched by a particular user
+func GetWatchedRepos(ctx context.Context, userID int64, private bool, listOptions db.ListOptions) ([]*Repository, int64, error) {
+ sess := db.GetEngine(ctx).
+ Where("watch.user_id=?", userID).
+ And("`watch`.mode<>?", WatchModeDont).
+ Join("LEFT", "watch", "`repository`.id=`watch`.repo_id")
+ if !private {
+ sess = sess.And("is_private=?", false)
+ }
+
+ if listOptions.Page != 0 {
+ sess = db.SetSessionPagination(sess, &listOptions)
+
+ repos := make([]*Repository, 0, listOptions.PageSize)
+ total, err := sess.FindAndCount(&repos)
+ return repos, total, err
+ }
+
+ repos := make([]*Repository, 0, 10)
+ total, err := sess.FindAndCount(&repos)
+ return repos, total, err
+}
+
+// GetRepoAssignees returns all users that have write access and can be assigned to issues
+// of the repository.
+func GetRepoAssignees(ctx context.Context, repo *Repository) (_ []*user_model.User, err error) {
+ if err = repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
+ e := db.GetEngine(ctx)
+ userIDs := make([]int64, 0, 10)
+ if err = e.Table("access").
+ Where("repo_id = ? AND mode >= ?", repo.ID, perm.AccessModeWrite).
+ Select("user_id").
+ Find(&userIDs); err != nil {
+ return nil, err
+ }
+
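+	// Also include users who are granted write access (or pull-request read access) through a team.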
+ additionalUserIDs := make([]int64, 0, 10)
+ if err = e.Table("team_user").
+ Join("INNER", "team_repo", "`team_repo`.team_id = `team_user`.team_id").
+ Join("INNER", "team_unit", "`team_unit`.team_id = `team_user`.team_id").
+ Where("`team_repo`.repo_id = ? AND (`team_unit`.access_mode >= ? OR (`team_unit`.access_mode = ? AND `team_unit`.`type` = ?))",
+ repo.ID, perm.AccessModeWrite, perm.AccessModeRead, unit.TypePullRequests).
+ Distinct("`team_user`.uid").
+ Select("`team_user`.uid").
+ Find(&additionalUserIDs); err != nil {
+ return nil, err
+ }
+
+ uniqueUserIDs := make(container.Set[int64])
+ uniqueUserIDs.AddMultiple(userIDs...)
+ uniqueUserIDs.AddMultiple(additionalUserIDs...)
+
+	// Leave a spare slot so the owner can be appended later; if the owner is an organization
+	// the slot is simply unused, which is cheaper than re-allocating the slice.
+ users := make([]*user_model.User, 0, len(uniqueUserIDs)+1)
+ if len(userIDs) > 0 {
+ if err = e.In("id", uniqueUserIDs.Values()).
+ Where(builder.Eq{"`user`.is_active": true}).
+ OrderBy(user_model.GetOrderByName()).
+ Find(&users); err != nil {
+ return nil, err
+ }
+ }
+ if !repo.Owner.IsOrganization() && !uniqueUserIDs.Contains(repo.OwnerID) {
+ users = append(users, repo.Owner)
+ }
+
+ return users, nil
+}
+
+// GetReviewers returns all users who can be requested to review:
+// * for private repositories this returns all users that have read access or higher to the repository.
+// * for public repositories this returns all users that have read access or higher to the repository,
+// all repo watchers and all organization members.
+// TODO: maybe we should have a "busy" option so that users can block review requests to them.
+func GetReviewers(ctx context.Context, repo *Repository, doerID, posterID int64) ([]*user_model.User, error) {
+	// Get the owner of the repository - this is often already pre-cached and, if so, saves work in the following queries
+ if err := repo.LoadOwner(ctx); err != nil {
+ return nil, err
+ }
+
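+	// The poster of the pull request is always excluded, and only active users are considered; the remaining conditions depend on the repository's visibility.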
+ cond := builder.And(builder.Neq{"`user`.id": posterID}).
+ And(builder.Eq{"`user`.is_active": true})
+
+ if repo.IsPrivate || repo.Owner.Visibility == api.VisibleTypePrivate {
+		// This is a private repository:
+ // Anyone who can read the repository is a requestable reviewer
+
+ cond = cond.And(builder.In("`user`.id",
+ builder.Select("user_id").From("access").Where(
+ builder.Eq{"repo_id": repo.ID}.
+ And(builder.Gte{"mode": perm.AccessModeRead}),
+ ),
+ ))
+
+ if repo.Owner.Type == user_model.UserTypeIndividual && repo.Owner.ID != posterID {
+ // as private *user* repos don't generate an entry in the `access` table,
+ // the owner of a private repo needs to be explicitly added.
+ cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID})
+ }
+ } else {
+ // This is a "public" repository:
+		// Any user who has read access, is a watcher, or is an organization member can be requested to review
+ cond = cond.And(builder.And(builder.In("`user`.id",
+ builder.Select("user_id").From("access").
+ Where(builder.Eq{"repo_id": repo.ID}.
+ And(builder.Gte{"mode": perm.AccessModeRead})),
+ ).Or(builder.In("`user`.id",
+ builder.Select("user_id").From("watch").
+ Where(builder.Eq{"repo_id": repo.ID}.
+ And(builder.In("mode", WatchModeNormal, WatchModeAuto))),
+ ).Or(builder.In("`user`.id",
+ builder.Select("uid").From("org_user").
+ Where(builder.Eq{"org_id": repo.OwnerID}),
+ )))))
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, db.GetEngine(ctx).Where(cond).OrderBy(user_model.GetOrderByName()).Find(&users)
+}
+
+// GetIssuePostersWithSearch returns at most 30 users whose username starts with the given prefix and who have authored an issue/pull request in the given repository.
+// If isShowFullName is true, the search also matches against the full name (as a substring).
+func GetIssuePostersWithSearch(ctx context.Context, repo *Repository, isPull bool, search string, isShowFullName bool) ([]*user_model.User, error) {
+ users := make([]*user_model.User, 0, 30)
+ var prefixCond builder.Cond = builder.Like{"name", search + "%"}
+ if isShowFullName {
+ prefixCond = prefixCond.Or(builder.Like{"full_name", "%" + search + "%"})
+ }
+
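+	// Only consider users who have authored an issue/pull request in this repository, then apply the name search condition.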
+ cond := builder.In("`user`.id",
+ builder.Select("poster_id").From("issue").Where(
+ builder.Eq{"repo_id": repo.ID}.
+ And(builder.Eq{"is_pull": isPull}),
+ ).GroupBy("poster_id")).And(prefixCond)
+
+ return users, db.GetEngine(ctx).
+ Where(cond).
+ Cols("id", "name", "full_name", "avatar", "avatar_email", "use_custom_avatar").
+ OrderBy("name").
+ Limit(30).
+ Find(&users)
+}
+
+// GetWatchedRepoIDsOwnedBy returns the repos owned by a particular user watched by a particular user
+func GetWatchedRepoIDsOwnedBy(ctx context.Context, userID, ownedByUserID int64) ([]int64, error) {
+ repoIDs := make([]int64, 0, 10)
+ err := db.GetEngine(ctx).
+ Table("repository").
+ Select("`repository`.id").
+ Join("LEFT", "watch", "`repository`.id=`watch`.repo_id").
+ Where("`watch`.user_id=?", userID).
+ And("`watch`.mode<>?", WatchModeDont).
+ And("`repository`.owner_id=?", ownedByUserID).Find(&repoIDs)
+ return repoIDs, err
+}
diff --git a/models/repo/user_repo_test.go b/models/repo/user_repo_test.go
new file mode 100644
index 0000000..c784a55
--- /dev/null
+++ b/models/repo/user_repo_test.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepoAssignees(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ users, err := repo_model.GetRepoAssignees(db.DefaultContext, repo2)
+ require.NoError(t, err)
+ assert.Len(t, users, 1)
+ assert.Equal(t, int64(2), users[0].ID)
+
+ repo21 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 21})
+ users, err = repo_model.GetRepoAssignees(db.DefaultContext, repo21)
+ require.NoError(t, err)
+ if assert.Len(t, users, 3) {
+ assert.ElementsMatch(t, []int64{15, 16, 18}, []int64{users[0].ID, users[1].ID, users[2].ID})
+ }
+
+ // do not return deactivated users
+ require.NoError(t, user_model.UpdateUserCols(db.DefaultContext, &user_model.User{ID: 15, IsActive: false}, "is_active"))
+ users, err = repo_model.GetRepoAssignees(db.DefaultContext, repo21)
+ require.NoError(t, err)
+ if assert.Len(t, users, 2) {
+ assert.NotContains(t, []int64{users[0].ID, users[1].ID}, 15)
+ }
+}
+
+func TestRepoGetReviewers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ // test public repo
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+ ctx := db.DefaultContext
+ reviewers, err := repo_model.GetReviewers(ctx, repo1, 2, 2)
+ require.NoError(t, err)
+ if assert.Len(t, reviewers, 3) {
+ assert.ElementsMatch(t, []int64{1, 4, 11}, []int64{reviewers[0].ID, reviewers[1].ID, reviewers[2].ID})
+ }
+
+ // should include doer if doer is not PR poster.
+ reviewers, err = repo_model.GetReviewers(ctx, repo1, 11, 2)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 3)
+
+ // should not include PR poster, if PR poster would be otherwise eligible
+ reviewers, err = repo_model.GetReviewers(ctx, repo1, 11, 4)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 2)
+
+ // test private user repo
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo2, 2, 4)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 1)
+ assert.EqualValues(t, 2, reviewers[0].ID)
+
+ // test private org repo
+ repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3})
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo3, 2, 1)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 2)
+
+ reviewers, err = repo_model.GetReviewers(ctx, repo3, 2, 2)
+ require.NoError(t, err)
+ assert.Len(t, reviewers, 1)
+}
+
+func TestGetWatchedRepoIDsOwnedBy(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 9})
+ user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+
+ repoIDs, err := repo_model.GetWatchedRepoIDsOwnedBy(db.DefaultContext, user1.ID, user2.ID)
+ require.NoError(t, err)
+ assert.Len(t, repoIDs, 1)
+ assert.EqualValues(t, 1, repoIDs[0])
+}
diff --git a/models/repo/watch.go b/models/repo/watch.go
new file mode 100644
index 0000000..6974d89
--- /dev/null
+++ b/models/repo/watch.go
@@ -0,0 +1,190 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// WatchMode specifies what kind of watch the user has on a repository
+type WatchMode int8
+
+const (
+ // WatchModeNone don't watch
+ WatchModeNone WatchMode = iota // 0
+ // WatchModeNormal watch repository (from other sources)
+ WatchModeNormal // 1
+ // WatchModeDont explicit don't auto-watch
+ WatchModeDont // 2
+ // WatchModeAuto watch repository (from AutoWatchOnChanges)
+ WatchModeAuto // 3
+)
+
+// Watch represents a user's subscription to a repository for receiving notifications.
+type Watch struct {
+ ID int64 `xorm:"pk autoincr"`
+ UserID int64 `xorm:"UNIQUE(watch)"`
+ RepoID int64 `xorm:"UNIQUE(watch)"`
+ Mode WatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+func init() {
+ db.RegisterModel(new(Watch))
+}
+
+// GetWatch gets what kind of subscription a user has on a given repository; it returns a dummy record if none is found
+func GetWatch(ctx context.Context, userID, repoID int64) (Watch, error) {
+ watch := Watch{UserID: userID, RepoID: repoID}
+ has, err := db.GetEngine(ctx).Get(&watch)
+ if err != nil {
+ return watch, err
+ }
+ if !has {
+ watch.Mode = WatchModeNone
+ }
+ return watch, nil
+}
+
+// IsWatchMode reports whether the given WatchMode means the repository is being watched
+func IsWatchMode(mode WatchMode) bool {
+ return mode != WatchModeNone && mode != WatchModeDont
+}
+
+// IsWatching checks if the user is watching the given repository.
+func IsWatching(ctx context.Context, userID, repoID int64) bool {
+ watch, err := GetWatch(ctx, userID, repoID)
+ return err == nil && IsWatchMode(watch.Mode)
+}
+
+func watchRepoMode(ctx context.Context, watch Watch, mode WatchMode) (err error) {
+ if watch.Mode == mode {
+ return nil
+ }
+ if mode == WatchModeAuto && (watch.Mode == WatchModeDont || IsWatchMode(watch.Mode)) {
+ // Don't auto watch if already watching or deliberately not watching
+ return nil
+ }
+
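+	// Work out whether the watch record has to be inserted, updated or deleted, and how the repository's watcher counter changes.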
+ hadrec := watch.Mode != WatchModeNone
+ needsrec := mode != WatchModeNone
+ repodiff := 0
+
+ if IsWatchMode(mode) && !IsWatchMode(watch.Mode) {
+ repodiff = 1
+ } else if !IsWatchMode(mode) && IsWatchMode(watch.Mode) {
+ repodiff = -1
+ }
+
+ watch.Mode = mode
+
+ if !hadrec && needsrec {
+ watch.Mode = mode
+ if err = db.Insert(ctx, watch); err != nil {
+ return err
+ }
+ } else if needsrec {
+ watch.Mode = mode
+ if _, err := db.GetEngine(ctx).ID(watch.ID).AllCols().Update(watch); err != nil {
+ return err
+ }
+ } else if _, err = db.DeleteByID[Watch](ctx, watch.ID); err != nil {
+ return err
+ }
+ if repodiff != 0 {
+ _, err = db.GetEngine(ctx).Exec("UPDATE `repository` SET num_watches = num_watches + ? WHERE id = ?", repodiff, watch.RepoID)
+ }
+ return err
+}
+
+// WatchRepoMode watches a repository in the given mode.
+func WatchRepoMode(ctx context.Context, userID, repoID int64, mode WatchMode) (err error) {
+ var watch Watch
+ if watch, err = GetWatch(ctx, userID, repoID); err != nil {
+ return err
+ }
+ return watchRepoMode(ctx, watch, mode)
+}
+
+// WatchRepo watches or unwatches a repository.
+func WatchRepo(ctx context.Context, userID, repoID int64, doWatch bool) (err error) {
+ var watch Watch
+ if watch, err = GetWatch(ctx, userID, repoID); err != nil {
+ return err
+ }
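+	// Unwatching an auto-watch stores an explicit "don't watch" so the repository is not auto-watched again later.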
+ if !doWatch && watch.Mode == WatchModeAuto {
+ err = watchRepoMode(ctx, watch, WatchModeDont)
+ } else if !doWatch {
+ err = watchRepoMode(ctx, watch, WatchModeNone)
+ } else {
+ err = watchRepoMode(ctx, watch, WatchModeNormal)
+ }
+ return err
+}
+
+// GetWatchers returns all watchers of given repository.
+func GetWatchers(ctx context.Context, repoID int64) ([]*Watch, error) {
+ watches := make([]*Watch, 0, 10)
+ return watches, db.GetEngine(ctx).Where("`watch`.repo_id=?", repoID).
+ And("`watch`.mode<>?", WatchModeDont).
+ And("`user`.is_active=?", true).
+ And("`user`.prohibit_login=?", false).
+ Join("INNER", "`user`", "`user`.id = `watch`.user_id").
+ Find(&watches)
+}
+
+// GetRepoWatchersIDs returns IDs of watchers for a given repo ID
+// but avoids joining with `user` for performance reasons
+// User permissions must be verified elsewhere if required
+func GetRepoWatchersIDs(ctx context.Context, repoID int64) ([]int64, error) {
+ ids := make([]int64, 0, 64)
+ return ids, db.GetEngine(ctx).Table("watch").
+ Where("watch.repo_id=?", repoID).
+ And("watch.mode<>?", WatchModeDont).
+ Select("user_id").
+ Find(&ids)
+}
+
+// GetRepoWatchers returns range of users watching given repository.
+func GetRepoWatchers(ctx context.Context, repoID int64, opts db.ListOptions) ([]*user_model.User, error) {
+ sess := db.GetEngine(ctx).Where("watch.repo_id=?", repoID).
+ Join("LEFT", "watch", "`user`.id=`watch`.user_id").
+ And("`watch`.mode<>?", WatchModeDont)
+ if opts.Page > 0 {
+ sess = db.SetSessionPagination(sess, &opts)
+ users := make([]*user_model.User, 0, opts.PageSize)
+
+ return users, sess.Find(&users)
+ }
+
+ users := make([]*user_model.User, 0, 8)
+ return users, sess.Find(&users)
+}
+
+// WatchIfAuto subscribes to repo if AutoWatchOnChanges is set
+func WatchIfAuto(ctx context.Context, userID, repoID int64, isWrite bool) error {
+ if !isWrite || !setting.Service.AutoWatchOnChanges {
+ return nil
+ }
+ watch, err := GetWatch(ctx, userID, repoID)
+ if err != nil {
+ return err
+ }
+ if watch.Mode != WatchModeNone {
+ return nil
+ }
+ return watchRepoMode(ctx, watch, WatchModeAuto)
+}
+
+// UnwatchRepos will unwatch the user from all given repositories.
+func UnwatchRepos(ctx context.Context, userID int64, repoIDs []int64) error {
+ _, err := db.GetEngine(ctx).Where("user_id=?", userID).In("repo_id", repoIDs).Delete(&Watch{})
+ return err
+}
diff --git a/models/repo/watch_test.go b/models/repo/watch_test.go
new file mode 100644
index 0000000..dbf1505
--- /dev/null
+++ b/models/repo/watch_test.go
@@ -0,0 +1,153 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsWatching(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 1, 1))
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 4, 1))
+ assert.True(t, repo_model.IsWatching(db.DefaultContext, 11, 1))
+
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, 1, 5))
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, 8, 1))
+ assert.False(t, repo_model.IsWatching(db.DefaultContext, unittest.NonexistentID, unittest.NonexistentID))
+}
+
+func TestGetWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watches, err := repo_model.GetWatchers(db.DefaultContext, repo.ID)
+ require.NoError(t, err)
+	// One watcher is inactive, thus minus one
+ assert.Len(t, watches, repo.NumWatches-1)
+ for _, watch := range watches {
+ assert.EqualValues(t, repo.ID, watch.RepoID)
+ }
+
+ watches, err = repo_model.GetWatchers(db.DefaultContext, unittest.NonexistentID)
+ require.NoError(t, err)
+ assert.Empty(t, watches)
+}
+
+func TestRepository_GetWatchers(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watchers, err := repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, repo.NumWatches)
+ for _, watcher := range watchers {
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: watcher.ID, RepoID: repo.ID})
+ }
+
+ repo = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 9})
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Empty(t, watchers)
+}
+
+func TestWatchIfAuto(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ watchers, err := repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, repo.NumWatches)
+
+ setting.Service.AutoWatchOnChanges = false
+
+ prevCount := repo.NumWatches
+
+ // Must not add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 8, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ // Should not add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 10, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ setting.Service.AutoWatchOnChanges = true
+
+ // Must not add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 8, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ // Should not add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, false))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ // Should add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount+1)
+
+ // Should remove watch, inhibit from adding auto
+ require.NoError(t, repo_model.WatchRepo(db.DefaultContext, 12, 1, false))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+
+ // Must not add watch
+ require.NoError(t, repo_model.WatchIfAuto(db.DefaultContext, 12, 1, true))
+ watchers, err = repo_model.GetRepoWatchers(db.DefaultContext, repo.ID, db.ListOptions{Page: 1})
+ require.NoError(t, err)
+ assert.Len(t, watchers, prevCount)
+}
+
+func TestWatchRepoMode(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 0)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeAuto))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeAuto}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeNormal))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeNormal}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeDont))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 1)
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1, Mode: repo_model.WatchModeDont}, 1)
+
+ require.NoError(t, repo_model.WatchRepoMode(db.DefaultContext, 12, 1, repo_model.WatchModeNone))
+ unittest.AssertCount(t, &repo_model.Watch{UserID: 12, RepoID: 1}, 0)
+}
+
+func TestUnwatchRepos(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: 4, RepoID: 1})
+ unittest.AssertExistsAndLoadBean(t, &repo_model.Watch{UserID: 4, RepoID: 2})
+
+ err := repo_model.UnwatchRepos(db.DefaultContext, 4, []int64{1, 2})
+ require.NoError(t, err)
+
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{UserID: 4, RepoID: 1})
+ unittest.AssertNotExistsBean(t, &repo_model.Watch{UserID: 4, RepoID: 2})
+}
diff --git a/models/repo/wiki.go b/models/repo/wiki.go
new file mode 100644
index 0000000..b378666
--- /dev/null
+++ b/models/repo/wiki.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The Gogs Authors. All rights reserved.
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+// ErrWikiAlreadyExist represents a "WikiAlreadyExist" kind of error.
+type ErrWikiAlreadyExist struct {
+ Title string
+}
+
+// IsErrWikiAlreadyExist checks if an error is an ErrWikiAlreadyExist.
+func IsErrWikiAlreadyExist(err error) bool {
+ _, ok := err.(ErrWikiAlreadyExist)
+ return ok
+}
+
+func (err ErrWikiAlreadyExist) Error() string {
+ return fmt.Sprintf("wiki page already exists [title: %s]", err.Title)
+}
+
+func (err ErrWikiAlreadyExist) Unwrap() error {
+ return util.ErrAlreadyExist
+}
+
+// ErrWikiReservedName represents a reserved name error.
+type ErrWikiReservedName struct {
+ Title string
+}
+
+// IsErrWikiReservedName checks if an error is an ErrWikiReservedName.
+func IsErrWikiReservedName(err error) bool {
+ _, ok := err.(ErrWikiReservedName)
+ return ok
+}
+
+func (err ErrWikiReservedName) Error() string {
+ return fmt.Sprintf("wiki title is reserved: %s", err.Title)
+}
+
+func (err ErrWikiReservedName) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// ErrWikiInvalidFileName represents an invalid wiki file name.
+type ErrWikiInvalidFileName struct {
+ FileName string
+}
+
+// IsErrWikiInvalidFileName checks if an error is an ErrWikiInvalidFileName.
+func IsErrWikiInvalidFileName(err error) bool {
+ _, ok := err.(ErrWikiInvalidFileName)
+ return ok
+}
+
+func (err ErrWikiInvalidFileName) Error() string {
+ return fmt.Sprintf("Invalid wiki filename: %s", err.FileName)
+}
+
+func (err ErrWikiInvalidFileName) Unwrap() error {
+ return util.ErrInvalidArgument
+}
+
+// WikiCloneLink returns clone URLs of repository wiki.
+func (repo *Repository) WikiCloneLink() *CloneLink {
+ return repo.cloneLink(true)
+}
+
+// WikiPath returns wiki data path by given user and repository name.
+func WikiPath(userName, repoName string) string {
+ return filepath.Join(user_model.UserPath(userName), strings.ToLower(repoName)+".wiki.git")
+}
+
+// WikiPath returns wiki data path for given repository.
+func (repo *Repository) WikiPath() string {
+ return WikiPath(repo.OwnerName, repo.Name)
+}
+
+// HasWiki returns true if repository has wiki.
+func (repo *Repository) HasWiki() bool {
+ isDir, err := util.IsDir(repo.WikiPath())
+ if err != nil {
+ log.Error("Unable to check if %s is a directory: %v", repo.WikiPath(), err)
+ }
+ return isDir
+}
diff --git a/models/repo/wiki_test.go b/models/repo/wiki_test.go
new file mode 100644
index 0000000..28495a4
--- /dev/null
+++ b/models/repo/wiki_test.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package repo_test
+
+import (
+ "path/filepath"
+ "testing"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRepository_WikiCloneLink(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ cloneLink := repo.WikiCloneLink()
+ assert.Equal(t, "ssh://sshuser@try.gitea.io:3000/user2/repo1.wiki.git", cloneLink.SSH)
+ assert.Equal(t, "https://try.gitea.io/user2/repo1.wiki.git", cloneLink.HTTPS)
+}
+
+func TestWikiPath(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
+ assert.Equal(t, expected, repo_model.WikiPath("user2", "repo1"))
+}
+
+func TestRepository_WikiPath(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
+ assert.Equal(t, expected, repo.WikiPath())
+}
+
+func TestRepository_HasWiki(t *testing.T) {
+ unittest.PrepareTestEnv(t)
+ repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+ assert.True(t, repo1.HasWiki())
+ repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+ assert.False(t, repo2.HasWiki())
+}
diff --git a/models/repo_test.go b/models/repo_test.go
new file mode 100644
index 0000000..958725f
--- /dev/null
+++ b/models/repo_test.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCheckRepoStats(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ require.NoError(t, CheckRepoStats(db.DefaultContext))
+}
+
+func TestDoctorUserStarNum(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+
+ require.NoError(t, DoctorUserStarNum(db.DefaultContext))
+}
diff --git a/models/repo_transfer.go b/models/repo_transfer.go
new file mode 100644
index 0000000..0c23d75
--- /dev/null
+++ b/models/repo_transfer.go
@@ -0,0 +1,185 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+// RepoTransfer is used to manage repository transfers
+type RepoTransfer struct {
+ ID int64 `xorm:"pk autoincr"`
+ DoerID int64
+ Doer *user_model.User `xorm:"-"`
+ RecipientID int64
+ Recipient *user_model.User `xorm:"-"`
+ RepoID int64
+ TeamIDs []int64
+ Teams []*organization.Team `xorm:"-"`
+
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL updated"`
+}
+
+func init() {
+ db.RegisterModel(new(RepoTransfer))
+}
+
+// LoadAttributes fetches the transfer recipient, its teams, and the doer from the database
+func (r *RepoTransfer) LoadAttributes(ctx context.Context) error {
+ if r.Recipient == nil {
+ u, err := user_model.GetUserByID(ctx, r.RecipientID)
+ if err != nil {
+ return err
+ }
+
+ r.Recipient = u
+ }
+
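+	// Teams are only relevant when the recipient is an organization; each team must belong to that organization.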
+ if r.Recipient.IsOrganization() && len(r.TeamIDs) != len(r.Teams) {
+ for _, v := range r.TeamIDs {
+ team, err := organization.GetTeamByID(ctx, v)
+ if err != nil {
+ return err
+ }
+
+ if team.OrgID != r.Recipient.ID {
+				return fmt.Errorf("team %d does not belong to org %d", v, r.Recipient.ID)
+ }
+
+ r.Teams = append(r.Teams, team)
+ }
+ }
+
+ if r.Doer == nil {
+ u, err := user_model.GetUserByID(ctx, r.DoerID)
+ if err != nil {
+ return err
+ }
+
+ r.Doer = u
+ }
+
+ return nil
+}
+
+// CanUserAcceptTransfer checks if the user has the rights to accept/decline a repo transfer.
+// For users, it checks whether the user is the recipient.
+// For organizations, it checks whether the user is allowed to create repositories in the organization.
+func (r *RepoTransfer) CanUserAcceptTransfer(ctx context.Context, u *user_model.User) bool {
+ if err := r.LoadAttributes(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return false
+ }
+
+ if !r.Recipient.IsOrganization() {
+ return r.RecipientID == u.ID
+ }
+
+ allowed, err := organization.CanCreateOrgRepo(ctx, r.RecipientID, u.ID)
+ if err != nil {
+ log.Error("CanCreateOrgRepo: %v", err)
+ return false
+ }
+
+ return allowed
+}
+
+// GetPendingRepositoryTransfer fetches the most recent and ongoing transfer
+// process for the repository
+func GetPendingRepositoryTransfer(ctx context.Context, repo *repo_model.Repository) (*RepoTransfer, error) {
+ transfer := new(RepoTransfer)
+
+ has, err := db.GetEngine(ctx).Where("repo_id = ? ", repo.ID).Get(transfer)
+ if err != nil {
+ return nil, err
+ }
+
+ if !has {
+ return nil, ErrNoPendingRepoTransfer{RepoID: repo.ID}
+ }
+
+ return transfer, nil
+}
+
+func DeleteRepositoryTransfer(ctx context.Context, repoID int64) error {
+ _, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Delete(&RepoTransfer{})
+ return err
+}
+
+// TestRepositoryReadyForTransfer makes sure the repo is ready to be transferred
+func TestRepositoryReadyForTransfer(status repo_model.RepositoryStatus) error {
+ switch status {
+ case repo_model.RepositoryBeingMigrated:
+ return errors.New("repo is not ready, currently migrating")
+ case repo_model.RepositoryPendingTransfer:
+ return ErrRepoTransferInProgress{}
+ }
+ return nil
+}
+
+// CreatePendingRepositoryTransfer transfers a repo from one owner to a new one.
+// It marks the repository transfer as "pending".
+func CreatePendingRepositoryTransfer(ctx context.Context, doer, newOwner *user_model.User, repoID int64, teams []*organization.Team) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ repo, err := repo_model.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return err
+ }
+
+ // Make sure repo is ready to transfer
+ if err := TestRepositoryReadyForTransfer(repo.Status); err != nil {
+ return err
+ }
+
+ repo.Status = repo_model.RepositoryPendingTransfer
+ if err := repo_model.UpdateRepositoryCols(ctx, repo, "status"); err != nil {
+ return err
+ }
+
+ // Check if new owner has repository with same name.
+ if has, err := repo_model.IsRepositoryModelExist(ctx, newOwner, repo.Name); err != nil {
+ return fmt.Errorf("IsRepositoryExist: %w", err)
+ } else if has {
+ return repo_model.ErrRepoAlreadyExist{
+ Uname: newOwner.LowerName,
+ Name: repo.Name,
+ }
+ }
+
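+		// Record the pending transfer, remembering which teams of the new owner were selected.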
+ transfer := &RepoTransfer{
+ RepoID: repo.ID,
+ RecipientID: newOwner.ID,
+ CreatedUnix: timeutil.TimeStampNow(),
+ UpdatedUnix: timeutil.TimeStampNow(),
+ DoerID: doer.ID,
+ TeamIDs: make([]int64, 0, len(teams)),
+ }
+
+ for k := range teams {
+ transfer.TeamIDs = append(transfer.TeamIDs, teams[k].ID)
+ }
+
+ return db.Insert(ctx, transfer)
+ })
+}
+
+// GetPendingTransferIDs returns the IDs of the pending transfers to the recipient which were initiated by the doer.
+func GetPendingTransferIDs(ctx context.Context, recipientID, doerID int64) ([]int64, error) {
+	pendingTransferIDs := make([]int64, 0, 8)
+	return pendingTransferIDs, db.GetEngine(ctx).Table("repo_transfer").
+		Where("doer_id = ?", doerID).
+		And("recipient_id = ?", recipientID).
+		Cols("id").
+		Find(&pendingTransferIDs)
+}
diff --git a/models/repo_transfer_test.go b/models/repo_transfer_test.go
new file mode 100644
index 0000000..6b6d5a8
--- /dev/null
+++ b/models/repo_transfer_test.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package models
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetPendingTransferIDs(t *testing.T) {
+ require.NoError(t, unittest.PrepareTestDatabase())
+ doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
+	recipient := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
+	pendingTransfer := unittest.AssertExistsAndLoadBean(t, &RepoTransfer{RecipientID: recipient.ID, DoerID: doer.ID})
+
+	pendingTransferIDs, err := GetPendingTransferIDs(db.DefaultContext, recipient.ID, doer.ID)
+ require.NoError(t, err)
+ if assert.Len(t, pendingTransferIDs, 1) {
+ assert.EqualValues(t, pendingTransfer.ID, pendingTransferIDs[0])
+ }
+}