path: root/services/doctor
author    Daniel Baumann <daniel@debian.org>  2024-10-18 20:33:49 +0200
committer Daniel Baumann <daniel@debian.org>  2024-12-12 23:57:56 +0100
commit    e68b9d00a6e05b3a941f63ffb696f91e554ac5ec (patch)
tree      97775d6c13b0f416af55314eb6a89ef792474615 /services/doctor
parent    Initial commit. (diff)
Adding upstream version 9.0.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'services/doctor')
-rw-r--r--  services/doctor/authorizedkeys.go            100
-rw-r--r--  services/doctor/breaking.go                   97
-rw-r--r--  services/doctor/checkOldArchives.go           59
-rw-r--r--  services/doctor/dbconsistency.go             268
-rw-r--r--  services/doctor/dbversion.go                  42
-rw-r--r--  services/doctor/doctor.go                    138
-rw-r--r--  services/doctor/fix16961.go                  328
-rw-r--r--  services/doctor/fix16961_test.go             271
-rw-r--r--  services/doctor/fix8312.go                    61
-rw-r--r--  services/doctor/heads.go                      88
-rw-r--r--  services/doctor/lfs.go                        52
-rw-r--r--  services/doctor/mergebase.go                 114
-rw-r--r--  services/doctor/misc.go                      299
-rw-r--r--  services/doctor/packages_nuget.go            160
-rw-r--r--  services/doctor/paths.go                     124
-rw-r--r--  services/doctor/push_mirror_consistency.go    91
-rw-r--r--  services/doctor/repository.go                 80
-rw-r--r--  services/doctor/storage.go                   270
-rw-r--r--  services/doctor/usertype.go                   41
19 files changed, 2683 insertions, 0 deletions
diff --git a/services/doctor/authorizedkeys.go b/services/doctor/authorizedkeys.go
new file mode 100644
index 0000000..2920cf5
--- /dev/null
+++ b/services/doctor/authorizedkeys.go
@@ -0,0 +1,100 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ asymkey_model "code.gitea.io/gitea/models/asymkey"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+const tplCommentPrefix = `# gitea public key`
+
+func checkAuthorizedKeys(ctx context.Context, logger log.Logger, autofix bool) error {
+ if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedKeysFile {
+ return nil
+ }
+
+ fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
+ f, err := os.Open(fPath)
+ if err != nil {
+ if !autofix {
+ logger.Critical("Unable to open authorized_keys file. ERROR: %v", err)
+ return fmt.Errorf("Unable to open authorized_keys file. ERROR: %w", err)
+ }
+ logger.Warn("Unable to open authorized_keys. (ERROR: %v). Attempting to rewrite...", err)
+ if err = asymkey_model.RewriteAllPublicKeys(ctx); err != nil {
+ logger.Critical("Unable to rewrite authorized_keys file. ERROR: %v", err)
+ return fmt.Errorf("Unable to rewrite authorized_keys file. ERROR: %w", err)
+ }
+ // reopen the freshly rewritten file, otherwise the scanner below would read from a nil handle
+ f, err = os.Open(fPath)
+ if err != nil {
+ logger.Critical("Unable to open regenerated authorized_keys file. ERROR: %v", err)
+ return fmt.Errorf("Unable to open regenerated authorized_keys file. ERROR: %w", err)
+ }
+ }
+ defer f.Close()
+
+ linesInAuthorizedKeys := make(container.Set[string])
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, tplCommentPrefix) {
+ continue
+ }
+ linesInAuthorizedKeys.Add(line)
+ }
+ if err = scanner.Err(); err != nil {
+ return fmt.Errorf("scan: %w", err)
+ }
+ // although there is a "defer f.Close()" above, close the file explicitly here before regenerating, because the regeneration needs to open the file for writing again
+ _ = f.Close()
+
+ // now we regenerate and check if there are any lines missing
+ regenerated := &bytes.Buffer{}
+ if err := asymkey_model.RegeneratePublicKeys(ctx, regenerated); err != nil {
+ logger.Critical("Unable to regenerate authorized_keys file. ERROR: %v", err)
+ return fmt.Errorf("Unable to regenerate authorized_keys file. ERROR: %w", err)
+ }
+ scanner = bufio.NewScanner(regenerated)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, tplCommentPrefix) {
+ continue
+ }
+ if linesInAuthorizedKeys.Contains(line) {
+ continue
+ }
+ if !autofix {
+ logger.Critical(
+ "authorized_keys file %q is out of date.\nRegenerate it with:\n\t\"%s\"\nor\n\t\"%s\"",
+ fPath,
+ "forgejo admin regenerate keys",
+ "forgejo doctor check --run authorized-keys --fix")
+ return fmt.Errorf(`authorized_keys is out of date and should be regenerated with "forgejo admin regenerate keys" or "forgejo doctor check --run authorized-keys --fix"`)
+ }
+ logger.Warn("authorized_keys is out of date. Attempting rewrite...")
+ err = asymkey_model.RewriteAllPublicKeys(ctx)
+ if err != nil {
+ logger.Critical("Unable to rewrite authorized_keys file. ERROR: %v", err)
+ return fmt.Errorf("Unable to rewrite authorized_keys file. ERROR: %w", err)
+ }
+ }
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check if OpenSSH authorized_keys file is up-to-date",
+ Name: "authorized-keys",
+ IsDefault: true,
+ Run: checkAuthorizedKeys,
+ Priority: 4,
+ })
+}
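
Editor's note: the file above shows the full doctor-check contract: a Run function receives a context, a step logger, and an autofix flag, and is wired in via Register from an init() function. Below is a minimal sketch of a new report-only check using the same API; the check name and body are hypothetical, while Check and Register come from services/doctor/doctor.go later in this diff.

    func init() {
        Register(&Check{
            Title:     "Example: hypothetical report-only check",
            Name:      "example-check",
            IsDefault: false,
            Priority:  9,
            Run: func(ctx context.Context, logger log.Logger, autofix bool) error {
                if autofix {
                    // a real check would repair the detected problem here
                    logger.Info("nothing to fix")
                    return nil
                }
                logger.Warn("report-only mode: nothing fixed")
                return nil
            },
        })
    }
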
diff --git a/services/doctor/breaking.go b/services/doctor/breaking.go
new file mode 100644
index 0000000..77e3d4e
--- /dev/null
+++ b/services/doctor/breaking.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+func iterateUserAccounts(ctx context.Context, each func(*user.User) error) error {
+ err := db.Iterate(
+ ctx,
+ builder.Gt{"id": 0},
+ func(ctx context.Context, bean *user.User) error {
+ return each(bean)
+ },
+ )
+ return err
+}
+
+// Since 1.16.4, new restrictions have been applied to email addresses, so users whose
+// addresses were already invalid would currently be facing errors.
+// Ref: https://github.com/go-gitea/gitea/pull/19085 & https://github.com/go-gitea/gitea/pull/17688
+func checkUserEmail(ctx context.Context, logger log.Logger, _ bool) error {
+ // We could use quirky SQL to get all users that start without a [a-zA-Z0-9], but that would mean
+ // DB provider-specific SQL and only works _now_. So instead we iterate through all user accounts
+ // and use the user.ValidateEmail function to be future-proof.
+ var invalidUserCount int64
+ if err := iterateUserAccounts(ctx, func(u *user.User) error {
+ // Only check individual users; skip other account types
+ if u.Type != user.UserTypeIndividual {
+ return nil
+ }
+
+ if err := user.ValidateEmail(u.Email); err != nil {
+ invalidUserCount++
+ logger.Warn("User[id=%d name=%q] have not a valid e-mail: %v", u.ID, u.Name, err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("iterateUserAccounts: %w", err)
+ }
+
+ if invalidUserCount == 0 {
+ logger.Info("All users have a valid e-mail.")
+ } else {
+ logger.Warn("%d user(s) have a non-valid e-mail.", invalidUserCount)
+ }
+ return nil
+}
+
+// From time to time Gitea makes changes to the reserved usernames and which symbols
+// are allowed for various reasons. This check helps with detecting users that, according
+// to our reserved names, don't have a valid username.
+func checkUserName(ctx context.Context, logger log.Logger, _ bool) error {
+ var invalidUserCount int64
+ if err := iterateUserAccounts(ctx, func(u *user.User) error {
+ if err := user.IsUsableUsername(u.Name); err != nil {
+ invalidUserCount++
+ logger.Warn("User[id=%d] does not have a valid username: %v", u.ID, err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("iterateUserAccounts: %w", err)
+ }
+
+ if invalidUserCount == 0 {
+ logger.Info("All users have a valid username.")
+ } else {
+ logger.Warn("%d user(s) have a non-valid username.", invalidUserCount)
+ }
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check if users has an valid email address",
+ Name: "check-user-email",
+ IsDefault: false,
+ Run: checkUserEmail,
+ Priority: 9,
+ })
+ Register(&Check{
+ Title: "Check if users have a valid username",
+ Name: "check-user-names",
+ IsDefault: false,
+ Run: checkUserName,
+ Priority: 9,
+ })
+}
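
Editor's note: both checks above share one pattern: iterate every matching row with db.Iterate and validate each bean in Go rather than in provider-specific SQL, which keeps the logic portable and future-proof. A sketch of the same pattern factored into a helper (the helper is hypothetical; it builds on iterateUserAccounts above):

    // countUsersFailing counts user accounts rejected by the given validator.
    func countUsersFailing(ctx context.Context, validate func(*user.User) error) (int64, error) {
        var n int64
        err := iterateUserAccounts(ctx, func(u *user.User) error {
            if err := validate(u); err != nil {
                n++
            }
            return nil
        })
        return n, err
    }

    // usage sketch:
    //   n, err := countUsersFailing(ctx, func(u *user.User) error { return user.ValidateEmail(u.Email) })
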
diff --git a/services/doctor/checkOldArchives.go b/services/doctor/checkOldArchives.go
new file mode 100644
index 0000000..390dfb4
--- /dev/null
+++ b/services/doctor/checkOldArchives.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/util"
+)
+
+func checkOldArchives(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numReposUpdated := 0
+ err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ if repo.IsEmpty {
+ return nil
+ }
+
+ p := filepath.Join(repo.RepoPath(), "archives")
+ isDir, err := util.IsDir(p)
+ if err != nil {
+ log.Warn("check if %s is directory failed: %v", p, err)
+ }
+ if isDir {
+ numRepos++
+ if autofix {
+ if err := os.RemoveAll(p); err == nil {
+ numReposUpdated++
+ } else {
+ log.Warn("remove %s failed: %v", p, err)
+ }
+ }
+ }
+ return nil
+ })
+
+ if autofix {
+ logger.Info("Deleted old archives in %d of %d repositories", numReposUpdated, numRepos)
+ } else {
+ logger.Info("%d repositories have old archives that need to be deleted", numRepos)
+ }
+
+ return err
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check old archives",
+ Name: "check-old-archives",
+ IsDefault: false,
+ Run: checkOldArchives,
+ Priority: 7,
+ })
+}
diff --git a/services/doctor/dbconsistency.go b/services/doctor/dbconsistency.go
new file mode 100644
index 0000000..80f538d
--- /dev/null
+++ b/services/doctor/dbconsistency.go
@@ -0,0 +1,268 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ activities_model "code.gitea.io/gitea/models/activities"
+ auth_model "code.gitea.io/gitea/models/auth"
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ "code.gitea.io/gitea/models/migrations"
+ org_model "code.gitea.io/gitea/models/organization"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+type consistencyCheck struct {
+ Name string
+ Counter func(context.Context) (int64, error)
+ Fixer func(context.Context) (int64, error)
+ FixedMessage string
+}
+
+func (c *consistencyCheck) Run(ctx context.Context, logger log.Logger, autofix bool) error {
+ count, err := c.Counter(ctx)
+ if err != nil {
+ logger.Critical("Error: %v whilst counting %s", err, c.Name)
+ return err
+ }
+ if count > 0 {
+ if autofix {
+ var fixed int64
+ if fixed, err = c.Fixer(ctx); err != nil {
+ logger.Critical("Error: %v whilst fixing %s", err, c.Name)
+ return err
+ }
+
+ prompt := "Deleted"
+ if c.FixedMessage != "" {
+ prompt = c.FixedMessage
+ }
+
+ if fixed < 0 {
+ logger.Info(prompt+" %d %s", count, c.Name)
+ } else {
+ logger.Info(prompt+" %d/%d %s", fixed, count, c.Name)
+ }
+ } else {
+ logger.Warn("Found %d %s", count, c.Name)
+ }
+ }
+ return nil
+}
+
+func asFixer(fn func(ctx context.Context) error) func(ctx context.Context) (int64, error) {
+ return func(ctx context.Context) (int64, error) {
+ err := fn(ctx)
+ return -1, err
+ }
+}
+
+func genericOrphanCheck(name, subject, refobject, joincond string) consistencyCheck {
+ return consistencyCheck{
+ Name: name,
+ Counter: func(ctx context.Context) (int64, error) {
+ return db.CountOrphanedObjects(ctx, subject, refobject, joincond)
+ },
+ Fixer: func(ctx context.Context) (int64, error) {
+ err := db.DeleteOrphanedObjects(ctx, subject, refobject, joincond)
+ return -1, err
+ },
+ }
+}
+
+func checkDBConsistency(ctx context.Context, logger log.Logger, autofix bool) error {
+ // make sure DB version is up-to-date
+ if err := db.InitEngineWithMigration(ctx, migrations.EnsureUpToDate); err != nil {
+ logger.Critical("Model version on the database does not match the current Gitea version. Model consistency will not be checked until the database is upgraded")
+ return err
+ }
+
+ consistencyChecks := []consistencyCheck{
+ {
+ // find labels without existing repo or org
+ Name: "Orphaned Labels without existing repository or organisation",
+ Counter: issues_model.CountOrphanedLabels,
+ Fixer: asFixer(issues_model.DeleteOrphanedLabels),
+ },
+ {
+ // find IssueLabels without existing label
+ Name: "Orphaned Issue Labels without existing label",
+ Counter: issues_model.CountOrphanedIssueLabels,
+ Fixer: asFixer(issues_model.DeleteOrphanedIssueLabels),
+ },
+ {
+ // find issues without existing repository
+ Name: "Orphaned Issues without existing repository",
+ Counter: issues_model.CountOrphanedIssues,
+ Fixer: asFixer(issues_model.DeleteOrphanedIssues),
+ },
+ // find releases without existing repository
+ genericOrphanCheck("Orphaned Releases without existing repository",
+ "release", "repository", "`release`.repo_id=repository.id"),
+ // find pulls without existing issues
+ genericOrphanCheck("Orphaned PullRequests without existing issue",
+ "pull_request", "issue", "pull_request.issue_id=issue.id"),
+ // find pull requests without base repository
+ genericOrphanCheck("Pull request entries without existing base repository",
+ "pull_request", "repository", "pull_request.base_repo_id=repository.id"),
+ // find tracked times without existing issues/pulls
+ genericOrphanCheck("Orphaned TrackedTimes without existing issue",
+ "tracked_time", "issue", "tracked_time.issue_id=issue.id"),
+ // find attachments without existing issues or releases
+ {
+ Name: "Orphaned Attachments without existing issues or releases",
+ Counter: repo_model.CountOrphanedAttachments,
+ Fixer: asFixer(repo_model.DeleteOrphanedAttachments),
+ },
+ // find null archived repositories
+ {
+ Name: "Repositories with is_archived IS NULL",
+ Counter: repo_model.CountNullArchivedRepository,
+ Fixer: repo_model.FixNullArchivedRepository,
+ FixedMessage: "Fixed",
+ },
+ // find label comments with empty labels
+ {
+ Name: "Label comments with empty labels",
+ Counter: issues_model.CountCommentTypeLabelWithEmptyLabel,
+ Fixer: issues_model.FixCommentTypeLabelWithEmptyLabel,
+ FixedMessage: "Fixed",
+ },
+ // find label comments with labels from outside the repository
+ {
+ Name: "Label comments with labels from outside the repository",
+ Counter: issues_model.CountCommentTypeLabelWithOutsideLabels,
+ Fixer: issues_model.FixCommentTypeLabelWithOutsideLabels,
+ FixedMessage: "Removed",
+ },
+ // find issue_label with labels from outside the repository
+ {
+ Name: "IssueLabels with Labels from outside the repository",
+ Counter: issues_model.CountIssueLabelWithOutsideLabels,
+ Fixer: issues_model.FixIssueLabelWithOutsideLabels,
+ FixedMessage: "Removed",
+ },
+ {
+ Name: "Action with created_unix set as an empty string",
+ Counter: activities_model.CountActionCreatedUnixString,
+ Fixer: activities_model.FixActionCreatedUnixString,
+ FixedMessage: "Set to zero",
+ },
+ {
+ Name: "Action Runners without existing owner",
+ Counter: actions_model.CountRunnersWithoutBelongingOwner,
+ Fixer: actions_model.FixRunnersWithoutBelongingOwner,
+ FixedMessage: "Removed",
+ },
+ {
+ Name: "Action Runners without existing repository",
+ Counter: actions_model.CountRunnersWithoutBelongingRepo,
+ Fixer: actions_model.FixRunnersWithoutBelongingRepo,
+ FixedMessage: "Removed",
+ },
+ {
+ Name: "Topics with empty repository count",
+ Counter: repo_model.CountOrphanedTopics,
+ Fixer: repo_model.DeleteOrphanedTopics,
+ FixedMessage: "Removed",
+ },
+ {
+ Name: "Orphaned OAuth2Application without existing User",
+ Counter: auth_model.CountOrphanedOAuth2Applications,
+ Fixer: auth_model.DeleteOrphanedOAuth2Applications,
+ FixedMessage: "Removed",
+ },
+ {
+ Name: "Owner teams with no admin access",
+ Counter: org_model.CountInconsistentOwnerTeams,
+ Fixer: org_model.FixInconsistentOwnerTeams,
+ FixedMessage: "Fixed",
+ },
+ }
+
+ // TODO: function to recalc all counters
+
+ if setting.Database.Type.IsPostgreSQL() {
+ consistencyChecks = append(consistencyChecks, consistencyCheck{
+ Name: "Sequence values",
+ Counter: db.CountBadSequences,
+ Fixer: asFixer(db.FixBadSequences),
+ FixedMessage: "Updated",
+ })
+ }
+
+ consistencyChecks = append(consistencyChecks,
+ // find protected branches without existing repository
+ genericOrphanCheck("Protected Branches without existing repository",
+ "protected_branch", "repository", "protected_branch.repo_id=repository.id"),
+ // find branches without existing repository
+ genericOrphanCheck("Branches without existing repository",
+ "branch", "repository", "branch.repo_id=repository.id"),
+ // find LFS locks without existing repository
+ genericOrphanCheck("LFS locks without existing repository",
+ "lfs_lock", "repository", "lfs_lock.repo_id=repository.id"),
+ // find collaborations without users
+ genericOrphanCheck("Collaborations without existing user",
+ "collaboration", "user", "collaboration.user_id=`user`.id"),
+ // find collaborations without repository
+ genericOrphanCheck("Collaborations without existing repository",
+ "collaboration", "repository", "collaboration.repo_id=repository.id"),
+ // find access without users
+ genericOrphanCheck("Access entries without existing user",
+ "access", "user", "access.user_id=`user`.id"),
+ // find access without repository
+ genericOrphanCheck("Access entries without existing repository",
+ "access", "repository", "access.repo_id=repository.id"),
+ // find action without repository
+ genericOrphanCheck("Action entries without existing repository",
+ "action", "repository", "action.repo_id=repository.id"),
+ // find action without user
+ genericOrphanCheck("Action entries without existing user",
+ "action", "user", "action.act_user_id=`user`.id"),
+ // find OAuth2Grant without existing user
+ genericOrphanCheck("Orphaned OAuth2Grant without existing User",
+ "oauth2_grant", "user", "oauth2_grant.user_id=`user`.id"),
+ // find OAuth2AuthorizationCode without existing OAuth2Grant
+ genericOrphanCheck("Orphaned OAuth2AuthorizationCode without existing OAuth2Grant",
+ "oauth2_authorization_code", "oauth2_grant", "oauth2_authorization_code.grant_id=oauth2_grant.id"),
+ // find stopwatches without existing user
+ genericOrphanCheck("Orphaned Stopwatches without existing User",
+ "stopwatch", "user", "stopwatch.user_id=`user`.id"),
+ // find stopwatches without existing issue
+ genericOrphanCheck("Orphaned Stopwatches without existing Issue",
+ "stopwatch", "issue", "stopwatch.issue_id=`issue`.id"),
+ // find redirects without existing user.
+ genericOrphanCheck("Orphaned Redirects without existing redirect user",
+ "user_redirect", "user", "user_redirect.redirect_user_id=`user`.id"),
+ // find archive download count without existing release
+ genericOrphanCheck("Archive download count without existing Release",
+ "repo_archive_download_count", "release", "repo_archive_download_count.release_id=release.id"),
+ // find authorization tokens without existing user
+ genericOrphanCheck("Authorization token without existing User",
+ "forgejo_auth_token", "user", "forgejo_auth_token.uid=`user`.id"),
+ )
+
+ for _, c := range consistencyChecks {
+ if err := c.Run(ctx, logger, autofix); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check consistency of database",
+ Name: "check-db-consistency",
+ IsDefault: false,
+ Run: checkDBConsistency,
+ Priority: 3,
+ })
+}
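
Editor's note: genericOrphanCheck wraps db.CountOrphanedObjects/db.DeleteOrphanedObjects, which pair a subject table against a refobject table under joincond and report subject rows with no match. A hedged sketch of adding one more such check; the webhook table and its repo_id column are assumptions for illustration, not part of this diff:

    // hypothetical: webhooks whose repository no longer exists
    var webhookOrphanCheck = genericOrphanCheck(
        "Webhooks without existing repository",
        "webhook", "repository",
        "webhook.repo_id=repository.id",
    )

    // Conceptually the counter runs something like:
    //   SELECT COUNT(*) FROM webhook
    //   LEFT JOIN repository ON webhook.repo_id = repository.id
    //   WHERE repository.id IS NULL
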
diff --git a/services/doctor/dbversion.go b/services/doctor/dbversion.go
new file mode 100644
index 0000000..2b20cb2
--- /dev/null
+++ b/services/doctor/dbversion.go
@@ -0,0 +1,42 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/migrations"
+ "code.gitea.io/gitea/modules/log"
+)
+
+func checkDBVersion(ctx context.Context, logger log.Logger, autofix bool) error {
+ logger.Info("Expected database version: %d", migrations.ExpectedVersion())
+ if err := db.InitEngineWithMigration(ctx, migrations.EnsureUpToDate); err != nil {
+ if !autofix {
+ logger.Critical("Error: %v during ensure up to date", err)
+ return err
+ }
+ logger.Warn("Got Error: %v during ensure up to date", err)
+ logger.Warn("Attempting to migrate to the latest DB version to fix this.")
+
+ err = db.InitEngineWithMigration(ctx, migrations.Migrate)
+ if err != nil {
+ logger.Critical("Error: %v during migration", err)
+ }
+ return err
+ }
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check Database Version",
+ Name: "check-db-version",
+ IsDefault: true,
+ Run: checkDBVersion,
+ AbortIfFailed: false,
+ Priority: 2,
+ })
+}
diff --git a/services/doctor/doctor.go b/services/doctor/doctor.go
new file mode 100644
index 0000000..a4eb5e1
--- /dev/null
+++ b/services/doctor/doctor.go
@@ -0,0 +1,138 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+)
+
+// Check represents a Doctor check
+type Check struct {
+ Title string
+ Name string
+ IsDefault bool
+ Run func(ctx context.Context, logger log.Logger, autofix bool) error
+ AbortIfFailed bool
+ SkipDatabaseInitialization bool
+ Priority int
+ InitStorage bool
+}
+
+func initDBSkipLogger(ctx context.Context) error {
+ setting.MustInstalled()
+ setting.LoadDBSetting()
+ if err := db.InitEngine(ctx); err != nil {
+ return fmt.Errorf("db.InitEngine: %w", err)
+ }
+ // some doctor sub-commands need to use git command
+ if err := git.InitFull(ctx); err != nil {
+ return fmt.Errorf("git.InitFull: %w", err)
+ }
+ return nil
+}
+
+type doctorCheckLogger struct {
+ colorize bool
+}
+
+var _ log.BaseLogger = (*doctorCheckLogger)(nil)
+
+func (d *doctorCheckLogger) Log(skip int, level log.Level, format string, v ...any) {
+ _, _ = fmt.Fprintf(os.Stdout, format+"\n", v...)
+}
+
+func (d *doctorCheckLogger) GetLevel() log.Level {
+ return log.TRACE
+}
+
+type doctorCheckStepLogger struct {
+ colorize bool
+}
+
+var _ log.BaseLogger = (*doctorCheckStepLogger)(nil)
+
+func (d *doctorCheckStepLogger) Log(skip int, level log.Level, format string, v ...any) {
+ levelChar := fmt.Sprintf("[%s]", strings.ToUpper(level.String()[0:1]))
+ var levelArg any = levelChar
+ if d.colorize {
+ levelArg = log.NewColoredValue(levelChar, level.ColorAttributes()...)
+ }
+ args := append([]any{levelArg}, v...)
+ _, _ = fmt.Fprintf(os.Stdout, " - %s "+format+"\n", args...)
+}
+
+func (d *doctorCheckStepLogger) GetLevel() log.Level {
+ return log.TRACE
+}
+
+// Checks is the list of registered checks
+var Checks []*Check
+
+// RunChecks runs the doctor checks for the provided list
+func RunChecks(ctx context.Context, colorize, autofix bool, checks []*Check) error {
+ SortChecks(checks)
+ // the checks output logs by a special logger, they do not use the default logger
+ logger := log.BaseLoggerToGeneralLogger(&doctorCheckLogger{colorize: colorize})
+ loggerStep := log.BaseLoggerToGeneralLogger(&doctorCheckStepLogger{colorize: colorize})
+ dbIsInit := false
+ storageIsInit := false
+ for i, check := range checks {
+ if !dbIsInit && !check.SkipDatabaseInitialization {
+ // Only open database after the most basic configuration check
+ if err := initDBSkipLogger(ctx); err != nil {
+ logger.Error("Error whilst initializing the database: %v", err)
+ logger.Error("Check if you are using the right config file. You can use a --config directive to specify one.")
+ return nil
+ }
+ dbIsInit = true
+ }
+ if !storageIsInit && check.InitStorage {
+ if err := storage.Init(); err != nil {
+ logger.Error("Error whilst initializing the storage: %v", err)
+ logger.Error("Check if you are using the right config file. You can use a --config directive to specify one.")
+ return nil
+ }
+ storageIsInit = true
+ }
+ logger.Info("\n[%d] %s", i+1, check.Title)
+ if err := check.Run(ctx, loggerStep, autofix); err != nil {
+ if check.AbortIfFailed {
+ logger.Critical("FAIL")
+ return err
+ }
+ logger.Error("ERROR")
+ } else {
+ logger.Info("OK")
+ }
+ }
+ logger.Info("\nAll done (checks: %d).", len(checks))
+ return nil
+}
+
+// Register registers a check with the list
+func Register(command *Check) {
+ Checks = append(Checks, command)
+}
+
+func SortChecks(checks []*Check) {
+ sort.SliceStable(checks, func(i, j int) bool {
+ if checks[i].Priority == checks[j].Priority {
+ return checks[i].Name < checks[j].Name
+ }
+ // a zero priority consistently sorts last
+ if checks[i].Priority == 0 {
+ return false
+ }
+ if checks[j].Priority == 0 {
+ return true
+ }
+ return checks[i].Priority < checks[j].Priority
+ })
+}
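
Editor's note: RunChecks is the entry point the doctor CLI drives; SortChecks orders checks by ascending Priority, breaking ties by Name, with a zero priority sorting last. A minimal caller-side sketch (the function name is hypothetical) that runs only the default checks in report-only mode:

    func runDefaultChecks(ctx context.Context) error {
        var selected []*Check
        for _, c := range Checks {
            if c.IsDefault {
                selected = append(selected, c)
            }
        }
        // colorize=false, autofix=false: report problems without touching anything
        return RunChecks(ctx, false, false, selected)
    }
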
diff --git a/services/doctor/fix16961.go b/services/doctor/fix16961.go
new file mode 100644
index 0000000..50d9ac6
--- /dev/null
+++ b/services/doctor/fix16961.go
@@ -0,0 +1,328 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "xorm.io/builder"
+)
+
+// #16831 revealed that the dump command was broken in 1.14.3-1.14.6 and 1.15.0 (#15885).
+// This led to the repo_unit and login_source cfg columns not being converted to JSON in the dump.
+// Although it was hoped that only a few users were affected, it appears that many are.
+
+// We therefore need to provide a doctor command to fix this recurring issue, #16961.
+
+func parseBool16961(bs []byte) (bool, error) {
+ if bytes.EqualFold(bs, []byte("%!s(bool=false)")) {
+ return false, nil
+ }
+
+ if bytes.EqualFold(bs, []byte("%!s(bool=true)")) {
+ return true, nil
+ }
+
+ return false, fmt.Errorf("unexpected bool format: %s", string(bs))
+}
+
+func fixUnitConfig16961(bs []byte, cfg *repo_model.UnitConfig) (fixed bool, err error) {
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+
+ // Handle #16961
+ if string(bs) != "&{}" && len(bs) != 0 {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func fixExternalWikiConfig16961(bs []byte, cfg *repo_model.ExternalWikiConfig) (fixed bool, err error) {
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+
+ if len(bs) < 3 {
+ return false, err
+ }
+ if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
+ return false, err
+ }
+ cfg.ExternalWikiURL = string(bs[2 : len(bs)-1])
+ return true, nil
+}
+
+func fixExternalTrackerConfig16961(bs []byte, cfg *repo_model.ExternalTrackerConfig) (fixed bool, err error) {
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+ // Handle #16961
+ if len(bs) < 3 {
+ return false, err
+ }
+
+ if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
+ return false, err
+ }
+
+ parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
+ if len(parts) != 3 {
+ return false, err
+ }
+
+ cfg.ExternalTrackerURL = string(bytes.Join(parts[:len(parts)-2], []byte{' '}))
+ cfg.ExternalTrackerFormat = string(parts[len(parts)-2])
+ cfg.ExternalTrackerStyle = string(parts[len(parts)-1])
+ return true, nil
+}
+
+func fixPullRequestsConfig16961(bs []byte, cfg *repo_model.PullRequestsConfig) (fixed bool, err error) {
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+
+ // Handle #16961
+ if len(bs) < 3 {
+ return false, err
+ }
+
+ if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
+ return false, err
+ }
+
+ // PullRequestsConfig was the following in 1.14
+ // type PullRequestsConfig struct {
+ // IgnoreWhitespaceConflicts bool
+ // AllowMerge bool
+ // AllowRebase bool
+ // AllowRebaseMerge bool
+ // AllowSquash bool
+ // AllowManualMerge bool
+ // AutodetectManualMerge bool
+ // }
+ //
+ // 1.15 added in addition:
+ // DefaultDeleteBranchAfterMerge bool
+ // DefaultMergeStyle MergeStyle
+ parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
+ if len(parts) < 7 {
+ return false, err
+ }
+
+ var parseErr error
+ cfg.IgnoreWhitespaceConflicts, parseErr = parseBool16961(parts[0])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowMerge, parseErr = parseBool16961(parts[1])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowRebase, parseErr = parseBool16961(parts[2])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowRebaseMerge, parseErr = parseBool16961(parts[3])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowSquash, parseErr = parseBool16961(parts[4])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowManualMerge, parseErr = parseBool16961(parts[5])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AutodetectManualMerge, parseErr = parseBool16961(parts[6])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+
+ // 1.14 unit
+ if len(parts) == 7 {
+ return true, nil
+ }
+
+ if len(parts) < 9 {
+ return false, err
+ }
+
+ cfg.DefaultDeleteBranchAfterMerge, parseErr = parseBool16961(parts[7])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+
+ cfg.DefaultMergeStyle = repo_model.MergeStyle(string(bytes.Join(parts[8:], []byte{' '})))
+ return true, nil
+}
+
+func fixIssuesConfig16961(bs []byte, cfg *repo_model.IssuesConfig) (fixed bool, err error) {
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+
+ // Handle #16961
+ if len(bs) < 3 {
+ return false, err
+ }
+
+ if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
+ return false, err
+ }
+
+ parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
+ if len(parts) != 3 {
+ return false, err
+ }
+ var parseErr error
+ cfg.EnableTimetracker, parseErr = parseBool16961(parts[0])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.AllowOnlyContributorsToTrackTime, parseErr = parseBool16961(parts[1])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ cfg.EnableDependencies, parseErr = parseBool16961(parts[2])
+ if parseErr != nil {
+ return false, errors.Join(err, parseErr)
+ }
+ return true, nil
+}
+
+func fixBrokenRepoUnit16961(repoUnit *repo_model.RepoUnit, bs []byte) (fixed bool, err error) {
+ // Shortcut empty or null values
+ if len(bs) == 0 {
+ return false, nil
+ }
+
+ var cfg any
+ err = json.UnmarshalHandleDoubleEncode(bs, &cfg)
+ if err == nil {
+ return false, nil
+ }
+
+ switch repoUnit.Type {
+ case unit.TypeCode, unit.TypeReleases, unit.TypeWiki, unit.TypeProjects:
+ cfg := &repo_model.UnitConfig{}
+ repoUnit.Config = cfg
+ if fixed, err := fixUnitConfig16961(bs, cfg); !fixed {
+ return false, err
+ }
+ case unit.TypeExternalWiki:
+ cfg := &repo_model.ExternalWikiConfig{}
+ repoUnit.Config = cfg
+
+ if fixed, err := fixExternalWikiConfig16961(bs, cfg); !fixed {
+ return false, err
+ }
+ case unit.TypeExternalTracker:
+ cfg := &repo_model.ExternalTrackerConfig{}
+ repoUnit.Config = cfg
+ if fixed, err := fixExternalTrackerConfig16961(bs, cfg); !fixed {
+ return false, err
+ }
+ case unit.TypePullRequests:
+ cfg := &repo_model.PullRequestsConfig{}
+ repoUnit.Config = cfg
+
+ if fixed, err := fixPullRequestsConfig16961(bs, cfg); !fixed {
+ return false, err
+ }
+ case unit.TypeIssues:
+ cfg := &repo_model.IssuesConfig{}
+ repoUnit.Config = cfg
+ if fixed, err := fixIssuesConfig16961(bs, cfg); !fixed {
+ return false, err
+ }
+ default:
+ panic(fmt.Sprintf("unrecognized repo unit type: %v", repoUnit.Type))
+ }
+ return true, nil
+}
+
+func fixBrokenRepoUnits16961(ctx context.Context, logger log.Logger, autofix bool) error {
+ // RepoUnit describes all units of a repository
+ type RepoUnit struct {
+ ID int64
+ RepoID int64
+ Type unit.Type
+ Config []byte
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
+ }
+
+ count := 0
+
+ err := db.Iterate(
+ ctx,
+ builder.Gt{
+ "id": 0,
+ },
+ func(ctx context.Context, unit *RepoUnit) error {
+ bs := unit.Config
+ repoUnit := &repo_model.RepoUnit{
+ ID: unit.ID,
+ RepoID: unit.RepoID,
+ Type: unit.Type,
+ CreatedUnix: unit.CreatedUnix,
+ }
+
+ if fixed, err := fixBrokenRepoUnit16961(repoUnit, bs); !fixed {
+ return err
+ }
+
+ count++
+ if !autofix {
+ return nil
+ }
+
+ return repo_model.UpdateRepoUnit(ctx, repoUnit)
+ },
+ )
+ if err != nil {
+ logger.Critical("Unable to iterate across repounits to fix the broken units: Error %v", err)
+ return err
+ }
+
+ if !autofix {
+ if count == 0 {
+ logger.Info("Found no broken repo_units")
+ } else {
+ logger.Warn("Found %d broken repo_units", count)
+ }
+ return nil
+ }
+ logger.Info("Fixed %d broken repo_units", count)
+
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check for incorrectly dumped repo_units (See #16961)",
+ Name: "fix-broken-repo-units",
+ IsDefault: false,
+ Run: fixBrokenRepoUnits16961,
+ Priority: 7,
+ })
+}
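
Editor's note: the malformed values this check repairs, such as &{%!s(bool=false) ...}, are exactly what Go's fmt package produces when a pointer to a struct containing bools is rendered with the %s verb, which is how the broken dump serialized these configs. A standalone demonstration (the struct here is a cut-down stand-in for the real config type):

    package main

    import "fmt"

    type pullRequestsConfig struct {
        IgnoreWhitespaceConflicts bool
        AllowMerge                bool
    }

    func main() {
        cfg := &pullRequestsConfig{AllowMerge: true}
        // %s has no meaning for bools, so fmt emits %!s(bool=...) markers,
        // the exact strings parseBool16961 recognizes above.
        fmt.Printf("%s\n", cfg) // prints: &{%!s(bool=false) %!s(bool=true)}
    }
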
diff --git a/services/doctor/fix16961_test.go b/services/doctor/fix16961_test.go
new file mode 100644
index 0000000..498ed9c
--- /dev/null
+++ b/services/doctor/fix16961_test.go
@@ -0,0 +1,271 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "testing"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_fixUnitConfig_16961(t *testing.T) {
+ tests := []struct {
+ name string
+ bs string
+ wantFixed bool
+ wantErr bool
+ }{
+ {
+ name: "empty",
+ bs: "",
+ wantFixed: true,
+ wantErr: false,
+ },
+ {
+ name: "normal: {}",
+ bs: "{}",
+ wantFixed: false,
+ wantErr: false,
+ },
+ {
+ name: "broken but fixable: &{}",
+ bs: "&{}",
+ wantFixed: true,
+ wantErr: false,
+ },
+ {
+ name: "broken but unfixable: &{asdasd}",
+ bs: "&{asdasd}",
+ wantFixed: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotFixed, err := fixUnitConfig16961([]byte(tt.bs), &repo_model.UnitConfig{})
+ if (err != nil) != tt.wantErr {
+ t.Errorf("fixUnitConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotFixed != tt.wantFixed {
+ t.Errorf("fixUnitConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
+ }
+ })
+ }
+}
+
+func Test_fixExternalWikiConfig_16961(t *testing.T) {
+ tests := []struct {
+ name string
+ bs string
+ expected string
+ wantFixed bool
+ wantErr bool
+ }{
+ {
+ name: "normal: {\"ExternalWikiURL\":\"http://someurl\"}",
+ bs: "{\"ExternalWikiURL\":\"http://someurl\"}",
+ expected: "http://someurl",
+ wantFixed: false,
+ wantErr: false,
+ },
+ {
+ name: "broken: &{http://someurl}",
+ bs: "&{http://someurl}",
+ expected: "http://someurl",
+ wantFixed: true,
+ wantErr: false,
+ },
+ {
+ name: "broken but unfixable: http://someurl",
+ bs: "http://someurl",
+ wantFixed: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &repo_model.ExternalWikiConfig{}
+ gotFixed, err := fixExternalWikiConfig16961([]byte(tt.bs), cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("fixExternalWikiConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotFixed != tt.wantFixed {
+ t.Errorf("fixExternalWikiConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
+ }
+ if cfg.ExternalWikiURL != tt.expected {
+ t.Errorf("fixExternalWikiConfig_16961().ExternalWikiURL = %v, want %v", cfg.ExternalWikiURL, tt.expected)
+ }
+ })
+ }
+}
+
+func Test_fixExternalTrackerConfig_16961(t *testing.T) {
+ tests := []struct {
+ name string
+ bs string
+ expected repo_model.ExternalTrackerConfig
+ wantFixed bool
+ wantErr bool
+ }{
+ {
+ name: "normal",
+ bs: `{"ExternalTrackerURL":"a","ExternalTrackerFormat":"b","ExternalTrackerStyle":"c"}`,
+ expected: repo_model.ExternalTrackerConfig{
+ ExternalTrackerURL: "a",
+ ExternalTrackerFormat: "b",
+ ExternalTrackerStyle: "c",
+ },
+ wantFixed: false,
+ wantErr: false,
+ },
+ {
+ name: "broken",
+ bs: "&{a b c}",
+ expected: repo_model.ExternalTrackerConfig{
+ ExternalTrackerURL: "a",
+ ExternalTrackerFormat: "b",
+ ExternalTrackerStyle: "c",
+ },
+ wantFixed: true,
+ wantErr: false,
+ },
+ {
+ name: "broken - too many fields",
+ bs: "&{a b c d}",
+ wantFixed: false,
+ wantErr: true,
+ },
+ {
+ name: "broken - wrong format",
+ bs: "a b c d}",
+ wantFixed: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &repo_model.ExternalTrackerConfig{}
+ gotFixed, err := fixExternalTrackerConfig16961([]byte(tt.bs), cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("fixExternalTrackerConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotFixed != tt.wantFixed {
+ t.Errorf("fixExternalTrackerConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
+ }
+ if cfg.ExternalTrackerFormat != tt.expected.ExternalTrackerFormat {
+ t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerFormat = %v, want %v", cfg.ExternalTrackerFormat, tt.expected.ExternalTrackerFormat)
+ }
+ if cfg.ExternalTrackerStyle != tt.expected.ExternalTrackerStyle {
+ t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerStyle = %v, want %v", cfg.ExternalTrackerStyle, tt.expected.ExternalTrackerStyle)
+ }
+ if cfg.ExternalTrackerURL != tt.expected.ExternalTrackerURL {
+ t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerURL = %v, want %v", cfg.ExternalTrackerURL, tt.expected.ExternalTrackerURL)
+ }
+ })
+ }
+}
+
+func Test_fixPullRequestsConfig_16961(t *testing.T) {
+ tests := []struct {
+ name string
+ bs string
+ expected repo_model.PullRequestsConfig
+ wantFixed bool
+ wantErr bool
+ }{
+ {
+ name: "normal",
+ bs: `{"IgnoreWhitespaceConflicts":false,"AllowMerge":false,"AllowRebase":false,"AllowRebaseMerge":false,"AllowSquash":false,"AllowManualMerge":false,"AutodetectManualMerge":false,"DefaultDeleteBranchAfterMerge":false,"DefaultMergeStyle":""}`,
+ },
+ {
+ name: "broken - 1.14",
+ bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false)}`,
+ expected: repo_model.PullRequestsConfig{
+ IgnoreWhitespaceConflicts: false,
+ AllowMerge: true,
+ AllowRebase: true,
+ AllowRebaseMerge: true,
+ AllowSquash: true,
+ AllowManualMerge: false,
+ AutodetectManualMerge: false,
+ },
+ wantFixed: true,
+ },
+ {
+ name: "broken - 1.15",
+ bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false) %!s(bool=false) merge}`,
+ expected: repo_model.PullRequestsConfig{
+ AllowMerge: true,
+ AllowRebase: true,
+ AllowRebaseMerge: true,
+ AllowSquash: true,
+ DefaultMergeStyle: repo_model.MergeStyleMerge,
+ },
+ wantFixed: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &repo_model.PullRequestsConfig{}
+ gotFixed, err := fixPullRequestsConfig16961([]byte(tt.bs), cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("fixPullRequestsConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotFixed != tt.wantFixed {
+ t.Errorf("fixPullRequestsConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
+ }
+ assert.EqualValues(t, &tt.expected, cfg)
+ })
+ }
+}
+
+func Test_fixIssuesConfig_16961(t *testing.T) {
+ tests := []struct {
+ name string
+ bs string
+ expected repo_model.IssuesConfig
+ wantFixed bool
+ wantErr bool
+ }{
+ {
+ name: "normal",
+ bs: `{"EnableTimetracker":true,"AllowOnlyContributorsToTrackTime":true,"EnableDependencies":true}`,
+ expected: repo_model.IssuesConfig{
+ EnableTimetracker: true,
+ AllowOnlyContributorsToTrackTime: true,
+ EnableDependencies: true,
+ },
+ },
+ {
+ name: "broken",
+ bs: `&{%!s(bool=true) %!s(bool=true) %!s(bool=true)}`,
+ expected: repo_model.IssuesConfig{
+ EnableTimetracker: true,
+ AllowOnlyContributorsToTrackTime: true,
+ EnableDependencies: true,
+ },
+ wantFixed: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := &repo_model.IssuesConfig{}
+ gotFixed, err := fixIssuesConfig16961([]byte(tt.bs), cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("fixIssuesConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotFixed != tt.wantFixed {
+ t.Errorf("fixIssuesConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
+ }
+ assert.EqualValues(t, &tt.expected, cfg)
+ })
+ }
+}
diff --git a/services/doctor/fix8312.go b/services/doctor/fix8312.go
new file mode 100644
index 0000000..4fc0498
--- /dev/null
+++ b/services/doctor/fix8312.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ org_model "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+func fixOwnerTeamCreateOrgRepo(ctx context.Context, logger log.Logger, autofix bool) error {
+ count := 0
+
+ err := db.Iterate(
+ ctx,
+ builder.Eq{"authorize": perm.AccessModeOwner, "can_create_org_repo": false},
+ func(ctx context.Context, team *org_model.Team) error {
+ team.CanCreateOrgRepo = true
+ count++
+
+ if !autofix {
+ return nil
+ }
+
+ return models.UpdateTeam(ctx, team, false, false)
+ },
+ )
+ if err != nil {
+ logger.Critical("Unable to iterate across repounits to fix incorrect can_create_org_repo: Error %v", err)
+ return err
+ }
+
+ if !autofix {
+ if count == 0 {
+ logger.Info("Found no team with incorrect can_create_org_repo")
+ } else {
+ logger.Warn("Found %d teams with incorrect can_create_org_repo", count)
+ }
+ return nil
+ }
+ logger.Info("Fixed %d teams with incorrect can_create_org_repo", count)
+
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check for incorrect can_create_org_repo for org owner teams",
+ Name: "fix-owner-team-create-org-repo",
+ IsDefault: false,
+ Run: fixOwnerTeamCreateOrgRepo,
+ Priority: 7,
+ })
+}
diff --git a/services/doctor/heads.go b/services/doctor/heads.go
new file mode 100644
index 0000000..41fca01
--- /dev/null
+++ b/services/doctor/heads.go
@@ -0,0 +1,88 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+)
+
+func synchronizeRepoHeads(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numHeadsBroken := 0
+ numDefaultBranchesBroken := 0
+ numReposUpdated := 0
+ err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ numRepos++
+ _, _, defaultBranchErr := git.NewCommand(ctx, "rev-parse").AddDashesAndList(repo.DefaultBranch).RunStdString(&git.RunOpts{Dir: repo.RepoPath()})
+
+ head, _, headErr := git.NewCommand(ctx, "symbolic-ref", "--short", "HEAD").RunStdString(&git.RunOpts{Dir: repo.RepoPath()})
+
+ // what we expect: default branch is valid, and HEAD points to it
+ if headErr == nil && defaultBranchErr == nil && head == repo.DefaultBranch {
+ return nil
+ }
+
+ if headErr != nil {
+ numHeadsBroken++
+ }
+ if defaultBranchErr != nil {
+ numDefaultBranchesBroken++
+ }
+
+ // if default branch is broken, let the user fix that in the UI
+ if defaultBranchErr != nil {
+ logger.Warn("Default branch for %s/%s doesn't point to a valid commit", repo.OwnerName, repo.Name)
+ return nil
+ }
+
+ // if we're not autofixing, that's all we can do
+ if !autofix {
+ return nil
+ }
+
+ // otherwise, let's try fixing HEAD
+ err := git.NewCommand(ctx, "symbolic-ref").AddDashesAndList("HEAD", git.BranchPrefix+repo.DefaultBranch).Run(&git.RunOpts{Dir: repo.RepoPath()})
+ if err != nil {
+ logger.Warn("Failed to fix HEAD for %s/%s: %v", repo.OwnerName, repo.Name, err)
+ return nil
+ }
+ numReposUpdated++
+ return nil
+ })
+ if err != nil {
+ logger.Critical("Error when fixing repo HEADs: %v", err)
+ }
+
+ if autofix {
+ logger.Info("Out of %d repos, HEADs for %d are now fixed and HEADS for %d are still broken", numRepos, numReposUpdated, numDefaultBranchesBroken+numHeadsBroken-numReposUpdated)
+ } else {
+ if numHeadsBroken == 0 && numDefaultBranchesBroken == 0 {
+ logger.Info("All %d repos have their HEADs in the correct state", numRepos)
+ } else {
+ if numHeadsBroken == 0 && numDefaultBranchesBroken != 0 {
+ logger.Critical("Default branches are broken for %d/%d repos", numDefaultBranchesBroken, numRepos)
+ } else if numHeadsBroken != 0 && numDefaultBranchesBroken == 0 {
+ logger.Warn("HEADs are broken for %d/%d repos", numHeadsBroken, numRepos)
+ } else {
+ logger.Critical("Out of %d repos, HEADS are broken for %d and default branches are broken for %d", numRepos, numHeadsBroken, numDefaultBranchesBroken)
+ }
+ }
+ }
+
+ return err
+}
+
+func init() {
+ Register(&Check{
+ Title: "Synchronize repo HEADs",
+ Name: "synchronize-repo-heads",
+ IsDefault: true,
+ Run: synchronizeRepoHeads,
+ Priority: 7,
+ })
+}
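
Editor's note: the autofix step above is the git-level equivalent of re-pointing HEAD at the default branch. A sketch of the same operation factored into a helper, using the git module wrappers exactly as they appear in this diff (the helper name is hypothetical):

    // repointHEAD turns HEAD into a symbolic ref to the given branch,
    // i.e. the equivalent of: git symbolic-ref -- HEAD refs/heads/<branch>
    func repointHEAD(ctx context.Context, repoPath, branch string) error {
        return git.NewCommand(ctx, "symbolic-ref").
            AddDashesAndList("HEAD", git.BranchPrefix+branch).
            Run(&git.RunOpts{Dir: repoPath})
    }
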
diff --git a/services/doctor/lfs.go b/services/doctor/lfs.go
new file mode 100644
index 0000000..8531b7b
--- /dev/null
+++ b/services/doctor/lfs.go
@@ -0,0 +1,52 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/repository"
+)
+
+func init() {
+ Register(&Check{
+ Title: "Garbage collect LFS",
+ Name: "gc-lfs",
+ IsDefault: false,
+ Run: garbageCollectLFSCheck,
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+}
+
+func garbageCollectLFSCheck(ctx context.Context, logger log.Logger, autofix bool) error {
+ if !setting.LFS.StartServer {
+ return fmt.Errorf("LFS support is disabled")
+ }
+
+ if err := repository.GarbageCollectLFSMetaObjects(ctx, repository.GarbageCollectLFSMetaObjectsOptions{
+ LogDetail: logger.Info,
+ AutoFix: autofix,
+ // Only attempt to garbage collect lfs meta objects older than a week as the order of git lfs upload
+ // and git object upload is not necessarily guaranteed. It's possible to imagine a situation whereby
+ // an LFS object is uploaded but the git branch is not uploaded immediately, or there are some rapid
+ // changes in new branches that might lead to lfs objects becoming temporarily unassociated with git
+ // objects.
+ //
+ // It is likely that a week is potentially excessive but it should definitely be enough that any
+ // unassociated LFS object is genuinely unassociated.
+ OlderThan: time.Now().Add(-24 * time.Hour * 7),
+ // We don't set the UpdatedLessRecentlyThan because we want to do a full GC
+ }); err != nil {
+ logger.Error("Couldn't garabage collect LFS objects: %v", err)
+ return err
+ }
+
+ return checkStorage(&checkStorageOptions{LFS: true})(ctx, logger, autofix)
+}
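
Editor's note: the week-long cutoff justified in the comment above is passed via OlderThan. A sketch of invoking the same garbage collection directly, assuming the options struct is used exactly as above (the wrapper function is hypothetical, and package-level log.Info stands in for the doctor step logger):

    func gcWeekOldLFS(ctx context.Context) error {
        return repository.GarbageCollectLFSMetaObjects(ctx, repository.GarbageCollectLFSMetaObjectsOptions{
            AutoFix:   true,
            LogDetail: log.Info,
            // mirror the doctor check: only touch objects older than a week
            OlderThan: time.Now().Add(-7 * 24 * time.Hour),
        })
    }
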
diff --git a/services/doctor/mergebase.go b/services/doctor/mergebase.go
new file mode 100644
index 0000000..de460c4
--- /dev/null
+++ b/services/doctor/mergebase.go
@@ -0,0 +1,114 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+func iteratePRs(ctx context.Context, repo *repo_model.Repository, each func(*repo_model.Repository, *issues_model.PullRequest) error) error {
+ return db.Iterate(
+ ctx,
+ builder.Eq{"base_repo_id": repo.ID},
+ func(ctx context.Context, bean *issues_model.PullRequest) error {
+ return each(repo, bean)
+ },
+ )
+}
+
+func checkPRMergeBase(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numPRs := 0
+ numPRsUpdated := 0
+ err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ numRepos++
+ return iteratePRs(ctx, repo, func(repo *repo_model.Repository, pr *issues_model.PullRequest) error {
+ numPRs++
+ pr.BaseRepo = repo
+ repoPath := repo.RepoPath()
+
+ oldMergeBase := pr.MergeBase
+
+ if !pr.HasMerged {
+ var err error
+ pr.MergeBase, _, err = git.NewCommand(ctx, "merge-base").AddDashesAndList(pr.BaseBranch, pr.GetGitRefName()).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ var err2 error
+ pr.MergeBase, _, err2 = git.NewCommand(ctx, "rev-parse").AddDynamicArguments(git.BranchPrefix + pr.BaseBranch).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err2 != nil {
+ logger.Warn("Unable to get merge base for PR ID %d, #%d onto %s in %s/%s. Error: %v & %v", pr.ID, pr.Index, pr.BaseBranch, pr.BaseRepo.OwnerName, pr.BaseRepo.Name, err, err2)
+ return nil
+ }
+ }
+ } else {
+ parentsString, _, err := git.NewCommand(ctx, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ logger.Warn("Unable to get parents for merged PR ID %d, #%d onto %s in %s/%s. Error: %v", pr.ID, pr.Index, pr.BaseBranch, pr.BaseRepo.OwnerName, pr.BaseRepo.Name, err)
+ return nil
+ }
+ parents := strings.Split(strings.TrimSpace(parentsString), " ")
+ if len(parents) < 2 {
+ return nil
+ }
+
+ refs := append([]string{}, parents[1:]...)
+ refs = append(refs, pr.GetGitRefName())
+ cmd := git.NewCommand(ctx, "merge-base").AddDashesAndList(refs...)
+ pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath})
+ if err != nil {
+ logger.Warn("Unable to get merge base for merged PR ID %d, #%d onto %s in %s/%s. Error: %v", pr.ID, pr.Index, pr.BaseBranch, pr.BaseRepo.OwnerName, pr.BaseRepo.Name, err)
+ return nil
+ }
+ }
+ pr.MergeBase = strings.TrimSpace(pr.MergeBase)
+ if pr.MergeBase != oldMergeBase {
+ if autofix {
+ if err := pr.UpdateCols(ctx, "merge_base"); err != nil {
+ logger.Critical("Failed to update merge_base. ERROR: %v", err)
+ return fmt.Errorf("Failed to update merge_base. ERROR: %w", err)
+ }
+ } else {
+ logger.Info("#%d onto %s in %s/%s: MergeBase should be %s but is %s", pr.Index, pr.BaseBranch, pr.BaseRepo.OwnerName, pr.BaseRepo.Name, oldMergeBase, pr.MergeBase)
+ }
+ numPRsUpdated++
+ }
+ return nil
+ })
+ })
+
+ if autofix {
+ logger.Info("%d PR mergebases updated of %d PRs total in %d repos", numPRsUpdated, numPRs, numRepos)
+ } else {
+ if numPRsUpdated == 0 {
+ logger.Info("All %d PRs in %d repos have a correct mergebase", numPRs, numRepos)
+ } else if err == nil {
+ logger.Critical("%d PRs with incorrect mergebases of %d PRs total in %d repos", numPRsUpdated, numPRs, numRepos)
+ return fmt.Errorf("%d PRs with incorrect mergebases of %d PRs total in %d repos", numPRsUpdated, numPRs, numRepos)
+ } else {
+ logger.Warn("%d PRs with incorrect mergebases of %d PRs total in %d repos", numPRsUpdated, numPRs, numRepos)
+ }
+ }
+
+ return err
+}
+
+func init() {
+ Register(&Check{
+ Title: "Recalculate merge bases",
+ Name: "recalculate-merge-bases",
+ IsDefault: false,
+ Run: checkPRMergeBase,
+ Priority: 7,
+ })
+}
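
Editor's note: for a merged PR, `git rev-list --parents -n 1 <merged-commit>` prints one line: the commit hash followed by the hashes of its parents. The check drops the commit itself and feeds the remaining parents plus the PR head ref to `git merge-base`. A toy illustration of the parsing step (the hash values are made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // sample `git rev-list --parents -n 1` output: merge commit, then its parents
        line := "1111111 2222222 3333333\n"
        parents := strings.Split(strings.TrimSpace(line), " ")
        // parents[0] is the merge commit itself; checkPRMergeBase passes on the rest
        fmt.Println(parents[1:]) // [2222222 3333333]
    }
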
diff --git a/services/doctor/misc.go b/services/doctor/misc.go
new file mode 100644
index 0000000..9300c3a
--- /dev/null
+++ b/services/doctor/misc.go
@@ -0,0 +1,299 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/structs"
+ "code.gitea.io/gitea/modules/util"
+
+ lru "github.com/hashicorp/golang-lru/v2"
+ "xorm.io/builder"
+)
+
+func iterateRepositories(ctx context.Context, each func(*repo_model.Repository) error) error {
+ err := db.Iterate(
+ ctx,
+ builder.Gt{"id": 0},
+ func(ctx context.Context, bean *repo_model.Repository) error {
+ return each(bean)
+ },
+ )
+ return err
+}
+
+func checkScriptType(ctx context.Context, logger log.Logger, autofix bool) error {
+ path, err := exec.LookPath(setting.ScriptType)
+ if err != nil {
+ logger.Critical("ScriptType \"%q\" is not on the current PATH. Error: %v", setting.ScriptType, err)
+ return fmt.Errorf("ScriptType \"%q\" is not on the current PATH. Error: %w", setting.ScriptType, err)
+ }
+ logger.Info("ScriptType %s is on the current PATH at %s", setting.ScriptType, path)
+ return nil
+}
+
+func checkHooks(ctx context.Context, logger log.Logger, autofix bool) error {
+ if err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ results, err := repository.CheckDelegateHooks(repo.RepoPath())
+ if err != nil {
+ logger.Critical("Unable to check delegate hooks for repo %-v. ERROR: %v", repo, err)
+ return fmt.Errorf("Unable to check delegate hooks for repo %-v. ERROR: %w", repo, err)
+ }
+ if len(results) > 0 && autofix {
+ logger.Warn("Regenerated hooks for %s", repo.FullName())
+ if err := repository.CreateDelegateHooks(repo.RepoPath()); err != nil {
+ logger.Critical("Unable to recreate delegate hooks for %-v. ERROR: %v", repo, err)
+ return fmt.Errorf("Unable to recreate delegate hooks for %-v. ERROR: %w", repo, err)
+ }
+ }
+ for _, result := range results {
+ logger.Warn(result)
+ }
+ return nil
+ }); err != nil {
+ logger.Critical("Errors noted whilst checking delegate hooks.")
+ return err
+ }
+ return nil
+}
+
+func checkUserStarNum(ctx context.Context, logger log.Logger, autofix bool) error {
+ if autofix {
+ if err := models.DoctorUserStarNum(ctx); err != nil {
+ logger.Critical("Unable update User Stars numbers")
+ return err
+ }
+ logger.Info("Updated User Stars numbers.")
+ } else {
+ logger.Info("No check available for User Stars numbers (skipped)")
+ }
+ return nil
+}
+
+func checkEnablePushOptions(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numNeedUpdate := 0
+
+ if err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ numRepos++
+ r, err := gitrepo.OpenRepository(ctx, repo)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ if autofix {
+ _, _, err := git.NewCommand(ctx, "config", "receive.advertisePushOptions", "true").RunStdString(&git.RunOpts{Dir: r.Path})
+ return err
+ }
+
+ value, _, err := git.NewCommand(ctx, "config", "receive.advertisePushOptions").RunStdString(&git.RunOpts{Dir: r.Path})
+ if err != nil {
+ return err
+ }
+
+ result, valid := git.ParseBool(strings.TrimSpace(value))
+ if !result || !valid {
+ numNeedUpdate++
+ logger.Info("%s: does not have receive.advertisePushOptions set correctly: %q", repo.FullName(), value)
+ }
+ return nil
+ }); err != nil {
+ logger.Critical("Unable to EnablePushOptions: %v", err)
+ return err
+ }
+
+ if autofix {
+ logger.Info("Enabled push options for %d repositories.", numRepos)
+ } else {
+ logger.Info("Checked %d repositories, %d need updates.", numRepos, numNeedUpdate)
+ }
+
+ return nil
+}
+
+func checkDaemonExport(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numNeedUpdate := 0
+ cache, err := lru.New[int64, any](512)
+ if err != nil {
+ logger.Critical("Unable to create cache: %v", err)
+ return err
+ }
+ if err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ numRepos++
+
+ if owner, has := cache.Get(repo.OwnerID); has {
+ repo.Owner = owner.(*user_model.User)
+ } else {
+ if err := repo.LoadOwner(ctx); err != nil {
+ return err
+ }
+ cache.Add(repo.OwnerID, repo.Owner)
+ }
+
+ // Create/Remove git-daemon-export-ok for git-daemon...
+ daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
+ isExist, err := util.IsExist(daemonExportFile)
+ if err != nil {
+ log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
+ return err
+ }
+ isPublic := !repo.IsPrivate && repo.Owner.Visibility == structs.VisibleTypePublic
+
+ if isPublic != isExist {
+ numNeedUpdate++
+ if autofix {
+ if !isPublic && isExist {
+ if err = util.Remove(daemonExportFile); err != nil {
+ log.Error("Failed to remove %s: %v", daemonExportFile, err)
+ }
+ } else if isPublic && !isExist {
+ if f, err := os.Create(daemonExportFile); err != nil {
+ log.Error("Failed to create %s: %v", daemonExportFile, err)
+ } else {
+ f.Close()
+ }
+ }
+ }
+ }
+ return nil
+ }); err != nil {
+ logger.Critical("Unable to checkDaemonExport: %v", err)
+ return err
+ }
+
+ if autofix {
+ logger.Info("Updated git-daemon-export-ok files for %d of %d repositories.", numNeedUpdate, numRepos)
+ } else {
+ logger.Info("Checked %d repositories, %d need updates.", numRepos, numNeedUpdate)
+ }
+
+ return nil
+}
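+
+// Note (illustrative): the LRU cache above deduplicates owner lookups, so a
+// user owning many repositories costs one LoadOwner query instead of one per
+// repository, while the 512-entry bound keeps memory use flat on large
+// instances.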
+
+func checkCommitGraph(ctx context.Context, logger log.Logger, autofix bool) error {
+ numRepos := 0
+ numNeedUpdate := 0
+ numWritten := 0
+ if err := iterateRepositories(ctx, func(repo *repo_model.Repository) error {
+ numRepos++
+
+ commitGraphExists := func() (bool, error) {
+ // Check commit-graph exists
+ commitGraphFile := path.Join(repo.RepoPath(), `objects/info/commit-graph`)
+ isExist, err := util.IsExist(commitGraphFile)
+ if err != nil {
+ logger.Error("Unable to check if %s exists. Error: %v", commitGraphFile, err)
+ return false, err
+ }
+
+ if !isExist {
+ commitGraphsDir := path.Join(repo.RepoPath(), `objects/info/commit-graphs`)
+ isExist, err = util.IsExist(commitGraphsDir)
+ if err != nil {
+ logger.Error("Unable to check if %s exists. Error: %v", commitGraphsDir, err)
+ return false, err
+ }
+ }
+ return isExist, nil
+ }
+
+ isExist, err := commitGraphExists()
+ if err != nil {
+ return err
+ }
+ if !isExist {
+ numNeedUpdate++
+ if autofix {
+ if err := git.WriteCommitGraph(ctx, repo.RepoPath()); err != nil {
+ logger.Error("Unable to write commit-graph in %s. Error: %v", repo.FullName(), err)
+ return err
+ }
+ isExist, err := commitGraphExists()
+ if err != nil {
+ return err
+ }
+ if isExist {
+ numWritten++
+ logger.Info("Commit-graph written: %s", repo.FullName())
+ } else {
+ logger.Warn("No commit-graph written: %s", repo.FullName())
+ }
+ }
+ }
+ return nil
+ }); err != nil {
+ logger.Critical("Unable to checkCommitGraph: %v", err)
+ return err
+ }
+
+ if autofix {
+ logger.Info("Wrote commit-graph files for %d of %d repositories.", numWritten, numRepos)
+ } else {
+ logger.Info("Checked %d repositories, %d without commit-graphs.", numRepos, numNeedUpdate)
+ }
+
+ return nil
+}
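+
+// For reference (illustrative, not part of the upstream change):
+// git.WriteCommitGraph wraps the equivalent of
+//
+//	git -C /path/to/repo.git commit-graph write
+//
+// which produces objects/info/commit-graph, or the split form under
+// objects/info/commit-graphs/; hence the two locations probed by
+// commitGraphExists above.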
+
+func init() {
+ Register(&Check{
+ Title: "Check if SCRIPT_TYPE is available",
+ Name: "script-type",
+ IsDefault: false,
+ Run: checkScriptType,
+ Priority: 5,
+ })
+ Register(&Check{
+ Title: "Check if hook files are up-to-date and executable",
+ Name: "hooks",
+ IsDefault: false,
+ Run: checkHooks,
+ Priority: 6,
+ })
+ Register(&Check{
+ Title: "Recalculate Stars number for all user",
+ Name: "recalculate-stars-number",
+ IsDefault: false,
+ Run: checkUserStarNum,
+ Priority: 6,
+ })
+ Register(&Check{
+ Title: "Check that all git repositories have receive.advertisePushOptions set to true",
+ Name: "enable-push-options",
+ IsDefault: false,
+ Run: checkEnablePushOptions,
+ Priority: 7,
+ })
+ Register(&Check{
+ Title: "Check git-daemon-export-ok files",
+ Name: "check-git-daemon-export-ok",
+ IsDefault: false,
+ Run: checkDaemonExport,
+ Priority: 8,
+ })
+ Register(&Check{
+ Title: "Check commit-graphs",
+ Name: "check-commit-graphs",
+ IsDefault: false,
+ Run: checkCommitGraph,
+ Priority: 9,
+ })
+}
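+
+// A minimal sketch of wiring an additional check into this registry
+// (illustrative only; checkSomething is a hypothetical
+// func(ctx context.Context, logger log.Logger, autofix bool) error):
+//
+//	func init() {
+//		Register(&Check{
+//			Title:     "Describe what the check verifies",
+//			Name:      "my-check",
+//			IsDefault: false,
+//			Run:       checkSomething,
+//			Priority:  10,
+//		})
+//	}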
diff --git a/services/doctor/packages_nuget.go b/services/doctor/packages_nuget.go
new file mode 100644
index 0000000..47fdb3a
--- /dev/null
+++ b/services/doctor/packages_nuget.go
@@ -0,0 +1,160 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ nuget_module "code.gitea.io/gitea/modules/packages/nuget"
+ packages_service "code.gitea.io/gitea/services/packages"
+
+ "xorm.io/builder"
+)
+
+func init() {
+ Register(&Check{
+ Title: "Extract Nuget Nuspec Files to content store",
+ Name: "packages-nuget-nuspec",
+ IsDefault: false,
+ Run: PackagesNugetNuspecCheck,
+ Priority: 15,
+ InitStorage: true,
+ })
+}
+
+func PackagesNugetNuspecCheck(ctx context.Context, logger log.Logger, autofix bool) error {
+ found := 0
+ fixed := 0
+ errors := 0
+
+ err := db.Iterate(ctx, builder.Eq{"package.type": packages.TypeNuGet, "package.is_internal": false}, func(ctx context.Context, pkg *packages.Package) error {
+ logger.Info("Processing package %s", pkg.Name)
+
+ pvs, _, err := packages.SearchVersions(ctx, &packages.PackageSearchOptions{
+ Type: packages.TypeNuGet,
+ PackageID: pkg.ID,
+ })
+ if err != nil {
+ // Should never happen
+ logger.Error("Failed to search for versions for package %s: %v", pkg.Name, err)
+ return err
+ }
+
+ logger.Info("Found %d versions for package %s", len(pvs), pkg.Name)
+
+ for _, pv := range pvs {
+ pfs, err := packages.GetFilesByVersionID(ctx, pv.ID)
+ if err != nil {
+ logger.Error("Failed to get files for package version %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+
+ if slices.ContainsFunc(pfs, func(pf *packages.PackageFile) bool { return strings.HasSuffix(pf.LowerName, ".nuspec") }) {
+ logger.Debug("Nuspec file already exists for %s %s", pkg.Name, pv.Version)
+ continue
+ }
+
+ nupkgIdx := slices.IndexFunc(pfs, func(pf *packages.PackageFile) bool { return pf.IsLead })
+
+ if nupkgIdx < 0 {
+ logger.Error("Missing nupkg file for %s %s", pkg.Name, pv.Version)
+ errors++
+ continue
+ }
+
+ pf := pfs[nupkgIdx]
+
+ logger.Warn("Missing nuspec file found for %s %s", pkg.Name, pv.Version)
+ found++
+
+ if !autofix {
+ continue
+ }
+
+ s, _, _, err := packages_service.GetPackageFileStream(ctx, pf)
+ if err != nil {
+ logger.Error("Failed to get nupkg file stream for %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+ defer s.Close() // note: defers here accumulate until this package's callback returns, not per version
+
+ buf, err := packages_module.CreateHashedBufferFromReader(s)
+ if err != nil {
+ logger.Error("Failed to create hashed buffer for nupkg from reader for %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+ defer buf.Close()
+
+ np, err := nuget_module.ParsePackageMetaData(buf, buf.Size())
+ if err != nil {
+ logger.Error("Failed to parse package metadata for %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+
+ nuspecBuf, err := packages_module.CreateHashedBufferFromReaderWithSize(np.NuspecContent, np.NuspecContent.Len())
+ if err != nil {
+ logger.Error("Failed to create hashed buffer for nuspec from reader for %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+ defer nuspecBuf.Close()
+
+ _, err = packages_service.AddFileToPackageVersionInternal(
+ ctx,
+ pv,
+ &packages_service.PackageFileCreationInfo{
+ PackageFileInfo: packages_service.PackageFileInfo{
+ Filename: fmt.Sprintf("%s.nuspec", pkg.LowerName),
+ },
+ Data: nuspecBuf,
+ IsLead: false,
+ },
+ )
+ if err != nil {
+ logger.Error("Failed to add nuspec file for %s %s: %v", pkg.Name, pv.Version, err)
+ errors++
+ continue
+ }
+
+ fixed++
+ }
+
+ return nil
+ })
+ if err != nil {
+ logger.Error("Failed to iterate over users: %v", err)
+ return err
+ }
+
+ if autofix {
+ if fixed > 0 {
+ logger.Info("Fixed %d package versions by extracting nuspec files", fixed)
+ } else {
+ logger.Info("No package versions with missing nuspec files found")
+ }
+ } else {
+ if found > 0 {
+ logger.Info("Found %d package versions with missing nuspec files", found)
+ } else {
+ logger.Info("No package versions with missing nuspec files found")
+ }
+ }
+
+ if errors > 0 {
+ return fmt.Errorf("failed to fix %d nuspec files", errors)
+ }
+
+ return nil
+}
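+
+// Usage sketch (an assumption based on the Name registered in init above,
+// not something this patch adds):
+//
+//	forgejo doctor check --run packages-nuget-nuspec --fix
+//
+// where --fix corresponds to autofix == true in PackagesNugetNuspecCheck.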
diff --git a/services/doctor/paths.go b/services/doctor/paths.go
new file mode 100644
index 0000000..8e37f01
--- /dev/null
+++ b/services/doctor/paths.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+type configurationFile struct {
+ Name string
+ Path string
+ IsDirectory bool
+ Required bool
+ Writable bool
+}
+
+func checkConfigurationFile(logger log.Logger, autofix bool, fileOpts configurationFile) error {
+ logger.Info(`%-26s %q`, log.NewColoredValue(fileOpts.Name+":", log.Reset), fileOpts.Path)
+ fi, err := os.Stat(fileOpts.Path)
+ if err != nil {
+ if os.IsNotExist(err) && autofix && fileOpts.IsDirectory {
+ if err := os.MkdirAll(fileOpts.Path, 0o777); err != nil {
+ logger.Error(" Directory does not exist and could not be created. ERROR: %v", err)
+ return fmt.Errorf("Configuration directory: \"%q\" does not exist and could not be created. ERROR: %w", fileOpts.Path, err)
+ }
+ fi, err = os.Stat(fileOpts.Path)
+ }
+ }
+ if err != nil {
+ if fileOpts.Required {
+ logger.Error(" Is REQUIRED but is not accessible. ERROR: %v", err)
+ return fmt.Errorf("Configuration file \"%q\" is not accessible but is required. Error: %w", fileOpts.Path, err)
+ }
+ logger.Warn(" NOTICE: is not accessible (Error: %v)", err)
+ // this is a non-critical error
+ return nil
+ }
+
+ if fileOpts.IsDirectory && !fi.IsDir() {
+ logger.Error(" ERROR: not a directory")
+ return fmt.Errorf("Configuration directory \"%q\" is not a directory. Error: %w", fileOpts.Path, err)
+ } else if !fileOpts.IsDirectory && !fi.Mode().IsRegular() {
+ logger.Error(" ERROR: not a regular file")
+ return fmt.Errorf("Configuration file \"%q\" is not a regular file. Error: %w", fileOpts.Path, err)
+ } else if fileOpts.Writable {
+ if err := isWritableDir(fileOpts.Path); err != nil {
+ logger.Error(" ERROR: is required to be writable but is not writable: %v", err)
+ return fmt.Errorf("Configuration file \"%q\" is required to be writable but is not. Error: %w", fileOpts.Path, err)
+ }
+ }
+ return nil
+}
+
+func checkConfigurationFiles(ctx context.Context, logger log.Logger, autofix bool) error {
+ if fi, err := os.Stat(setting.CustomConf); err != nil || !fi.Mode().IsRegular() {
+ logger.Error("Failed to find configuration file at '%s'.", setting.CustomConf)
+ logger.Error("If you've never ran Forgejo yet, this is normal and '%s' will be created for you on first run.", setting.CustomConf)
+ logger.Error("Otherwise check that you are running this command from the correct path and/or provide a `--config` parameter.")
+ logger.Critical("Cannot proceed without a configuration file")
+ return err
+ }
+
+ setting.MustInstalled()
+
+ configurationFiles := []configurationFile{
+ {"Configuration File Path", setting.CustomConf, false, true, false},
+ {"Repository Root Path", setting.RepoRootPath, true, true, true},
+ {"Data Root Path", setting.AppDataPath, true, true, true},
+ {"Custom File Root Path", setting.CustomPath, true, false, false},
+ {"Work directory", setting.AppWorkPath, true, true, false},
+ {"Log Root Path", setting.Log.RootPath, true, true, true},
+ }
+
+ if !setting.HasBuiltinBindata {
+ configurationFiles = append(configurationFiles, configurationFile{"Static File Root Path", setting.StaticRootPath, true, true, false})
+ }
+
+ numberOfErrors := 0
+ for _, configurationFile := range configurationFiles {
+ if err := checkConfigurationFile(logger, autofix, configurationFile); err != nil {
+ numberOfErrors++
+ }
+ }
+
+ if numberOfErrors > 0 {
+ logger.Critical("Please check your configuration files and try again.")
+ return fmt.Errorf("%d configuration files with errors", numberOfErrors)
+ }
+
+ return nil
+}
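+
+// Note (illustrative): the configurationFile literals above are positional;
+// the first entry is equivalent to this named form, which is easier to audit
+// against the struct definition:
+//
+//	configurationFile{
+//		Name:        "Configuration File Path",
+//		Path:        setting.CustomConf,
+//		IsDirectory: false,
+//		Required:    true,
+//		Writable:    false,
+//	}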
+
+func isWritableDir(path string) error {
+ // There's no platform-independent way of checking if a directory is writable
+ // https://stackoverflow.com/questions/20026320/how-to-tell-if-folder-exists-and-is-writable
+
+ tmpFile, err := os.CreateTemp(path, "doctors-order")
+ if err != nil {
+ return err
+ }
+ // close before removing: on Windows an open file cannot be deleted
+ tmpFile.Close()
+ if err := os.Remove(tmpFile.Name()); err != nil {
+ fmt.Printf("Warning: can't remove temporary file: '%s'\n", tmpFile.Name()) //nolint:forbidigo
+ }
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check paths and basic configuration",
+ Name: "paths",
+ IsDefault: true,
+ Run: checkConfigurationFiles,
+ AbortIfFailed: true,
+ SkipDatabaseInitialization: true,
+ Priority: 1,
+ })
+}
diff --git a/services/doctor/push_mirror_consistency.go b/services/doctor/push_mirror_consistency.go
new file mode 100644
index 0000000..68b96d6
--- /dev/null
+++ b/services/doctor/push_mirror_consistency.go
@@ -0,0 +1,91 @@
+// Copyright 2023 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/modules/log"
+
+ "xorm.io/builder"
+)
+
+func FixPushMirrorsWithoutGitRemote(ctx context.Context, logger log.Logger, autofix bool) error {
+ var missingMirrors []*repo_model.PushMirror
+
+ err := db.Iterate(ctx, builder.Gt{"id": 0}, func(ctx context.Context, repo *repo_model.Repository) error {
+ pushMirrors, _, err := repo_model.GetPushMirrorsByRepoID(ctx, repo.ID, db.ListOptions{})
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < len(pushMirrors); i++ {
+ _, err = repo_model.GetPushMirrorRemoteAddress(repo.OwnerName, repo.Name, pushMirrors[i].RemoteName)
+ if err != nil {
+ if strings.Contains(err.Error(), "No such remote") {
+ missingMirrors = append(missingMirrors, pushMirrors[i])
+ } else if logger != nil {
+ logger.Warn("Unable to retrieve the remote address of a mirror: %s", err)
+ }
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ if logger != nil {
+ logger.Critical("Unable to iterate across repounits to fix push mirrors without a git remote: Error %v", err)
+ }
+ return err
+ }
+
+ count := len(missingMirrors)
+ if !autofix {
+ if logger != nil {
+ if count == 0 {
+ logger.Info("Found no push mirrors with missing git remotes")
+ } else {
+ logger.Warn("Found %d push mirrors with missing git remotes", count)
+ }
+ }
+ return nil
+ }
+
+ for i := 0; i < len(missingMirrors); i++ {
+ if logger != nil {
+ logger.Info("Removing push mirror #%d (remote: %s), for repo: %s/%s",
+ missingMirrors[i].ID,
+ missingMirrors[i].RemoteName,
+ missingMirrors[i].GetRepository(ctx).OwnerName,
+ missingMirrors[i].GetRepository(ctx).Name)
+ }
+
+ err = repo_model.DeletePushMirrors(ctx, repo_model.PushMirrorOptions{
+ ID: missingMirrors[i].ID,
+ RepoID: missingMirrors[i].RepoID,
+ RemoteName: missingMirrors[i].RemoteName,
+ })
+ if err != nil {
+ if logger != nil {
+ logger.Critical("Error removing a push mirror (repo_id: %d, push_mirror: %d): %s", missingMirrors[i].Repo.ID, missingMirrors[i].ID, err)
+ }
+ return err
+ }
+ }
+
+ return nil
+}
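+
+// The "No such remote" substring matched above corresponds to git's own
+// error text when a configured remote is absent, e.g. (sketch):
+//
+//	$ git remote get-url missing-remote
+//	error: No such remote 'missing-remote'
+//
+// Matching on the message is fragile, but no structured error is available
+// to inspect here.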
+
+func init() {
+ Register(&Check{
+ Title: "Check for push mirrors without a git remote configured",
+ Name: "fix-push-mirrors-without-git-remote",
+ IsDefault: false,
+ Run: FixPushMirrorsWithoutGitRemote,
+ Priority: 7,
+ })
+}
diff --git a/services/doctor/repository.go b/services/doctor/repository.go
new file mode 100644
index 0000000..6c33426
--- /dev/null
+++ b/services/doctor/repository.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/storage"
+ repo_service "code.gitea.io/gitea/services/repository"
+
+ "xorm.io/builder"
+)
+
+func handleDeleteOrphanedRepos(ctx context.Context, logger log.Logger, autofix bool) error {
+ test := &consistencyCheck{
+ Name: "Repos with no existing owner",
+ Counter: countOrphanedRepos,
+ Fixer: deleteOrphanedRepos,
+ FixedMessage: "Deleted all content related to orphaned repos",
+ }
+ return test.Run(ctx, logger, autofix)
+}
+
+// countOrphanedRepos counts repositories whose owner user no longer exists
+func countOrphanedRepos(ctx context.Context) (int64, error) {
+ return db.CountOrphanedObjects(ctx, "repository", "user", "repository.owner_id=`user`.id")
+}
+
+// deleteOrphanedRepos deletes repositories whose owner user no longer exists
+func deleteOrphanedRepos(ctx context.Context) (int64, error) {
+ if err := storage.Init(); err != nil {
+ return 0, err
+ }
+
+ batchSize := db.MaxBatchInsertSize("repository")
+ e := db.GetEngine(ctx)
+ var deleted int64
+ adminUser := &user_model.User{IsAdmin: true}
+
+ for {
+ select {
+ case <-ctx.Done():
+ return deleted, ctx.Err()
+ default:
+ var ids []int64
+ if err := e.Table("`repository`").
+ Join("LEFT", "`user`", "repository.owner_id=`user`.id").
+ Where(builder.IsNull{"`user`.id"}).
+ Select("`repository`.id").Limit(batchSize).Find(&ids); err != nil {
+ return deleted, err
+ }
+
+ // if no ids are returned, all orphaned repositories have been deleted
+ if len(ids) == 0 {
+ return deleted, nil
+ }
+
+ for _, id := range ids {
+ if err := repo_service.DeleteRepositoryDirectly(ctx, adminUser, id, true); err != nil {
+ return deleted, err
+ }
+ deleted++
+ }
+ }
+ }
+}
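+
+// Note (illustrative): the batch loop above keeps every SELECT bounded to
+// batchSize rows and re-checks ctx.Done() between batches, so cancellation
+// takes effect between deletions rather than after one long-running query.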
+
+func init() {
+ Register(&Check{
+ Title: "Deleted all content related to orphaned repos",
+ Name: "delete-orphaned-repos",
+ IsDefault: false,
+ Run: handleDeleteOrphanedRepos,
+ Priority: 4,
+ })
+}
diff --git a/services/doctor/storage.go b/services/doctor/storage.go
new file mode 100644
index 0000000..3f3b562
--- /dev/null
+++ b/services/doctor/storage.go
@@ -0,0 +1,270 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+ "errors"
+ "io/fs"
+ "strings"
+
+ "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/packages"
+ "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/base"
+ "code.gitea.io/gitea/modules/log"
+ packages_module "code.gitea.io/gitea/modules/packages"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/storage"
+ "code.gitea.io/gitea/modules/util"
+)
+
+type commonStorageCheckOptions struct {
+ storer storage.ObjectStorage
+ isOrphaned func(path string, obj storage.Object, stat fs.FileInfo) (bool, error)
+ name string
+}
+
+func commonCheckStorage(logger log.Logger, autofix bool, opts *commonStorageCheckOptions) error {
+ totalCount, orphanedCount := 0, 0
+ totalSize, orphanedSize := int64(0), int64(0)
+
+ var pathsToDelete []string
+ if err := opts.storer.IterateObjects("", func(p string, obj storage.Object) error {
+ defer obj.Close()
+
+ totalCount++
+ stat, err := obj.Stat()
+ if err != nil {
+ return err
+ }
+ totalSize += stat.Size()
+
+ orphaned, err := opts.isOrphaned(p, obj, stat)
+ if err != nil {
+ return err
+ }
+ if orphaned {
+ orphanedCount++
+ orphanedSize += stat.Size()
+ if autofix {
+ pathsToDelete = append(pathsToDelete, p)
+ }
+ }
+ return nil
+ }); err != nil {
+ logger.Error("Error whilst iterating %s storage: %v", opts.name, err)
+ return err
+ }
+
+ if orphanedCount > 0 {
+ if autofix {
+ var deletedNum int
+ for _, p := range pathsToDelete {
+ if err := opts.storer.Delete(p); err != nil {
+ log.Error("Error whilst deleting %s from %s storage: %v", p, opts.name, err)
+ } else {
+ deletedNum++
+ }
+ }
+ logger.Info("Deleted %d/%d orphaned %s(s)", deletedNum, orphanedCount, opts.name)
+ } else {
+ logger.Warn("Found %d/%d (%s/%s) orphaned %s(s)", orphanedCount, totalCount, base.FileSize(orphanedSize), base.FileSize(totalSize), opts.name)
+ }
+ } else {
+ logger.Info("Found %d (%s) %s(s)", totalCount, base.FileSize(totalSize), opts.name)
+ }
+ return nil
+}
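+
+// Note (illustrative): deletion is deferred until IterateObjects returns;
+// deleting inside the iteration callback could invalidate the underlying
+// directory or bucket listing on some storage backends, which is why paths
+// are collected first and removed afterwards.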
+
+type checkStorageOptions struct {
+ All bool
+ Attachments bool
+ LFS bool
+ Avatars bool
+ RepoAvatars bool
+ RepoArchives bool
+ Packages bool
+}
+
+// checkStorage will return a doctor check function to check the requested storage types for "orphaned" stored object/files and optionally delete them
+func checkStorage(opts *checkStorageOptions) func(ctx context.Context, logger log.Logger, autofix bool) error {
+ return func(ctx context.Context, logger log.Logger, autofix bool) error {
+ if err := storage.Init(); err != nil {
+ logger.Error("storage.Init failed: %v", err)
+ return err
+ }
+
+ if opts.Attachments || opts.All {
+ if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.Attachments,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ exists, err := repo.ExistAttachmentsByUUID(ctx, stat.Name())
+ return !exists, err
+ },
+ name: "attachment",
+ }); err != nil {
+ return err
+ }
+ }
+
+ if opts.LFS || opts.All {
+ if !setting.LFS.StartServer {
+ // do not return early: with opts.All the remaining storage checks must still run
+ logger.Info("LFS isn't enabled (skipped)")
+ } else if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.LFS,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ // The oid of an LFS stored object is the name but with all the path.Separators removed
+ oid := strings.ReplaceAll(path, "/", "")
+ exists, err := git.ExistsLFSObject(ctx, oid)
+ return !exists, err
+ },
+ name: "LFS file",
+ }); err != nil {
+ return err
+ }
+ }
+
+ if opts.Avatars || opts.All {
+ if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.Avatars,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ exists, err := user.ExistsWithAvatarAtStoragePath(ctx, path)
+ return !exists, err
+ },
+ name: "avatar",
+ }); err != nil {
+ return err
+ }
+ }
+
+ if opts.RepoAvatars || opts.All {
+ if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.RepoAvatars,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ exists, err := repo.ExistsWithAvatarAtStoragePath(ctx, path)
+ return !exists, err
+ },
+ name: "repo avatar",
+ }); err != nil {
+ return err
+ }
+ }
+
+ if opts.RepoArchives || opts.All {
+ if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.RepoArchives,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ exists, err := repo.ExistsRepoArchiverWithStoragePath(ctx, path)
+ if err == nil || errors.Is(err, util.ErrInvalidArgument) {
+ // invalid arguments mean that the object is not a valid repo archiver and it should be removed
+ return !exists, nil
+ }
+ return !exists, err
+ },
+ name: "repo archive",
+ }); err != nil {
+ return err
+ }
+ }
+
+ if opts.Packages || opts.All {
+ if !setting.Packages.Enabled {
+ // keep the same shape as the LFS branch above: no early return
+ logger.Info("Packages aren't enabled (skipped)")
+ } else if err := commonCheckStorage(logger, autofix,
+ &commonStorageCheckOptions{
+ storer: storage.Packages,
+ isOrphaned: func(path string, obj storage.Object, stat fs.FileInfo) (bool, error) {
+ key, err := packages_module.RelativePathToKey(path)
+ if err != nil {
+ // If there is an error here then the relative path does not match a valid package
+ // Therefore it is orphaned by default
+ return true, nil
+ }
+
+ exists, err := packages.ExistPackageBlobWithSHA(ctx, string(key))
+
+ return !exists, err
+ },
+ name: "package blob",
+ }); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check if there are orphaned storage files",
+ Name: "storages",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{All: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+
+ Register(&Check{
+ Title: "Check if there are orphaned attachments in storage",
+ Name: "storage-attachments",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{Attachments: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+
+ Register(&Check{
+ Title: "Check if there are orphaned lfs files in storage",
+ Name: "storage-lfs",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{LFS: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+
+ Register(&Check{
+ Title: "Check if there are orphaned avatars in storage",
+ Name: "storage-avatars",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{Avatars: true, RepoAvatars: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+
+ Register(&Check{
+ Title: "Check if there are orphaned archives in storage",
+ Name: "storage-archives",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{RepoArchives: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+
+ Register(&Check{
+ Title: "Check if there are orphaned package blobs in storage",
+ Name: "storage-packages",
+ IsDefault: false,
+ Run: checkStorage(&checkStorageOptions{Packages: true}),
+ AbortIfFailed: false,
+ SkipDatabaseInitialization: false,
+ Priority: 1,
+ })
+}
diff --git a/services/doctor/usertype.go b/services/doctor/usertype.go
new file mode 100644
index 0000000..ab32b78
--- /dev/null
+++ b/services/doctor/usertype.go
@@ -0,0 +1,41 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package doctor
+
+import (
+ "context"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+)
+
+func checkUserType(ctx context.Context, logger log.Logger, autofix bool) error {
+ count, err := user_model.CountWrongUserType(ctx)
+ if err != nil {
+ logger.Critical("Error: %v whilst counting wrong user types")
+ return err
+ }
+ if count > 0 {
+ if autofix {
+ if count, err = user_model.FixWrongUserType(ctx); err != nil {
+ logger.Critical("Error: %v whilst fixing wrong user types")
+ return err
+ }
+ logger.Info("%d users with wrong type fixed", count)
+ } else {
+ logger.Warn("%d users with wrong type exist", count)
+ }
+ }
+ return nil
+}
+
+func init() {
+ Register(&Check{
+ Title: "Check if user with wrong type exist",
+ Name: "check-user-type",
+ IsDefault: true,
+ Run: checkUserType,
+ Priority: 3,
+ })
+}